the-stack_106_22192
# Copyright 2021 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OpenFOAM Benchmark.
OpenFOAM is a C++ toolbox for the development of customized numerical solvers,
and pre-/post-processing utilities for the solution of continuum mechanics
problems, most prominently including computational fluid dynamics.
See: https://openfoam.org/
This benchmark runs a motorbike simulation that is popularly used to measure
scalability of OpenFOAM across multiple cores. Since this is a complex
computation, make sure to use a compute-focused machine-type that has multiple
cores before attempting to run.
"""
import collections
import logging
import posixpath
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import hpc_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import openfoam
from perfkitbenchmarker.linux_packages import openmpi
_DEFAULT_CASE = 'motorbike'
_CASE_PATHS = {
'motorbike': 'tutorials/incompressible/simpleFoam/motorBike',
}
assert _DEFAULT_CASE in _CASE_PATHS
FLAGS = flags.FLAGS
flags.DEFINE_enum('openfoam_case', _DEFAULT_CASE,
sorted(list(_CASE_PATHS.keys())),
'Name of the OpenFOAM case to run.')
flags.DEFINE_list('openfoam_dimensions', ['20_8_8'], 'Dimensions of the case.')
flags.DEFINE_integer(
'openfoam_num_threads_per_vm', None,
'The number of threads per VM to run OpenFOAM with. If None, defaults to '
'half the total number of vCPUs available.')
flags.DEFINE_string(
'openfoam_mpi_mapping', 'core:SPAN',
'Mpirun process mapping to use as arguments to "mpirun --map-by".')
flags.DEFINE_enum(
'openfoam_decomp_method', 'scotch', ['scotch', 'hierarchical', 'simple'],
'Decomposition method to use in decomposePar. See: '
'https://cfd.direct/openfoam/user-guide/v7-running-applications-parallel/')
flags.DEFINE_integer(
'openfoam_max_global_cells', 200 * 1000 * 1000,
'The maximum number of refinement cells to use in snappyHexMeshDict. See: '
'https://cfd.direct/openfoam/user-guide/v6-snappyhexmesh/')
BENCHMARK_NAME = 'openfoam'
_BENCHMARK_ROOT = '$HOME/OpenFOAM/run'
BENCHMARK_CONFIG = f"""
openfoam:
description: Runs an OpenFOAM benchmark.
vm_groups:
default:
vm_spec:
GCP:
machine_type: c2-standard-8
zone: us-east1-c
boot_disk_size: 100
Azure:
machine_type: Standard_F8s_v2
zone: eastus2
boot_disk_size: 100
AWS:
machine_type: c5.2xlarge
zone: us-east-1f
boot_disk_size: 100
os_type: ubuntu1604
vm_count: 2
disk_spec:
GCP:
disk_type: nfs
nfs_managed: False
mount_point: {_BENCHMARK_ROOT}
Azure:
disk_type: nfs
nfs_managed: False
mount_point: {_BENCHMARK_ROOT}
AWS:
disk_type: nfs
nfs_managed: False
mount_point: {_BENCHMARK_ROOT}
"""
_MACHINE_FILE = posixpath.join(_BENCHMARK_ROOT, 'MACHINEFILE')
_RUN_SCRIPT = 'Allrun'
_BLOCK_MESH_DICT = 'system/blockMeshDict'
_DECOMPOSE_DICT = 'system/decomposeParDict'
_SNAPPY_HEX_MESH_DICT = 'system/snappyHexMeshDict'
_SSH_CONFIG_CMD = ('echo "LogLevel ERROR\nHost *\n IdentitiesOnly yes\n" | '
'tee -a $HOME/.ssh/config')
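# Lines in the Allrun script that start with one of the excluded prefixes
# (shell comments, sourced scripts, cd) are skipped when parsing; every other
# parsed command is expected to contain one of the commands listed below.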
_RUN_SCRIPT_EXCLUDED_PREFIXES = ['#', '.', 'cd']
_RUN_SCRIPT_VALID_COMMANDS = [
'cp', 'surfaceFeatures', 'blockMesh', 'decomposePar', 'snappyHexMesh',
'patchSummary', 'potentialFoam', '$(getApplication)', 'reconstructParMesh',
'reconstructPar'
]
def GetConfig(user_config):
"""Returns the configuration of a benchmark."""
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
if FLAGS['num_vms'].present:
config['vm_groups']['default']['vm_count'] = FLAGS.num_vms
return config
@flags.validator('openfoam_dimensions')
def _CheckDimensions(dimensions_list):
# throws InvalidValue if an entry is not correct
for dimensions in dimensions_list:
_ParseDimensions(dimensions)
return True
def _ParseDimensions(dimensions):
"""Parse and validate an individual dimensions entry.
Args:
dimensions: String formatted as "_" separated integers like: '80_20_20'.
Returns:
Parsed dimensions like: '80 20 20'.
Raises:
errors.Config.InvalidValue: If input dimensions are incorrectly formatted.
"""
dimensions = dimensions.split('_')
if not all(value.isdigit() for value in dimensions):
raise errors.Config.InvalidValue(
'Expected list of ints separated by "_" in --openfoam_dimensions '
f'but received {dimensions}.')
return ' '.join(dimensions)
def Prepare(benchmark_spec):
"""Prepares the VMs and other resources for running the benchmark.
This is a good place to download binaries onto the VMs, create any data files
needed for a benchmark run, etc.
Args:
benchmark_spec: The benchmark spec for this sample benchmark.
"""
vms = benchmark_spec.vms
vm_util.RunThreaded(lambda vm: vm.Install('openfoam'), vms)
# Allow ssh access to other vms.
vm_util.RunThreaded(lambda vm: vm.AuthenticateVm(), vms)
# Avoids printing ssh warnings and prevents too many auth errors.
vm_util.RunThreaded(lambda vm: vm.RemoteCommand(_SSH_CONFIG_CMD), vms)
# Tell mpirun about other nodes.
hpc_util.CreateMachineFile(vms, remote_path=_MACHINE_FILE)
def _GetSample(line):
"""Parse a single output line into a performance sample.
Input format:
real 100.00
Args:
line: A single line from the OpenFOAM timing output.
Returns:
A single performance sample, with times in ms.
"""
runtime_category, runtime_output = line.split()
try:
runtime_seconds = int(float(runtime_output))
except ValueError:
raise ValueError(
f'Output "{line}" does not match expected format "real 100.00".')
logging.info('Runtime of %s seconds from [%s, %s]', runtime_seconds,
runtime_category, runtime_output)
runtime_category = 'time_' + runtime_category
return sample.Sample(runtime_category, runtime_seconds, 'seconds')
def _GetSamples(output):
"""Parse the output and return performance samples.
Output is in the format (example numbers):
real 100.00
user 60.55
sys 99.31
Args:
output: The output from running the OpenFOAM benchmark.
Returns:
A list of performance samples.
"""
return [_GetSample(line) for line in output.strip().splitlines()]
def _GetOpenfoamVersion(vm):
"""Get the installed OpenFOAM version from the vm."""
return vm.RemoteCommand('echo $WM_PROJECT_VERSION')[0].rstrip()
def _GetWorkingDirPath():
"""Get the base directory name of the case being run."""
case_dir_name = posixpath.basename(_CASE_PATHS[FLAGS.openfoam_case])
return posixpath.join(_BENCHMARK_ROOT, case_dir_name)
def _GetPath(openfoam_file):
"""Get the absolute path to the file in the working directory."""
return posixpath.join(_GetWorkingDirPath(), openfoam_file)
def _SetDictEntry(vm, key, value, dict_file_name):
"""Sets an entry in an OpenFOAM dictionary file.
Args:
vm: The VM to set the entry on.
key: String; name of the key to set (like hierarchicalCoeffs.n).
value: String; the value to set.
dict_file_name: String; name of the file to set the specified entry. This
file should be in the working directory. Example: system/snappyHexMeshDict
"""
vm.RemoteCommand(
f'foamDictionary -entry {key} -set "{value}" {_GetPath(dict_file_name)}')
def _UseMpi(vm, num_processes, mapping):
"""Configure OpenFOAM to use MPI if running with more than 1 VM.
This function looks for the word "runParallel" in the run script and replaces
it with an mpirun command.
Args:
vm: The worker VM to use MPI on.
num_processes: An integer representing the total number of processes for the
MPI job.
mapping: A string for the mpirun --map-by flag.
"""
run_script = _GetPath(_RUN_SCRIPT)
vm_util.ReplaceText(
vm, 'runParallel', 'mpirun '
f'-hostfile {_MACHINE_FILE} '
'-mca btl ^openib '
f'--map-by {mapping} '
f'-np {num_processes}', run_script, '|')
vm_util.ReplaceText(vm, '^mpirun.*', '& -parallel', run_script)
def _GetBaseCommand(command):
"""Returns a base OpenFOAM command.
Example:
command "mpirun -hostfile /home/perfkit/OpenFOAM/run/MACHINEFILE -mca btl
^openib --map-by core:SPAN -np 16 potentialFoam -parallel"
returns "potentialFoam"
Args:
command: String, the command to parse.
Returns:
The base OpenFOAM command from _RUN_SCRIPT_VALID_COMMANDS.
"""
for base_command in _RUN_SCRIPT_VALID_COMMANDS:
if base_command in command:
return base_command
raise ValueError(f'Unrecognized command in "{command}", please add it to '
'_RUN_SCRIPT_VALID_COMMANDS')
def _RunCommand(vm, command):
"""Runs a valid OpenFOAM command, returning samples."""
_, output = vm.RemoteCommand(
f'cd {_GetWorkingDirPath()} && time -p {command}')
results = _GetSamples(output)
for result in results:
result.metadata['full_command'] = command
result.metadata['command'] = _GetBaseCommand(command)
return results
def _IsValidCommand(command):
if not command:
return False
for prefix in _RUN_SCRIPT_EXCLUDED_PREFIXES:
if command.startswith(prefix):
return False
return True
def _ParseRunCommands(vm, remote_run_file):
"""Parses OpenFOAM run commands from a case's Allrun file."""
local_destination = vm_util.PrependTempDir(_RUN_SCRIPT)
vm.PullFile(local_destination, remote_run_file)
commands = []
for command in open(local_destination):
command = command.strip('\n')
if _IsValidCommand(command):
commands.append(command)
logging.info('Parsed run commands from %s:\n%s', remote_run_file, commands)
return commands
def _GenerateFullRuntimeSamples(samples):
"""Append the full runtime results to samples."""
assert samples, f'{samples} should not be an empty list'
counts = collections.Counter()
for s in samples:
counts[s.metric] += s.value
for metric in ('time_real', 'time_user', 'time_sys'):
samples.append(sample.Sample(metric, counts[metric], 'seconds'))
def _RunCase(master_vm, dimensions):
"""Runs the case with the given dimensions.
This function automatically looks for the "Allrun" script in the working
directory.
Args:
master_vm: The vm to run the case commands on. If using the default NFS
server, it doesn't actually matter which vm this is.
dimensions: A string of the dimensions to run with. Like "100 24 24".
Returns:
A list of performance samples for the given dimensions.
"""
dims_entry = ('( hex ( 0 1 2 3 4 5 6 7 ) '
f'( {_ParseDimensions(dimensions)} ) '
'simpleGrading ( 1 1 1 ) )')
_SetDictEntry(master_vm, 'blocks', dims_entry, _BLOCK_MESH_DICT)
master_vm.RemoteCommand(f'cd {_GetWorkingDirPath()} && ./Allclean')
results = []
run_script_path = _GetPath(_RUN_SCRIPT)
for command in _ParseRunCommands(master_vm, run_script_path):
command_results = _RunCommand(master_vm, command)
results.extend(command_results)
_GenerateFullRuntimeSamples(results)
# Update every run with run-specific metadata.
for result in results:
result.metadata['dimensions'] = dimensions
return results
def Run(benchmark_spec):
"""Runs the benchmark and returns a dict of performance data.
It must be possible to run the benchmark multiple times after the Prepare
stage. This method runs a single case with multiple dimensions.
Args:
benchmark_spec: The benchmark spec for the OpenFOAM benchmark.
Returns:
A list of performance samples.
"""
vms = benchmark_spec.vms
master_vm = vms[0]
num_vms = len(vms)
# Run configuration metadata:
num_cpus_available = num_vms * master_vm.NumCpusForBenchmark()
if FLAGS.openfoam_num_threads_per_vm is None:
num_cpus_to_use = num_cpus_available // 2
else:
num_cpus_to_use = num_vms * FLAGS.openfoam_num_threads_per_vm
case_name = FLAGS.openfoam_case
mpi_mapping = FLAGS.openfoam_mpi_mapping
decomp_method = FLAGS.openfoam_decomp_method
max_global_cells = FLAGS.openfoam_max_global_cells
openfoam_version = _GetOpenfoamVersion(master_vm)
openmpi_version = openmpi.GetMpiVersion(master_vm)
common_metadata = {
'case_name': case_name,
'decomp_method': decomp_method,
'max_global_cells': max_global_cells,
'mpi_mapping': mpi_mapping,
'openfoam_version': openfoam_version,
'openmpi_version': openmpi_version,
'total_cpus_available': num_cpus_available,
'total_cpus_used': num_cpus_to_use,
}
logging.info('Running %s case on %s/%s cores on %s vms', case_name,
num_cpus_to_use, num_cpus_available, num_vms)
logging.info('Common metadata: %s', common_metadata)
# Copy the run directory.
case_path = posixpath.join(openfoam.OPENFOAM_ROOT, _CASE_PATHS[case_name])
master_vm.RemoteCommand(f'cp -r {case_path} {_BENCHMARK_ROOT}')
# Configure common parameters.
_SetDictEntry(master_vm, 'method', decomp_method, _DECOMPOSE_DICT)
_SetDictEntry(master_vm, 'numberOfSubdomains', num_cpus_to_use,
_DECOMPOSE_DICT)
_SetDictEntry(master_vm, 'hierarchicalCoeffs.n', f'({num_cpus_to_use} 1 1)',
_DECOMPOSE_DICT)
_SetDictEntry(master_vm, 'castellatedMeshControls.maxGlobalCells',
max_global_cells, _SNAPPY_HEX_MESH_DICT)
_UseMpi(master_vm, num_cpus_to_use, mpi_mapping)
# Run and gather samples.
samples = []
for dimensions in FLAGS.openfoam_dimensions:
results = _RunCase(master_vm, dimensions)
# Update every case run with common metadata.
for result in results:
result.metadata.update(common_metadata)
samples.extend(results)
return samples
def Cleanup(benchmark_spec):
"""Cleans up after the benchmark completes.
The state of the VMs should be equivalent to the state before Prepare was
called.
Args:
benchmark_spec: The benchmark spec for the OpenFOAM benchmark.
"""
del benchmark_spec

the-stack_106_22193
# coding: utf-8
from __future__ import print_function
# numerical
import numpy as np
from scipy.optimize import minimize_scalar, minimize
from scipy.spatial.distance import euclidean, pdist, cdist, squareform
from numpy import array
from operator import itemgetter
# niching benchmarking
from lib.niching_func import niching_func
from lib.count_goptima import count_goptima
from numba import autojit
import Startup as St
# Distance metric
@autojit
def LineSimple( x1, y1, x2, y2 ):
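# Line-based metric: the distance grows with how far f at the segment midpoint
# deviates from the mean of y1 and y2, scaled by the spatial separation of
# x1 and x2 and normalized by |y2 - y1| (eps guards against division by zero).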
eps = 1e-20
xm = 0.5*(x2+x1)
sumx = ((x2-x1)**2).sum()
sumy = (y2-y1)**2
sumdy = (0.5*(y1+y2) - f(xm))**2
return np.sqrt(sumdy*sumx/(sumy+eps))
@autojit
def feaseable(x):
D = x.shape[0]
return np.all(x >= f.lb[:D]) and np.all(x <= f.ub[:D])
@autojit
def genDir(dim):
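# Random search direction: sample each coordinate uniformly in [-1, 1], then
# normalize to unit length.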
dplus = np.random.uniform(low=-1.0,high=1.0,size=dim)
return dplus/np.sqrt((dplus**2).sum())
def optNode(X, Y, Adj, lastidx, nodes, step, thr):
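# For each candidate node: pick a random direction, clip the search range to the
# feasible box, run bounded 1-D maximizations of the line metric over geometrically
# spaced radius brackets, and either move the node to an improving point or spawn
# a new linked node when the midpoint falls below both endpoints.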
nevals = 0
for i in np.arange(nodes.shape[0]):
x = np.copy(X[nodes[i],:])
y = np.copy(Y[nodes[i]])
dim = x.shape[0]
d = genDir(dim)
maxf = lambda alpha: -LineSimple(x,y,x+alpha*d, f(x+alpha*d))
rl, ru = (f.lb[:dim] - x)/d, (f.ub[:dim] - x)/d
if rl[rl>=0].shape[0] == 0 and ru[ru>=0].shape[0] == 0:
continue
r = np.append(rl[rl>=0], ru[ru>=0]).min()
if r == 0:
continue
n = int(1 + (np.log(r) - np.log(step)) / np.log(2))
rs = step*(2.0**np.arange(n-1,-1,-1))
rs2 = np.minimum(rs*2.0, r*np.ones(rs.shape[0]))
for j in np.arange(rs.shape[0]):
ri = rs[j]
ri2 = rs2[j]
if ri2 < ri:
continue
alpha = minimize_scalar(maxf, bounds=(ri,ri2), method='bounded')
nevals += alpha.nfev
alpha = alpha.x
x2 = x + alpha*d
xm = (x+x2)/2.0
y2 = f(x2)
ym = f(xm)
if ym > y2 and ym > y:
X[nodes[i],:] = np.copy(xm)
Y[nodes[i]] = np.copy(ym)
elif ym< y2 and ym < y:
dist = cdist(np.array([xm]), X[:lastidx,:], 'euclidean')[0]
closest = np.argmin(dist)
if dist[closest] <= thr and y2 > y:
X[nodes[i],:] = np.copy(x2)
Y[nodes[i]] = np.copy(y2)
elif lastidx < 1000:
X[lastidx,:] = np.copy(x2)
Y[lastidx] = np.copy(y2)
Adj[closest,lastidx] = 1
Adj[lastidx,closest] = 1
lastidx += 1
elif y2 > ym and y2 > y:
X[nodes[i],:] = np.copy(x2)
Y[nodes[i]] = np.copy(y2)
return lastidx, nevals
@autojit
def candidateNodes(X, Y, Adj, lastidx, npop):
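# Sampling weight is (lastidx - degree + 1), so weakly connected nodes are
# selected more often once the population exceeds npop.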
if lastidx < npop:
return np.arange(lastidx)
degree = lastidx - Adj[:lastidx,:lastidx].sum(axis=0) + 1.0
degree = degree[:lastidx]
candnodes = np.random.choice(np.arange(lastidx), npop,
p=degree/degree.sum())
return candnodes
@autojit
def Supress(X, Y, Adj, lastidx, thr, thrL, ls):
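# Suppression: individuals closer than thr (Euclidean) or thrL (line metric) are
# grouped and only the best-scoring member of each group is kept; survivors are
# refined with St.SciOpt and the adjacency graph is rebuilt from nearest neighbours.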
tabu = np.zeros(lastidx)
flag = np.zeros(lastidx)
idx = 0
dist = squareform(pdist(X[:lastidx,:], 'euclidean'))
ldist = np.zeros((lastidx,lastidx))
for i in np.arange(lastidx-1):
for j in np.arange(i+1,lastidx):
ldist[i,j] = LineSimple(X[i],Y[i],X[j],Y[j])
ldist[j,i] = ldist[i,j]
for i in np.arange(lastidx):
if tabu[i]==0:
x, y = X[i], Y[i]
idx = np.where(np.logical_or(dist[i]<=thr,ldist[i]<=thrL))[0]
maxI = idx[np.argmax(Y[idx])]
flag[maxI]=1
tabu[idx]=1
idx = np.where(flag==1)[0]
lastidx=idx.shape[0]
X[:lastidx] = np.copy(X[idx])
Y[:lastidx] = np.copy(Y[idx])
X[:lastidx], Y[:lastidx], nv = St.SciOpt(X[:lastidx], Y[:lastidx])
Adj = np.zeros((1000,1000))
if lastidx>1:
for i in np.arange(lastidx):
idx = np.argsort(cdist(np.array([X[i,:]]),X[:lastidx,:],
'euclidean'))[0][1]
Adj[i,idx] = 1
Adj[idx,i] = 1
#nv = 0
#if ls:
# X[:lastidx], Y[:lastidx], nv = St.CMAOpt(X[:lastidx], Y[:lastidx], Adj)
return lastidx, nv
@autojit
def LinkedLineOpt(maxit, dim, npop, step, thr, thrL,mute):
maxpop = 1000
X = np.zeros((maxpop,dim))
Y = np.zeros(maxpop)
Adj = np.zeros((maxpop,maxpop))
lastidx = 1
X[0,:] = f.lb[:dim] + (f.ub[:dim]-f.lb[:dim]) * 0.5 * np.ones(dim)
Y[0] = f(X[0])
nevals = 0
for it in range(maxit):
nodes = candidateNodes(X, Y, Adj, lastidx, npop)
lastidx, nevals2 = optNode(X, Y, Adj, lastidx, nodes, step, thr)
nevals += nevals2
if it%20 == 0 and lastidx > 50:
lastidx, nv = Supress(X, Y, Adj, lastidx, thr, thrL, True)
nevals += nv
if it % 5 == 0 and not mute:
cg = cgopt1(X[:lastidx])
print(lastidx, cg, nevals)
if cg >= nopt*0.5:
break
lastidx, nv = Supress(X, Y, Adj, lastidx, thr, thrL, True)
nevals += nv
if not mute:
print("end")
print(lastidx, cgopt1(X[:lastidx]), nevals)
return X[:lastidx,:], Y[:lastidx], nevals

the-stack_106_22194
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=22
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.Y.on(input_qubit[2])) # number=13
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=10
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=11
c.append(cirq.H.on(input_qubit[0])) # number=12
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=6
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
c.append(cirq.H.on(input_qubit[1])) # number=16
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.Y.on(input_qubit[1])) # number=14
c.append(cirq.Y.on(input_qubit[1])) # number=15
c.append(cirq.H.on(input_qubit[0])) # number=19
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=20
c.append(cirq.H.on(input_qubit[0])) # number=21
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=18
# circuit end
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =4000
info = cirq.final_state_vector(circuit)
qubits = round(log2(len(info)))
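# Probability of measuring each computational-basis bitstring, taken from the
# final state vector.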
frequencies = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
writefile = open("../data/startCirq_Class982.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()

the-stack_106_22195
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
./test_code.py --fast; red_green_bar.py $? $COLUMNS
red_green_bar.py is taken from https://github.com/kwadrat/rgb_tdd.git
"""
import sys
if sys.version_info[0] < 3:
print("You need Python 3 to run this script.")
sys.exit(1)
import unittest
from pathlib import Path
from test_eisenstein import TestEisensteinNumbers
from test_eisenstein_fractions import TestEisensteinFractionNumbers
from test_eisenstein_operations import TestEisensteinFractionTimeSeriesOperations
import parameters
def runningInTravis():
home = str(Path.home())
fields = home.strip().split("/")
if "travis" in fields:
return True
return False
fast_test_ls = [TestEisensteinNumbers, TestEisensteinFractionNumbers]
slow_test_ls = [TestEisensteinFractionTimeSeriesOperations]
def add_all_fast(suite):
for one_test in fast_test_ls:
suite.addTest(unittest.makeSuite(one_test))
def add_all_slow(suite):
for one_test in slow_test_ls:
suite.addTest(unittest.makeSuite(one_test))
def summary_status(suite):
text_test_result = unittest.TextTestRunner().run(suite)
return not not (text_test_result.failures or text_test_result.errors)
def perform_only_fast_tests():
suite = unittest.TestSuite()
add_all_fast(suite)
return summary_status(suite)
def perform_tests():
suite = unittest.TestSuite()
add_all_fast(suite)
add_all_slow(suite)
return summary_status(suite)
if __name__ == "__main__":
result = 1 # assuming failure of test script
if len(sys.argv) >= 2 and sys.argv[1] == "--fast":
result = perform_only_fast_tests()
elif len(sys.argv) >= 3 and sys.argv[1] == "--setscale":
TestRange = int(sys.argv[2])
parameters.cfg_prm.set_range(TestRange)
result = perform_tests()
elif runningInTravis():
print("Wow! We are under Travis CI!")
TestRange = 10
parameters.cfg_prm.set_range(TestRange)
result = perform_tests()
else:
result = perform_tests() # go ahead with defaults
sys.exit(result)

the-stack_106_22200
import sys
import configparser
import RPi.GPIO as GPIO
import time
config_file = sys.argv[1]
config = configparser.ConfigParser()
config.read(config_file)
led_pin = int(config['SETTINGS']['led_pin'])
delay = float(config['SETTINGS']['delay'])
exception_sleep_time = float(config['SETTINGS']['exception_sleep_time'])
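# Example config file contents (illustrative values):
# [SETTINGS]
# led_pin = 18
# delay = 0.5
# exception_sleep_time = 1.0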
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(led_pin,GPIO.OUT)
while True:
try:
GPIO.output(led_pin,GPIO.HIGH)
time.sleep(delay)
GPIO.output(led_pin,GPIO.LOW)
time.sleep(delay)
except Exception:
time.sleep(exception_sleep_time)
finally:
GPIO.output(led_pin,GPIO.LOW)

the-stack_106_22203
import random
def typoglycemia(sentence):
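# Shuffle the interior letters of every word longer than four characters,
# keeping the first and last letters in place.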
transformed = []
for word in sentence.split():
if len(word) > 4:
head, middle, last = word[0], list(word[1:-1]), word[-1]
random.shuffle(middle)
word = head + ''.join(middle) + last
transformed.append(word)
return ' '.join(transformed)
if __name__ == '__main__':
sentence = "I couldn't believe that I could actually understand what I was reading : the phenomenal power of the human mind ."
print(typoglycemia(sentence))

the-stack_106_22205
import Image
def vectorToRGBA(vec, divider = 5):
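# Encode a 2D vector as RGBA: R holds the negative-x component, G the positive-x,
# B the negative-y and A the positive-y component, each scaled to 0-255 and
# reduced by `divider`.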
x1 = x2 = y1 = y2 = 0.0
if vec[0] < 0:
x2 = -vec[0]
else:
x1 = vec[0]
if vec[1] < 0:
y2 = -vec[1]
else:
y1 = vec[1]
return (int(x2*255.0)/divider, int(x1*255.0)/divider, int(y2*255.0)/divider, int(y1*255.0)/divider)
def RGToVec(RGBA):
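# Decode a vector from a pixel's red and green channels, treating 0.5 as the
# zero point for both axes.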
r = float(RGBA[0])/256.0
g = float(RGBA[1])/256.0
x = (0.5 - r);
y = -(0.5 - g);
return (x, y)
in_path = "C:\Python27\Distortion\distortion_test.png"
in_img = Image.open(in_path)
img = Image.new('RGBA',in_img.size)
in_pix = in_img.load()
pix = img.load()
for x in range(img.size[0]):
for y in range(img.size[1]):
vec = RGToVec(in_pix[x, y])
color = vectorToRGBA(vec)
pix[x, y] = color
img.save('distortion1.png', 'PNG')

the-stack_106_22207
from const import *
import numpy as np
class HaasoscopeOversample():
def __init__(self):
self.dooversample=np.zeros(HAAS_NUM_BOARD*HAAS_NUM_CHAN_PER_BOARD, dtype=int) # 1 is oversampling, 0 is no oversampling, 9 is over-oversampling
def ToggleOversamp(self,chan):
#tell it to toggle oversampling for this channel
chanonboard = chan%HAAS_NUM_CHAN_PER_BOARD
if chanonboard>1: return False
if chanonboard==1 and self.dooversample[chan] and self.dooversample[chan-1]==9: print(("first disable over-oversampling on channel",chan-1)); return False
# self.togglechannel(chan+2,True)
self.dooversample[chan] = not self.dooversample[chan];
print(("oversampling is now",self.dooversample[chan],"for channel",chan))
return True
# if self.dooversample[chan] and self.downsample>0: self.telldownsample(0) # must be in max sampling mode for oversampling to make sense
# frame=[]
# frame.append(141)
# firmchan=self.getfirmchan(chan)
# frame.append(firmchan)
# self.ser.write(frame)
# self.drawtext()
# self.figure.canvas.draw()
def oversample(self,ydata,c1,c2):
tempc1=ydata[c1]
tempc2=ydata[c2]
adjustmeanandrms=True
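# Rescale both channels to a shared mean and RMS before interleaving, so that
# offset/gain differences between the two channels do not distort the merged trace.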
if adjustmeanandrms:
mean_c1 = np.mean(tempc1)
rms_c1 = np.sqrt(np.mean((tempc1-mean_c1)**2))
mean_c2 = np.mean(tempc2)
rms_c2 = np.sqrt(np.mean((tempc2-mean_c2)**2))
meanmean=(mean_c1+mean_c2)/2.
meanrms=(rms_c1+rms_c2)/2.
tempc1=meanrms*(tempc1-mean_c1)/rms_c1 + meanmean
tempc2=meanrms*(tempc2-mean_c2)/rms_c2 + meanmean
# print (mean_c1, mean_c2, rms_c1, rms_c2)
mergedsamps=np.empty(HAAS_NUM_SAMPLES*2)
mergedsamps[0:HAAS_NUM_SAMPLES*2:2]=tempc1 # a little tricky which is 0 and which is 1 (i.e. which is sampled first!)
mergedsamps[1:HAAS_NUM_SAMPLES*2:2]=tempc2
ydata[c1]=mergedsamps[0:HAAS_NUM_SAMPLES]
ydata[c2]=mergedsamps[HAAS_NUM_SAMPLES:HAAS_NUM_SAMPLES*2]
def TryOversample(self,board,ydata):
if self.dooversample[HAAS_NUM_CHAN_PER_BOARD*(HAAS_NUM_BOARD-board-1)]: self.oversample(ydata,0,2)
if self.dooversample[HAAS_NUM_CHAN_PER_BOARD*(HAAS_NUM_BOARD-board-1)+1]: self.oversample(ydata,1,3)
def overoversample(self,c1,c2):
tempc1=np.concatenate([self.ydata[c1],self.ydata[c1+2]])
tempc2=np.concatenate([self.ydata[c2],self.ydata[c2+2]])
adjustmeanandrms=True
if adjustmeanandrms:
mean_c1 = np.mean(tempc1)
rms_c1 = np.sqrt(np.mean((tempc1-mean_c1)**2))
mean_c2 = np.mean(tempc2)
rms_c2 = np.sqrt(np.mean((tempc2-mean_c2)**2))
meanmean=(mean_c1+mean_c2)/2.
meanrms=(rms_c1+rms_c2)/2.
tempc1=meanrms*(tempc1-mean_c1)/rms_c1 + meanmean
tempc2=meanrms*(tempc2-mean_c2)/rms_c2 + meanmean
#print mean_c1, mean_c2, rms_c1, rms_c2
ns=2*HAAS_NUM_SAMPLES
mergedsamps=np.empty(ns*2)
mergedsamps[0:ns*2:2]=tempc1 # a little tricky which is 0 and which is 1 (i.e. which is sampled first!)
mergedsamps[1:ns*2:2]=tempc2
self.ydata[c1]=mergedsamps[0:ns//2]
self.ydata[c2]=mergedsamps[ns//2:ns]
self.ydata[c1+2]=mergedsamps[ns:3*ns//2]
self.ydata[c2+2]=mergedsamps[3*ns//2:ns*2]

the-stack_106_22209
from __future__ import print_function
import sys
from nose.tools import assert_equal, assert_true, assert_false, assert_is_instance, assert_multi_line_equal
from six import StringIO
import sqlparse
from sqlparse import tokens as T
from sqlparse.sql import Token, TokenList, Parenthesis
from mbdata.utils.sql import (
group_parentheses,
parse_statements,
Set,
CreateTable,
CreateType,
CreateIndex,
)
def test_group_parentheses():
tokens = [
Token(T.Keyword, 'CREATE'),
Token(T.Whitespace, ' '),
Token(T.Keyword, 'TABLE'),
Token(T.Whitespace, ' '),
Token(T.Name, 'table_name'),
Token(T.Whitespace, ' '),
Token(T.Punctuation, '('),
Token(T.Name, 'id'),
Token(T.Whitespace, ' '),
Token(T.Keyword, 'SERIAL'),
Token(T.Whitespace, ' '),
Token(T.Keyword, 'CHECK'),
Token(T.Punctuation, '('),
Token(T.Name, 'id'),
Token(T.Operator, '='),
Token(T.Number, '0'),
Token(T.Punctuation, ')'),
Token(T.Punctuation, ')'),
Token(T.Punctuation, ';'),
]
expected_tokens = TokenList([
Token(T.Keyword, 'CREATE'),
Token(T.Keyword, 'TABLE'),
Token(T.Name, 'table_name'),
Parenthesis([
Token(T.Punctuation, '('),
Token(T.Name, 'id'),
Token(T.Keyword, 'SERIAL'),
Token(T.Keyword, 'CHECK'),
Parenthesis([
Token(T.Punctuation, '('),
Token(T.Name, 'id'),
Token(T.Operator, '='),
Token(T.Number, '0'),
Token(T.Punctuation, ')'),
]),
Token(T.Punctuation, ')'),
]),
Token(T.Punctuation, ';'),
])
grouped_tokens = group_parentheses(tokens)
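# Compare the grouped result with the expected tree by capturing the
# _pprint_tree() output of both and diffing the strings.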
stdout = sys.stdout
try:
sys.stdout = StringIO()
expected_tokens._pprint_tree()
a = sys.stdout.getvalue()
sys.stdout = StringIO()
grouped_tokens._pprint_tree()
b = sys.stdout.getvalue()
finally:
sys.stdout = stdout
assert_multi_line_equal(a, b)
def test_parse_statements():
sql = '''
SET search_path = 'cover_art_archive';
CREATE TABLE table_name (
id SERIAL, -- PK
name VARCHAR
);
CREATE TYPE FLUENCY AS ENUM ('basic', 'intermediate', 'advanced', 'native');
'''
statements = sqlparse.parse(sql)
for statement in statements:
statement._pprint_tree()
print()
statements = parse_statements(statements)
for statement in statements:
print(repr(statement))
def test_set_statement():
sql = "SET search_path = 'cover_art_archive';"
statement = next(parse_statements(sqlparse.parse(sql)))
assert_is_instance(statement, Set)
assert_equal('search_path', statement.get_name())
assert_equal('cover_art_archive', statement.get_value())
def test_set_statement_without_quotes():
sql = "SET search_path = cover_art_archive;"
statement = next(parse_statements(sqlparse.parse(sql)))
assert_is_instance(statement, Set)
assert_equal('search_path', statement.get_name())
assert_equal('cover_art_archive', statement.get_value())
def test_set_statement_with_to():
sql = "SET search_path TO 'cover_art_archive';"
statement = next(parse_statements(sqlparse.parse(sql)))
assert_is_instance(statement, Set)
assert_equal('search_path', statement.get_name())
assert_equal('cover_art_archive', statement.get_value())
def test_create_type_statement():
sql = "CREATE TYPE FLUENCY AS ENUM ('basic', 'intermediate');"
statement = next(parse_statements(sqlparse.parse(sql)))
assert_is_instance(statement, CreateType)
assert_equal('FLUENCY', statement.get_name())
assert_equal(['basic', 'intermediate'], statement.get_enum_labels())
def test_create_table_statement():
sql = '''
CREATE TABLE table_name (
id SERIAL, -- PK
name VARCHAR(100) NOT NULL,
created TIMESTAMP WITH TIME ZONE DEFAULT now() NOT NULL
);
'''
statement = next(parse_statements(sqlparse.parse(sql)))
assert_is_instance(statement, CreateTable)
assert_equal('table_name', statement.get_name())
columns = list(statement.get_columns())
assert_equal(3, len(columns))
column = columns[0]
assert_equal('id', column.get_name())
assert_equal('SERIAL', column.get_type())
assert_equal(None, column.get_default_value())
assert_equal(['-- PK'], column.get_comments())
assert_equal(False, column.is_not_null())
assert_equal(None, column.get_check_constraint())
column = columns[1]
assert_equal('name', column.get_name())
assert_equal('VARCHAR(100)', column.get_type())
assert_equal(None, column.get_default_value())
assert_equal([], column.get_comments())
assert_equal(True, column.is_not_null())
assert_equal(None, column.get_check_constraint())
column = columns[2]
assert_equal('created', column.get_name())
assert_equal('TIMESTAMP WITH TIME ZONE', column.get_type())
assert_equal('now()', column.get_default_value())
assert_equal([], column.get_comments())
assert_equal(True, column.is_not_null())
assert_equal(None, column.get_check_constraint())
def test_create_table_statement_check_constraint():
sql = '''CREATE TABLE table_name (column INTEGER(2) NOT NULL DEFAULT 0 CHECK (edits_pending > 0)); '''
statement = next(parse_statements(sqlparse.parse(sql)))
assert_is_instance(statement, CreateTable)
columns = list(statement.get_columns())
assert_equal(1, len(columns))
column = columns[0]
check = column.get_check_constraint()
assert_true(check)
assert_equal(None, check.get_name())
assert_equal('edits_pending>0', str(check.get_body()))
def test_create_table_statement_named_check_constraint():
sql = '''CREATE TABLE table_name (column INTEGER(2) NOT NULL DEFAULT 0 CONSTRAINT check_column CHECK (edits_pending > 0)); '''
statement = next(parse_statements(sqlparse.parse(sql)))
assert_is_instance(statement, CreateTable)
columns = list(statement.get_columns())
assert_equal(1, len(columns))
column = columns[0]
check = column.get_check_constraint()
assert_true(check)
assert_equal('check_column', check.get_name())
assert_equal('edits_pending>0', str(check.get_body()))
def test_create_index():
sql = '''CREATE INDEX statistic_name ON statistic (name); '''
statement = next(parse_statements(sqlparse.parse(sql)))
assert_is_instance(statement, CreateIndex)
assert_equal('statistic_name', statement.get_name())
assert_equal('statistic', statement.get_table())
assert_equal(['name'], statement.get_columns())
assert_false(statement.is_unique())
def test_create_unique_index():
sql = '''CREATE UNIQUE INDEX statistic_name_date_collected ON statistic (name, date_collected); '''
statement = next(parse_statements(sqlparse.parse(sql)))
statement._pprint_tree()
assert_is_instance(statement, CreateIndex)
assert_equal('statistic_name_date_collected', statement.get_name())
assert_equal('statistic', statement.get_table())
assert_equal(['name', 'date_collected'], statement.get_columns())
assert_true(statement.is_unique())

the-stack_106_22210
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from petstore_api.configuration import Configuration
class DeprecatedObject(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str'
}
attribute_map = {
'name': 'name'
}
def __init__(self, name=None, local_vars_configuration=None): # noqa: E501
"""DeprecatedObject - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._name = None
self.discriminator = None
if name is not None:
self.name = name
@property
def name(self):
"""Gets the name of this DeprecatedObject. # noqa: E501
:return: The name of this DeprecatedObject. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this DeprecatedObject.
:param name: The name of this DeprecatedObject. # noqa: E501
:type name: str
"""
self._name = name
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeprecatedObject):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, DeprecatedObject):
return True
return self.to_dict() != other.to_dict()

the-stack_106_22212
import networkx as nx
import pytest
import networkx.generators.line as line
from networkx.utils import edges_equal
class TestGeneratorLine:
def test_star(self):
G = nx.star_graph(5)
L = nx.line_graph(G)
assert nx.is_isomorphic(L, nx.complete_graph(5))
def test_path(self):
G = nx.path_graph(5)
L = nx.line_graph(G)
assert nx.is_isomorphic(L, nx.path_graph(4))
def test_cycle(self):
G = nx.cycle_graph(5)
L = nx.line_graph(G)
assert nx.is_isomorphic(L, G)
def test_digraph1(self):
G = nx.DiGraph([(0, 1), (0, 2), (0, 3)])
L = nx.line_graph(G)
# no edge graph, but with nodes
assert L.adj == {(0, 1): {}, (0, 2): {}, (0, 3): {}}
def test_multigraph1(self):
G = nx.MultiGraph([(0, 1), (0, 1), (1, 0), (0, 2), (2, 0), (0, 3)])
L = nx.line_graph(G)
# no edge graph, but with nodes
assert edges_equal(
L.edges(),
[
((0, 3, 0), (0, 1, 0)),
((0, 3, 0), (0, 2, 0)),
((0, 3, 0), (0, 2, 1)),
((0, 3, 0), (0, 1, 1)),
((0, 3, 0), (0, 1, 2)),
((0, 1, 0), (0, 1, 1)),
((0, 1, 0), (0, 2, 0)),
((0, 1, 0), (0, 1, 2)),
((0, 1, 0), (0, 2, 1)),
((0, 1, 1), (0, 1, 2)),
((0, 1, 1), (0, 2, 0)),
((0, 1, 1), (0, 2, 1)),
((0, 1, 2), (0, 2, 0)),
((0, 1, 2), (0, 2, 1)),
((0, 2, 0), (0, 2, 1)),
],
)
def test_multigraph2(self):
G = nx.MultiGraph([(1, 2), (2, 1)])
L = nx.line_graph(G)
assert edges_equal(L.edges(), [((1, 2, 0), (1, 2, 1))])
def test_multidigraph1(self):
G = nx.MultiDiGraph([(1, 2), (2, 1)])
L = nx.line_graph(G)
assert edges_equal(L.edges(), [((1, 2, 0), (2, 1, 0)), ((2, 1, 0), (1, 2, 0))])
def test_multidigraph2(self):
G = nx.MultiDiGraph([(0, 1), (0, 1), (0, 1), (1, 2)])
L = nx.line_graph(G)
assert edges_equal(
L.edges(),
[((0, 1, 0), (1, 2, 0)), ((0, 1, 1), (1, 2, 0)), ((0, 1, 2), (1, 2, 0))],
)
def test_digraph2(self):
G = nx.DiGraph([(0, 1), (1, 2), (2, 3)])
L = nx.line_graph(G)
assert edges_equal(L.edges(), [((0, 1), (1, 2)), ((1, 2), (2, 3))])
def test_create1(self):
G = nx.DiGraph([(0, 1), (1, 2), (2, 3)])
L = nx.line_graph(G, create_using=nx.Graph())
assert edges_equal(L.edges(), [((0, 1), (1, 2)), ((1, 2), (2, 3))])
def test_create2(self):
G = nx.Graph([(0, 1), (1, 2), (2, 3)])
L = nx.line_graph(G, create_using=nx.DiGraph())
assert edges_equal(L.edges(), [((0, 1), (1, 2)), ((1, 2), (2, 3))])
class TestGeneratorInverseLine:
def test_example(self):
G = nx.Graph()
G_edges = [
[1, 2],
[1, 3],
[1, 4],
[1, 5],
[2, 3],
[2, 5],
[2, 6],
[2, 7],
[3, 4],
[3, 5],
[6, 7],
[6, 8],
[7, 8],
]
G.add_edges_from(G_edges)
H = nx.inverse_line_graph(G)
solution = nx.Graph()
solution_edges = [
("a", "b"),
("a", "c"),
("a", "d"),
("a", "e"),
("c", "d"),
("e", "f"),
("e", "g"),
("f", "g"),
]
solution.add_edges_from(solution_edges)
assert nx.is_isomorphic(H, solution)
def test_example_2(self):
G = nx.Graph()
G_edges = [[1, 2], [1, 3], [2, 3], [3, 4], [3, 5], [4, 5]]
G.add_edges_from(G_edges)
H = nx.inverse_line_graph(G)
solution = nx.Graph()
solution_edges = [("a", "c"), ("b", "c"), ("c", "d"), ("d", "e"), ("d", "f")]
solution.add_edges_from(solution_edges)
assert nx.is_isomorphic(H, solution)
def test_pair(self):
G = nx.path_graph(2)
H = nx.inverse_line_graph(G)
solution = nx.path_graph(3)
assert nx.is_isomorphic(H, solution)
def test_line(self):
G = nx.path_graph(5)
solution = nx.path_graph(6)
H = nx.inverse_line_graph(G)
assert nx.is_isomorphic(H, solution)
def test_triangle_graph(self):
G = nx.complete_graph(3)
H = nx.inverse_line_graph(G)
alternative_solution = nx.Graph()
alternative_solution.add_edges_from([[0, 1], [0, 2], [0, 3]])
# there are two alternative inverse line graphs for this case
# so long as we get one of them the test should pass
assert nx.is_isomorphic(H, G) or nx.is_isomorphic(H, alternative_solution)
def test_cycle(self):
G = nx.cycle_graph(5)
H = nx.inverse_line_graph(G)
assert nx.is_isomorphic(H, G)
def test_empty(self):
G = nx.Graph()
H = nx.inverse_line_graph(G)
assert nx.is_isomorphic(H, nx.complete_graph(1))
def test_K1(self):
G = nx.complete_graph(1)
H = nx.inverse_line_graph(G)
solution = nx.path_graph(2)
assert nx.is_isomorphic(H, solution)
def test_claw(self):
# This is the simplest non-line graph
G = nx.Graph()
G_edges = [[0, 1], [0, 2], [0, 3]]
G.add_edges_from(G_edges)
pytest.raises(nx.NetworkXError, nx.inverse_line_graph, G)
def test_non_line_graph(self):
# These are other non-line graphs
# wheel graph with 6 nodes
G = nx.Graph()
G_edges = [
[0, 1],
[0, 2],
[0, 3],
[0, 4],
[0, 5],
[1, 2],
[2, 3],
[3, 4],
[4, 5],
[5, 1],
]
G.add_edges_from(G_edges)
pytest.raises(nx.NetworkXError, nx.inverse_line_graph, G)
# 3---4---5
# / \ / \ /
# 0---1---2
G = nx.Graph()
G_edges = [
[0, 1],
[1, 2],
[3, 4],
[4, 5],
[0, 3],
[1, 3],
[1, 4],
[2, 4],
[2, 5],
]
G.add_edges_from(G_edges)
pytest.raises(nx.NetworkXError, nx.inverse_line_graph, G)
# K_5 minus an edge
K5me = nx.complete_graph(5)
K5me.remove_edge(0, 1)
pytest.raises(nx.NetworkXError, nx.inverse_line_graph, K5me)
def test_wrong_graph_type(self):
G = nx.DiGraph()
G_edges = [[0, 1], [0, 2], [0, 3]]
G.add_edges_from(G_edges)
pytest.raises(nx.NetworkXNotImplemented, nx.inverse_line_graph, G)
G = nx.MultiGraph()
G_edges = [[0, 1], [0, 2], [0, 3]]
G.add_edges_from(G_edges)
pytest.raises(nx.NetworkXNotImplemented, nx.inverse_line_graph, G)
def test_line_inverse_line_complete(self):
G = nx.complete_graph(10)
H = nx.line_graph(G)
J = nx.inverse_line_graph(H)
assert nx.is_isomorphic(G, J)
def test_line_inverse_line_path(self):
G = nx.path_graph(10)
H = nx.line_graph(G)
J = nx.inverse_line_graph(H)
assert nx.is_isomorphic(G, J)
def test_line_inverse_line_hypercube(self):
G = nx.hypercube_graph(5)
H = nx.line_graph(G)
J = nx.inverse_line_graph(H)
assert nx.is_isomorphic(G, J)
def test_line_inverse_line_cycle(self):
G = nx.cycle_graph(10)
H = nx.line_graph(G)
J = nx.inverse_line_graph(H)
assert nx.is_isomorphic(G, J)
def test_line_inverse_line_star(self):
G = nx.star_graph(20)
H = nx.line_graph(G)
J = nx.inverse_line_graph(H)
assert nx.is_isomorphic(G, J)
def test_line_inverse_line_multipartite(self):
G = nx.complete_multipartite_graph(3, 4, 5)
H = nx.line_graph(G)
J = nx.inverse_line_graph(H)
assert nx.is_isomorphic(G, J)
def test_line_inverse_line_dgm(self):
G = nx.dorogovtsev_goltsev_mendes_graph(4)
H = nx.line_graph(G)
J = nx.inverse_line_graph(H)
assert nx.is_isomorphic(G, J)

the-stack_106_22214
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import uuid
from datetime import date, datetime
from unittest import mock
import jinja2
import pytest
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.lineage.entities import File
from airflow.models import DAG
from airflow.models.baseoperator import BaseOperatorMeta, chain, cross_downstream
from airflow.operators.dummy import DummyOperator
from tests.models import DEFAULT_DATE
from tests.test_utils.mock_operators import DeprecatedOperator, MockNamedTuple, MockOperator
class ClassWithCustomAttributes:
"""Class for testing purpose: allows to create objects with custom attributes in one single statement."""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return f"{ClassWithCustomAttributes.__name__}({str(self.__dict__)})"
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
# Objects with circular references (for testing purpose)
object1 = ClassWithCustomAttributes(attr="{{ foo }}_1", template_fields=["ref"])
object2 = ClassWithCustomAttributes(attr="{{ foo }}_2", ref=object1, template_fields=["ref"])
setattr(object1, 'ref', object2)
# Essentially similar to airflow.models.baseoperator.BaseOperator
class DummyClass(metaclass=BaseOperatorMeta):
def __init__(self, test_param, params=None, default_args=None):
self.test_param = test_param
def set_xcomargs_dependencies(self):
...
class DummySubClass(DummyClass):
def __init__(self, test_sub_param, **kwargs):
super().__init__(**kwargs)
self.test_sub_param = test_sub_param
class TestBaseOperator(unittest.TestCase):
def test_apply(self):
dummy = DummyClass(test_param=True)
assert dummy.test_param
with pytest.raises(AirflowException, match='Argument.*test_param.*required'):
DummySubClass(test_sub_param=True)
def test_default_args(self):
default_args = {'test_param': True}
dummy_class = DummyClass(default_args=default_args)
assert dummy_class.test_param
default_args = {'test_param': True, 'test_sub_param': True}
dummy_subclass = DummySubClass(default_args=default_args)
assert dummy_class.test_param
assert dummy_subclass.test_sub_param
default_args = {'test_param': True}
dummy_subclass = DummySubClass(default_args=default_args, test_sub_param=True)
assert dummy_class.test_param
assert dummy_subclass.test_sub_param
with pytest.raises(AirflowException, match='Argument.*test_sub_param.*required'):
DummySubClass(default_args=default_args)
def test_incorrect_default_args(self):
default_args = {'test_param': True, 'extra_param': True}
dummy_class = DummyClass(default_args=default_args)
assert dummy_class.test_param
default_args = {'random_params': True}
with pytest.raises(AirflowException, match='Argument.*test_param.*required'):
DummyClass(default_args=default_args)
def test_incorrect_priority_weight(self):
error_msg = "`priority_weight` for task 'test_op' only accepts integers, received '<class 'str'>'."
with pytest.raises(AirflowException, match=error_msg):
DummyOperator(task_id="test_op", priority_weight="2")
@parameterized.expand(
[
("{{ foo }}", {"foo": "bar"}, "bar"),
(["{{ foo }}_1", "{{ foo }}_2"], {"foo": "bar"}, ["bar_1", "bar_2"]),
(("{{ foo }}_1", "{{ foo }}_2"), {"foo": "bar"}, ("bar_1", "bar_2")),
(
{"key1": "{{ foo }}_1", "key2": "{{ foo }}_2"},
{"foo": "bar"},
{"key1": "bar_1", "key2": "bar_2"},
),
(
{"key_{{ foo }}_1": 1, "key_2": "{{ foo }}_2"},
{"foo": "bar"},
{"key_{{ foo }}_1": 1, "key_2": "bar_2"},
),
(date(2018, 12, 6), {"foo": "bar"}, date(2018, 12, 6)),
(datetime(2018, 12, 6, 10, 55), {"foo": "bar"}, datetime(2018, 12, 6, 10, 55)),
(MockNamedTuple("{{ foo }}_1", "{{ foo }}_2"), {"foo": "bar"}, MockNamedTuple("bar_1", "bar_2")),
({"{{ foo }}_1", "{{ foo }}_2"}, {"foo": "bar"}, {"bar_1", "bar_2"}),
(None, {}, None),
([], {}, []),
({}, {}, {}),
(
# check nested fields can be templated
ClassWithCustomAttributes(att1="{{ foo }}_1", att2="{{ foo }}_2", template_fields=["att1"]),
{"foo": "bar"},
ClassWithCustomAttributes(att1="bar_1", att2="{{ foo }}_2", template_fields=["att1"]),
),
(
# check deep nested fields can be templated
ClassWithCustomAttributes(
nested1=ClassWithCustomAttributes(
att1="{{ foo }}_1", att2="{{ foo }}_2", template_fields=["att1"]
),
nested2=ClassWithCustomAttributes(
att3="{{ foo }}_3", att4="{{ foo }}_4", template_fields=["att3"]
),
template_fields=["nested1"],
),
{"foo": "bar"},
ClassWithCustomAttributes(
nested1=ClassWithCustomAttributes(
att1="bar_1", att2="{{ foo }}_2", template_fields=["att1"]
),
nested2=ClassWithCustomAttributes(
att3="{{ foo }}_3", att4="{{ foo }}_4", template_fields=["att3"]
),
template_fields=["nested1"],
),
),
(
# check null value on nested template field
ClassWithCustomAttributes(att1=None, template_fields=["att1"]),
{},
ClassWithCustomAttributes(att1=None, template_fields=["att1"]),
),
(
# check there is no RecursionError on circular references
object1,
{"foo": "bar"},
object1,
),
# By default, Jinja2 drops one (single) trailing newline
("{{ foo }}\n\n", {"foo": "bar"}, "bar\n"),
]
)
def test_render_template(self, content, context, expected_output):
"""Test render_template given various input types."""
with DAG("test-dag", start_date=DEFAULT_DATE):
task = DummyOperator(task_id="op1")
result = task.render_template(content, context)
assert result == expected_output
@parameterized.expand(
[
("{{ foo }}", {"foo": "bar"}, "bar"),
("{{ foo }}", {"foo": ["bar1", "bar2"]}, ["bar1", "bar2"]),
(["{{ foo }}", "{{ foo | length}}"], {"foo": ["bar1", "bar2"]}, [['bar1', 'bar2'], 2]),
(("{{ foo }}_1", "{{ foo }}_2"), {"foo": "bar"}, ("bar_1", "bar_2")),
("{{ ds }}", {"ds": date(2018, 12, 6)}, date(2018, 12, 6)),
(datetime(2018, 12, 6, 10, 55), {"foo": "bar"}, datetime(2018, 12, 6, 10, 55)),
("{{ ds }}", {"ds": datetime(2018, 12, 6, 10, 55)}, datetime(2018, 12, 6, 10, 55)),
(MockNamedTuple("{{ foo }}_1", "{{ foo }}_2"), {"foo": "bar"}, MockNamedTuple("bar_1", "bar_2")),
(
("{{ foo }}", "{{ foo.isoformat() }}"),
{"foo": datetime(2018, 12, 6, 10, 55)},
(datetime(2018, 12, 6, 10, 55), '2018-12-06T10:55:00'),
),
(None, {}, None),
([], {}, []),
({}, {}, {}),
]
)
def test_render_template_with_native_envs(self, content, context, expected_output):
"""Test render_template given various input types with Native Python types"""
with DAG("test-dag", start_date=DEFAULT_DATE, render_template_as_native_obj=True):
task = DummyOperator(task_id="op1")
result = task.render_template(content, context)
assert result == expected_output
def test_render_template_fields(self):
"""Verify if operator attributes are correctly templated."""
with DAG("test-dag", start_date=DEFAULT_DATE):
task = MockOperator(task_id="op1", arg1="{{ foo }}", arg2="{{ bar }}")
# Assert nothing is templated yet
assert task.arg1 == "{{ foo }}"
assert task.arg2 == "{{ bar }}"
# Trigger templating and verify if attributes are templated correctly
task.render_template_fields(context={"foo": "footemplated", "bar": "bartemplated"})
assert task.arg1 == "footemplated"
assert task.arg2 == "bartemplated"
def test_render_template_fields_native_envs(self):
"""Verify if operator attributes are correctly templated to Native Python objects."""
with DAG("test-dag", start_date=DEFAULT_DATE, render_template_as_native_obj=True):
task = MockOperator(task_id="op1", arg1="{{ foo }}", arg2="{{ bar }}")
# Assert nothing is templated yet
assert task.arg1 == "{{ foo }}"
assert task.arg2 == "{{ bar }}"
# Trigger templating and verify if attributes are templated correctly
task.render_template_fields(context={"foo": ["item1", "item2"], "bar": 3})
assert task.arg1 == ["item1", "item2"]
assert task.arg2 == 3
@parameterized.expand(
[
({"user_defined_macros": {"foo": "bar"}}, "{{ foo }}", {}, "bar"),
({"user_defined_macros": {"foo": "bar"}}, 1, {}, 1),
(
{"user_defined_filters": {"hello": lambda name: f"Hello {name}"}},
"{{ 'world' | hello }}",
{},
"Hello world",
),
]
)
def test_render_template_fields_with_dag_settings(self, dag_kwargs, content, context, expected_output):
"""Test render_template with additional DAG settings."""
with DAG("test-dag", start_date=DEFAULT_DATE, **dag_kwargs):
task = DummyOperator(task_id="op1")
result = task.render_template(content, context)
assert result == expected_output
@parameterized.expand([(object(),), (uuid.uuid4(),)])
def test_render_template_fields_no_change(self, content):
"""Tests if non-templatable types remain unchanged."""
with DAG("test-dag", start_date=DEFAULT_DATE):
task = DummyOperator(task_id="op1")
result = task.render_template(content, {"foo": "bar"})
assert content == result
def test_render_template_field_undefined_default(self):
"""Test render_template with template_undefined unchanged."""
with DAG("test-dag", start_date=DEFAULT_DATE):
task = DummyOperator(task_id="op1")
with pytest.raises(jinja2.UndefinedError):
task.render_template("{{ foo }}", {})
def test_render_template_field_undefined_strict(self):
"""Test render_template with template_undefined configured."""
with DAG("test-dag", start_date=DEFAULT_DATE, template_undefined=jinja2.StrictUndefined):
task = DummyOperator(task_id="op1")
with pytest.raises(jinja2.UndefinedError):
task.render_template("{{ foo }}", {})
def test_render_template_field_undefined_not_strict(self):
"""Test render_template with template_undefined configured to silently error."""
with DAG("test-dag", start_date=DEFAULT_DATE, template_undefined=jinja2.Undefined):
task = DummyOperator(task_id="op1")
assert task.render_template("{{ foo }}", {}) == ""
def test_nested_template_fields_declared_must_exist(self):
"""Test render_template when a nested template field is missing."""
with DAG("test-dag", start_date=DEFAULT_DATE):
task = DummyOperator(task_id="op1")
with pytest.raises(AttributeError) as ctx:
task.render_template(ClassWithCustomAttributes(template_fields=["missing_field"]), {})
assert "'ClassWithCustomAttributes' object has no attribute 'missing_field'" == str(ctx.value)
def test_jinja_invalid_expression_is_just_propagated(self):
"""Test render_template propagates Jinja invalid expression errors."""
with DAG("test-dag", start_date=DEFAULT_DATE):
task = DummyOperator(task_id="op1")
with pytest.raises(jinja2.exceptions.TemplateSyntaxError):
task.render_template("{{ invalid expression }}", {})
@mock.patch("airflow.templates.SandboxedEnvironment", autospec=True)
def test_jinja_env_creation(self, mock_jinja_env):
"""Verify if a Jinja environment is created only once when templating."""
with DAG("test-dag", start_date=DEFAULT_DATE):
task = MockOperator(task_id="op1", arg1="{{ foo }}", arg2="{{ bar }}")
task.render_template_fields(context={"foo": "whatever", "bar": "whatever"})
assert mock_jinja_env.call_count == 1
@mock.patch("airflow.models.dag.NativeEnvironment", autospec=True)
def test_jinja_env_creation_native_environment(self, mock_jinja_env):
"""Verify if a Jinja environment is created only once when templating."""
with DAG("test-dag", start_date=DEFAULT_DATE, render_template_as_native_obj=True):
task = MockOperator(task_id="op1", arg1="{{ foo }}", arg2="{{ bar }}")
task.render_template_fields(context={"foo": "whatever", "bar": "whatever"})
assert mock_jinja_env.call_count == 1
def test_set_jinja_env_additional_option(self):
"""Test render_template given various input types."""
with DAG(
"test-dag", start_date=DEFAULT_DATE, jinja_environment_kwargs={'keep_trailing_newline': True}
):
task = DummyOperator(task_id="op1")
result = task.render_template("{{ foo }}\n\n", {"foo": "bar"})
assert result == "bar\n\n"
def test_override_jinja_env_option(self):
"""Test render_template given various input types."""
with DAG("test-dag", start_date=DEFAULT_DATE, jinja_environment_kwargs={'cache_size': 50}):
task = DummyOperator(task_id="op1")
result = task.render_template("{{ foo }}", {"foo": "bar"})
assert result == "bar"
def test_default_resources(self):
task = DummyOperator(task_id="default-resources")
assert task.resources is None
def test_custom_resources(self):
task = DummyOperator(task_id="custom-resources", resources={"cpus": 1, "ram": 1024})
assert task.resources.cpus.qty == 1
assert task.resources.ram.qty == 1024
def test_default_email_on_actions(self):
test_task = DummyOperator(task_id='test_default_email_on_actions')
assert test_task.email_on_retry is True
assert test_task.email_on_failure is True
def test_email_on_actions(self):
test_task = DummyOperator(
task_id='test_default_email_on_actions', email_on_retry=False, email_on_failure=True
)
assert test_task.email_on_retry is False
assert test_task.email_on_failure is True
class TestBaseOperatorMethods(unittest.TestCase):
def test_cross_downstream(self):
"""Test if all dependencies between tasks are all set correctly."""
dag = DAG(dag_id="test_dag", start_date=datetime.now())
start_tasks = [DummyOperator(task_id=f"t{i}", dag=dag) for i in range(1, 4)]
end_tasks = [DummyOperator(task_id=f"t{i}", dag=dag) for i in range(4, 7)]
cross_downstream(from_tasks=start_tasks, to_tasks=end_tasks)
for start_task in start_tasks:
assert set(start_task.get_direct_relatives(upstream=False)) == set(end_tasks)
def test_chain(self):
dag = DAG(dag_id='test_chain', start_date=datetime.now())
[op1, op2, op3, op4, op5, op6] = [DummyOperator(task_id=f't{i}', dag=dag) for i in range(1, 7)]
chain(op1, [op2, op3], [op4, op5], op6)
assert {op2, op3} == set(op1.get_direct_relatives(upstream=False))
assert [op4] == op2.get_direct_relatives(upstream=False)
assert [op5] == op3.get_direct_relatives(upstream=False)
assert {op4, op5} == set(op6.get_direct_relatives(upstream=True))
def test_chain_not_support_type(self):
dag = DAG(dag_id='test_chain', start_date=datetime.now())
[op1, op2] = [DummyOperator(task_id=f't{i}', dag=dag) for i in range(1, 3)]
with pytest.raises(TypeError):
chain([op1, op2], 1)
def test_chain_different_length_iterable(self):
dag = DAG(dag_id='test_chain', start_date=datetime.now())
[op1, op2, op3, op4, op5] = [DummyOperator(task_id=f't{i}', dag=dag) for i in range(1, 6)]
with pytest.raises(AirflowException):
chain([op1, op2], [op3, op4, op5])
def test_lineage_composition(self):
"""
Test composition with lineage
"""
inlet = File(url="in")
outlet = File(url="out")
dag = DAG("test-dag", start_date=DEFAULT_DATE)
task1 = DummyOperator(task_id="op1", dag=dag)
task2 = DummyOperator(task_id="op2", dag=dag)
# mock
task1.supports_lineage = True
# note: operator precedence still applies
inlet > task1 | (task2 > outlet)
assert task1.get_inlet_defs() == [inlet]
assert task2.get_inlet_defs() == [task1.task_id]
assert task2.get_outlet_defs() == [outlet]
fail = ClassWithCustomAttributes()
with pytest.raises(TypeError):
fail > task1
with pytest.raises(TypeError):
task1 > fail
with pytest.raises(TypeError):
fail | task1
with pytest.raises(TypeError):
task1 | fail
task3 = DummyOperator(task_id="op3", dag=dag)
extra = File(url="extra")
[inlet, extra] > task3
assert task3.get_inlet_defs() == [inlet, extra]
task1.supports_lineage = False
with pytest.raises(ValueError):
task1 | task3
assert task2.supports_lineage is False
task2 | task3
assert len(task3.get_inlet_defs()) == 3
task4 = DummyOperator(task_id="op4", dag=dag)
task4 > [inlet, outlet, extra]
assert task4.get_outlet_defs() == [inlet, outlet, extra]
def test_warnings_are_properly_propagated(self):
with pytest.warns(DeprecationWarning) as warnings:
DeprecatedOperator(task_id="test")
assert len(warnings) == 1
warning = warnings[0]
# Here we check that the trace points to the place
# where the deprecated class was used
assert warning.filename == __file__
class CustomOp(DummyOperator):
template_fields = ("field", "field2")
def __init__(self, field=None, field2=None, **kwargs):
super().__init__(**kwargs)
self.field = field
self.field2 = field2
def execute(self, context):
self.field = None
class TestXComArgsRelationsAreResolved:
def test_setattr_performs_no_custom_action_at_execute_time(self):
op = CustomOp(task_id="test_task")
op_copy = op.prepare_for_execution()
with mock.patch("airflow.models.baseoperator.BaseOperator.set_xcomargs_dependencies") as method_mock:
op_copy.execute({})
assert method_mock.call_count == 0
def test_upstream_is_set_when_template_field_is_xcomarg(self):
with DAG("xcomargs_test", default_args={"start_date": datetime.today()}):
op1 = DummyOperator(task_id="op1")
op2 = CustomOp(task_id="op2", field=op1.output)
assert op1 in op2.upstream_list
assert op2 in op1.downstream_list
def test_set_xcomargs_dependencies_works_recursively(self):
with DAG("xcomargs_test", default_args={"start_date": datetime.today()}):
op1 = DummyOperator(task_id="op1")
op2 = DummyOperator(task_id="op2")
op3 = CustomOp(task_id="op3", field=[op1.output, op2.output])
op4 = CustomOp(task_id="op4", field={"op1": op1.output, "op2": op2.output})
assert op1 in op3.upstream_list
assert op2 in op3.upstream_list
assert op1 in op4.upstream_list
assert op2 in op4.upstream_list
def test_set_xcomargs_dependencies_works_when_set_after_init(self):
with DAG(dag_id='xcomargs_test', default_args={"start_date": datetime.today()}):
op1 = DummyOperator(task_id="op1")
op2 = CustomOp(task_id="op2")
op2.field = op1.output # value is set after init
assert op1 in op2.upstream_list
def test_set_xcomargs_dependencies_error_when_outside_dag(self):
with pytest.raises(AirflowException):
op1 = DummyOperator(task_id="op1")
CustomOp(task_id="op2", field=op1.output)
|
the-stack_106_22217 | class Util:
@staticmethod
def get_object_from_http_request(request):
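        # Parse the request line (e.g. "GET /index.html HTTP/1.1") and return the target path
        # without its leading "/". The request is assumed to be the str() of the raw bytes,
        # which is why the split uses the escaped "\\r\\n" rather than a real CRLF.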
return request.split("\\r\\n")[0].split(" ")[1][1:]
@staticmethod
def get_file_data(filename, mode):
f = open(filename, mode)
data = f.read()
f.close()
return data
@staticmethod
def get_simple_http_header(status, headers):
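        # Build a minimal HTTP/1.1 response header block (status line, given headers,
        # terminating blank line) and return it encoded as ASCII bytes.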
response = ''
if status == 'ok':
response += 'HTTP/1.1 200 OK\r\n'
else:
response += 'HTTP/1.1 404 Not Found\r\n'
for key in headers:
response += key + ':' + headers[key] + '\r\n'
response += '\r\n'
return response.encode('ascii')
@staticmethod
def encode_string(string, mode):
return string.encode(mode)
@staticmethod
def receive_all(sock):
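        # Read 4096-byte chunks until a short read indicates the sender has finished.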
data = b''
while True:
part = sock.recv(4096)
data += part
if len(part) < 4096:
break
return data
|
the-stack_106_22218 | from os import path
from io import open
from setuptools import setup
from setuptools import find_packages
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='mimir',
version='2.0.0',
description='Smart OSINT collection and enrichment',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/deadbits/mimir',
author='Adam M. Swanda',
author_email='[email protected]',
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Information Technology',
'Topic :: Security',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7',
],
keywords='osint cybersecurity infosec spamlists reputationdbs',
packages=find_packages(exclude=['mimir/data/docs']),
python_requires='>=3.7',
install_requires=[
'requests',
'configparser',
'validators',
'colorama',
'pygments',
'urllib3',
'dnspython',
        'shodan',
        'whois',
'pymisp',
'spam-list',
'datetime',
'maxminddb'
],
package_data={
'mimir': [
'mimir/data/apikeys.json.example',
'mimir/maxmind/*',
'mimir/data/logo/mimir.png'
],
},
data_files=[
('mimir',
[
'mimir/data/apikeys.json.example',
'mimir/data/maxmind/*',
'mimir/data/extra/logo/minir-logo.png'
]
)
],
entry_points={
'console_scripts': [
'mimir=mimir.mimir:main',
],
},
project_urls={
'Bug Reports': 'https://github.com/deadbits/mimir/issues',
'Say Thanks!': 'http://saythanks.io/to/deadbits',
'Source': 'https://github.com/deadbits/mimir/',
},
)
|
the-stack_106_22220 | from sympy.integrals.transforms import (mellin_transform,
inverse_mellin_transform, laplace_transform, inverse_laplace_transform,
fourier_transform, inverse_fourier_transform,
sine_transform, inverse_sine_transform,
cosine_transform, inverse_cosine_transform,
hankel_transform, inverse_hankel_transform,
LaplaceTransform, FourierTransform, SineTransform, CosineTransform,
InverseLaplaceTransform, InverseFourierTransform,
InverseSineTransform, InverseCosineTransform, IntegralTransformError)
from sympy import (
gamma, exp, oo, Heaviside, symbols, Symbol, re, factorial, pi, arg,
cos, S, Abs, And, Or, sin, sqrt, I, log, tan, hyperexpand, meijerg,
EulerGamma, erf, erfc, besselj, bessely, besseli, besselk,
exp_polar, polar_lift, unpolarify, Function, expint, expand_mul,
gammasimp, trigsimp, atan, sinh, cosh, Ne, periodic_argument, atan2, Abs)
from sympy.utilities.pytest import XFAIL, slow, skip, raises
from sympy.matrices import Matrix, eye
from sympy.abc import x, s, a, b, c, d
nu, beta, rho = symbols('nu beta rho')
def test_undefined_function():
from sympy import Function, MellinTransform
f = Function('f')
assert mellin_transform(f(x), x, s) == MellinTransform(f(x), x, s)
assert mellin_transform(f(x) + exp(-x), x, s) == \
(MellinTransform(f(x), x, s) + gamma(s), (0, oo), True)
assert laplace_transform(2*f(x), x, s) == 2*LaplaceTransform(f(x), x, s)
# TODO test derivative and other rules when implemented
def test_free_symbols():
from sympy import Function
f = Function('f')
assert mellin_transform(f(x), x, s).free_symbols == {s}
assert mellin_transform(f(x)*a, x, s).free_symbols == {s, a}
def test_as_integral():
from sympy import Function, Integral
f = Function('f')
assert mellin_transform(f(x), x, s).rewrite('Integral') == \
Integral(x**(s - 1)*f(x), (x, 0, oo))
assert fourier_transform(f(x), x, s).rewrite('Integral') == \
Integral(f(x)*exp(-2*I*pi*s*x), (x, -oo, oo))
assert laplace_transform(f(x), x, s).rewrite('Integral') == \
Integral(f(x)*exp(-s*x), (x, 0, oo))
assert str(2*pi*I*inverse_mellin_transform(f(s), s, x, (a, b)).rewrite('Integral')) \
== "Integral(x**(-s)*f(s), (s, _c - oo*I, _c + oo*I))"
assert str(2*pi*I*inverse_laplace_transform(f(s), s, x).rewrite('Integral')) == \
"Integral(f(s)*exp(s*x), (s, _c - oo*I, _c + oo*I))"
assert inverse_fourier_transform(f(s), s, x).rewrite('Integral') == \
Integral(f(s)*exp(2*I*pi*s*x), (s, -oo, oo))
# NOTE this is stuck in risch because meijerint cannot handle it
@slow
@XFAIL
def test_mellin_transform_fail():
skip("Risch takes forever.")
MT = mellin_transform
bpos = symbols('b', positive=True)
bneg = symbols('b', negative=True)
expr = (sqrt(x + b**2) + b)**a/sqrt(x + b**2)
# TODO does not work with bneg, argument wrong. Needs changes to matching.
assert MT(expr.subs(b, -bpos), x, s) == \
((-1)**(a + 1)*2**(a + 2*s)*bpos**(a + 2*s - 1)*gamma(a + s)
*gamma(1 - a - 2*s)/gamma(1 - s),
(-re(a), -re(a)/2 + S(1)/2), True)
expr = (sqrt(x + b**2) + b)**a
assert MT(expr.subs(b, -bpos), x, s) == \
(
2**(a + 2*s)*a*bpos**(a + 2*s)*gamma(-a - 2*
s)*gamma(a + s)/gamma(-s + 1),
(-re(a), -re(a)/2), True)
# Test exponent 1:
assert MT(expr.subs({b: -bpos, a: 1}), x, s) == \
(-bpos**(2*s + 1)*gamma(s)*gamma(-s - S(1)/2)/(2*sqrt(pi)),
(-1, -S(1)/2), True)
def test_mellin_transform():
from sympy import Max, Min
MT = mellin_transform
bpos = symbols('b', positive=True)
# 8.4.2
assert MT(x**nu*Heaviside(x - 1), x, s) == \
(-1/(nu + s), (-oo, -re(nu)), True)
assert MT(x**nu*Heaviside(1 - x), x, s) == \
(1/(nu + s), (-re(nu), oo), True)
assert MT((1 - x)**(beta - 1)*Heaviside(1 - x), x, s) == \
(gamma(beta)*gamma(s)/gamma(beta + s), (0, oo), -re(beta) < 0)
assert MT((x - 1)**(beta - 1)*Heaviside(x - 1), x, s) == \
(gamma(beta)*gamma(1 - beta - s)/gamma(1 - s),
(-oo, -re(beta) + 1), -re(beta) < 0)
assert MT((1 + x)**(-rho), x, s) == \
(gamma(s)*gamma(rho - s)/gamma(rho), (0, re(rho)), True)
# TODO also the conditions should be simplified, e.g.
# And(re(rho) - 1 < 0, re(rho) < 1) should just be
# re(rho) < 1
assert MT(abs(1 - x)**(-rho), x, s) == (
2*sin(pi*rho/2)*gamma(1 - rho)*
cos(pi*(rho/2 - s))*gamma(s)*gamma(rho-s)/pi,
(0, re(rho)), And(re(rho) - 1 < 0, re(rho) < 1))
mt = MT((1 - x)**(beta - 1)*Heaviside(1 - x)
+ a*(x - 1)**(beta - 1)*Heaviside(x - 1), x, s)
    assert (mt[1], mt[2]) == ((0, -re(beta) + 1), -re(beta) < 0)
assert MT((x**a - b**a)/(x - b), x, s)[0] == \
pi*b**(a + s - 1)*sin(pi*a)/(sin(pi*s)*sin(pi*(a + s)))
assert MT((x**a - bpos**a)/(x - bpos), x, s) == \
(pi*bpos**(a + s - 1)*sin(pi*a)/(sin(pi*s)*sin(pi*(a + s))),
(Max(-re(a), 0), Min(1 - re(a), 1)), True)
expr = (sqrt(x + b**2) + b)**a
assert MT(expr.subs(b, bpos), x, s) == \
(-a*(2*bpos)**(a + 2*s)*gamma(s)*gamma(-a - 2*s)/gamma(-a - s + 1),
(0, -re(a)/2), True)
expr = (sqrt(x + b**2) + b)**a/sqrt(x + b**2)
assert MT(expr.subs(b, bpos), x, s) == \
(2**(a + 2*s)*bpos**(a + 2*s - 1)*gamma(s)
*gamma(1 - a - 2*s)/gamma(1 - a - s),
(0, -re(a)/2 + S(1)/2), True)
# 8.4.2
assert MT(exp(-x), x, s) == (gamma(s), (0, oo), True)
assert MT(exp(-1/x), x, s) == (gamma(-s), (-oo, 0), True)
# 8.4.5
assert MT(log(x)**4*Heaviside(1 - x), x, s) == (24/s**5, (0, oo), True)
assert MT(log(x)**3*Heaviside(x - 1), x, s) == (6/s**4, (-oo, 0), True)
assert MT(log(x + 1), x, s) == (pi/(s*sin(pi*s)), (-1, 0), True)
assert MT(log(1/x + 1), x, s) == (pi/(s*sin(pi*s)), (0, 1), True)
assert MT(log(abs(1 - x)), x, s) == (pi/(s*tan(pi*s)), (-1, 0), True)
assert MT(log(abs(1 - 1/x)), x, s) == (pi/(s*tan(pi*s)), (0, 1), True)
# 8.4.14
assert MT(erf(sqrt(x)), x, s) == \
(-gamma(s + S(1)/2)/(sqrt(pi)*s), (-S(1)/2, 0), True)
@slow
def test_mellin_transform2():
MT = mellin_transform
# TODO we cannot currently do these (needs summation of 3F2(-1))
# this also implies that they cannot be written as a single g-function
# (although this is possible)
mt = MT(log(x)/(x + 1), x, s)
assert mt[1:] == ((0, 1), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
mt = MT(log(x)**2/(x + 1), x, s)
assert mt[1:] == ((0, 1), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
mt = MT(log(x)/(x + 1)**2, x, s)
assert mt[1:] == ((0, 2), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
@slow
def test_mellin_transform_bessel():
from sympy import Max
MT = mellin_transform
# 8.4.19
assert MT(besselj(a, 2*sqrt(x)), x, s) == \
(gamma(a/2 + s)/gamma(a/2 - s + 1), (-re(a)/2, S(3)/4), True)
assert MT(sin(sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(2**a*gamma(-2*s + S(1)/2)*gamma(a/2 + s + S(1)/2)/(
gamma(-a/2 - s + 1)*gamma(a - 2*s + 1)), (
-re(a)/2 - S(1)/2, S(1)/4), True)
assert MT(cos(sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(2**a*gamma(a/2 + s)*gamma(-2*s + S(1)/2)/(
gamma(-a/2 - s + S(1)/2)*gamma(a - 2*s + 1)), (
-re(a)/2, S(1)/4), True)
assert MT(besselj(a, sqrt(x))**2, x, s) == \
(gamma(a + s)*gamma(S(1)/2 - s)
/ (sqrt(pi)*gamma(1 - s)*gamma(1 + a - s)),
(-re(a), S(1)/2), True)
assert MT(besselj(a, sqrt(x))*besselj(-a, sqrt(x)), x, s) == \
(gamma(s)*gamma(S(1)/2 - s)
/ (sqrt(pi)*gamma(1 - a - s)*gamma(1 + a - s)),
(0, S(1)/2), True)
# NOTE: prudnikov gives the strip below as (1/2 - re(a), 1). As far as
# I can see this is wrong (since besselj(z) ~ 1/sqrt(z) for z large)
assert MT(besselj(a - 1, sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(gamma(1 - s)*gamma(a + s - S(1)/2)
/ (sqrt(pi)*gamma(S(3)/2 - s)*gamma(a - s + S(1)/2)),
(S(1)/2 - re(a), S(1)/2), True)
assert MT(besselj(a, sqrt(x))*besselj(b, sqrt(x)), x, s) == \
(4**s*gamma(1 - 2*s)*gamma((a + b)/2 + s)
/ (gamma(1 - s + (b - a)/2)*gamma(1 - s + (a - b)/2)
*gamma( 1 - s + (a + b)/2)),
(-(re(a) + re(b))/2, S(1)/2), True)
assert MT(besselj(a, sqrt(x))**2 + besselj(-a, sqrt(x))**2, x, s)[1:] == \
((Max(re(a), -re(a)), S(1)/2), True)
# Section 8.4.20
assert MT(bessely(a, 2*sqrt(x)), x, s) == \
(-cos(pi*(a/2 - s))*gamma(s - a/2)*gamma(s + a/2)/pi,
(Max(-re(a)/2, re(a)/2), S(3)/4), True)
assert MT(sin(sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-4**s*sin(pi*(a/2 - s))*gamma(S(1)/2 - 2*s)
* gamma((1 - a)/2 + s)*gamma((1 + a)/2 + s)
/ (sqrt(pi)*gamma(1 - s - a/2)*gamma(1 - s + a/2)),
(Max(-(re(a) + 1)/2, (re(a) - 1)/2), S(1)/4), True)
assert MT(cos(sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-4**s*cos(pi*(a/2 - s))*gamma(s - a/2)*gamma(s + a/2)*gamma(S(1)/2 - 2*s)
/ (sqrt(pi)*gamma(S(1)/2 - s - a/2)*gamma(S(1)/2 - s + a/2)),
(Max(-re(a)/2, re(a)/2), S(1)/4), True)
assert MT(besselj(a, sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-cos(pi*s)*gamma(s)*gamma(a + s)*gamma(S(1)/2 - s)
/ (pi**S('3/2')*gamma(1 + a - s)),
(Max(-re(a), 0), S(1)/2), True)
assert MT(besselj(a, sqrt(x))*bessely(b, sqrt(x)), x, s) == \
(-4**s*cos(pi*(a/2 - b/2 + s))*gamma(1 - 2*s)
* gamma(a/2 - b/2 + s)*gamma(a/2 + b/2 + s)
/ (pi*gamma(a/2 - b/2 - s + 1)*gamma(a/2 + b/2 - s + 1)),
(Max((-re(a) + re(b))/2, (-re(a) - re(b))/2), S(1)/2), True)
# NOTE bessely(a, sqrt(x))**2 and bessely(a, sqrt(x))*bessely(b, sqrt(x))
# are a mess (no matter what way you look at it ...)
assert MT(bessely(a, sqrt(x))**2, x, s)[1:] == \
((Max(-re(a), 0, re(a)), S(1)/2), True)
# Section 8.4.22
# TODO we can't do any of these (delicate cancellation)
# Section 8.4.23
assert MT(besselk(a, 2*sqrt(x)), x, s) == \
(gamma(
s - a/2)*gamma(s + a/2)/2, (Max(-re(a)/2, re(a)/2), oo), True)
assert MT(besselj(a, 2*sqrt(2*sqrt(x)))*besselk(
a, 2*sqrt(2*sqrt(x))), x, s) == (4**(-s)*gamma(2*s)*
gamma(a/2 + s)/(2*gamma(a/2 - s + 1)), (Max(0, -re(a)/2), oo), True)
# TODO bessely(a, x)*besselk(a, x) is a mess
assert MT(besseli(a, sqrt(x))*besselk(a, sqrt(x)), x, s) == \
(gamma(s)*gamma(
a + s)*gamma(-s + S(1)/2)/(2*sqrt(pi)*gamma(a - s + 1)),
(Max(-re(a), 0), S(1)/2), True)
assert MT(besseli(b, sqrt(x))*besselk(a, sqrt(x)), x, s) == \
(2**(2*s - 1)*gamma(-2*s + 1)*gamma(-a/2 + b/2 + s)* \
gamma(a/2 + b/2 + s)/(gamma(-a/2 + b/2 - s + 1)* \
gamma(a/2 + b/2 - s + 1)), (Max(-re(a)/2 - re(b)/2, \
re(a)/2 - re(b)/2), S(1)/2), True)
# TODO products of besselk are a mess
mt = MT(exp(-x/2)*besselk(a, x/2), x, s)
mt0 = gammasimp((trigsimp(gammasimp(mt[0].expand(func=True)))))
assert mt0 == 2*pi**(S(3)/2)*cos(pi*s)*gamma(-s + S(1)/2)/(
(cos(2*pi*a) - cos(2*pi*s))*gamma(-a - s + 1)*gamma(a - s + 1))
assert mt[1:] == ((Max(-re(a), re(a)), oo), True)
# TODO exp(x/2)*besselk(a, x/2) [etc] cannot currently be done
# TODO various strange products of special orders
@slow
def test_expint():
from sympy import E1, expint, Max, re, lerchphi, Symbol, simplify, Si, Ci, Ei
aneg = Symbol('a', negative=True)
u = Symbol('u', polar=True)
assert mellin_transform(E1(x), x, s) == (gamma(s)/s, (0, oo), True)
assert inverse_mellin_transform(gamma(s)/s, s, x,
(0, oo)).rewrite(expint).expand() == E1(x)
assert mellin_transform(expint(a, x), x, s) == \
(gamma(s)/(a + s - 1), (Max(1 - re(a), 0), oo), True)
# XXX IMT has hickups with complicated strips ...
assert simplify(unpolarify(
inverse_mellin_transform(gamma(s)/(aneg + s - 1), s, x,
(1 - aneg, oo)).rewrite(expint).expand(func=True))) == \
expint(aneg, x)
assert mellin_transform(Si(x), x, s) == \
(-2**s*sqrt(pi)*gamma(s/2 + S(1)/2)/(
2*s*gamma(-s/2 + 1)), (-1, 0), True)
assert inverse_mellin_transform(-2**s*sqrt(pi)*gamma((s + 1)/2)
/(2*s*gamma(-s/2 + 1)), s, x, (-1, 0)) \
== Si(x)
assert mellin_transform(Ci(sqrt(x)), x, s) == \
(-2**(2*s - 1)*sqrt(pi)*gamma(s)/(s*gamma(-s + S(1)/2)), (0, 1), True)
assert inverse_mellin_transform(
-4**s*sqrt(pi)*gamma(s)/(2*s*gamma(-s + S(1)/2)),
s, u, (0, 1)).expand() == Ci(sqrt(u))
# TODO LT of Si, Shi, Chi is a mess ...
assert laplace_transform(Ci(x), x, s) == (-log(1 + s**2)/2/s, 0, True)
assert laplace_transform(expint(a, x), x, s) == \
(lerchphi(s*exp_polar(I*pi), 1, a), 0, S(0) < re(a))
assert laplace_transform(expint(1, x), x, s) == (log(s + 1)/s, 0, True)
assert laplace_transform(expint(2, x), x, s) == \
((s - log(s + 1))/s**2, 0, True)
assert inverse_laplace_transform(-log(1 + s**2)/2/s, s, u).expand() == \
Heaviside(u)*Ci(u)
assert inverse_laplace_transform(log(s + 1)/s, s, x).rewrite(expint) == \
Heaviside(x)*E1(x)
assert inverse_laplace_transform((s - log(s + 1))/s**2, s,
x).rewrite(expint).expand() == \
(expint(2, x)*Heaviside(x)).rewrite(Ei).rewrite(expint).expand()
@slow
def test_inverse_mellin_transform():
from sympy import (sin, simplify, Max, Min, expand,
powsimp, exp_polar, cos, cot)
IMT = inverse_mellin_transform
assert IMT(gamma(s), s, x, (0, oo)) == exp(-x)
assert IMT(gamma(-s), s, x, (-oo, 0)) == exp(-1/x)
assert simplify(IMT(s/(2*s**2 - 2), s, x, (2, oo))) == \
(x**2 + 1)*Heaviside(1 - x)/(4*x)
# test passing "None"
assert IMT(1/(s**2 - 1), s, x, (-1, None)) == \
-x*Heaviside(-x + 1)/2 - Heaviside(x - 1)/(2*x)
assert IMT(1/(s**2 - 1), s, x, (None, 1)) == \
-x*Heaviside(-x + 1)/2 - Heaviside(x - 1)/(2*x)
# test expansion of sums
assert IMT(gamma(s) + gamma(s - 1), s, x, (1, oo)) == (x + 1)*exp(-x)/x
# test factorisation of polys
r = symbols('r', real=True)
assert IMT(1/(s**2 + 1), s, exp(-x), (None, oo)
).subs(x, r).rewrite(sin).simplify() \
== sin(r)*Heaviside(1 - exp(-r))
# test multiplicative substitution
_a, _b = symbols('a b', positive=True)
assert IMT(_b**(-s/_a)*factorial(s/_a)/s, s, x, (0, oo)) == exp(-_b*x**_a)
assert IMT(factorial(_a/_b + s/_b)/(_a + s), s, x, (-_a, oo)) == x**_a*exp(-x**_b)
def simp_pows(expr):
return simplify(powsimp(expand_mul(expr, deep=False), force=True)).replace(exp_polar, exp)
# Now test the inverses of all direct transforms tested above
# Section 8.4.2
nu = symbols('nu', real=True, finite=True)
assert IMT(-1/(nu + s), s, x, (-oo, None)) == x**nu*Heaviside(x - 1)
assert IMT(1/(nu + s), s, x, (None, oo)) == x**nu*Heaviside(1 - x)
assert simp_pows(IMT(gamma(beta)*gamma(s)/gamma(s + beta), s, x, (0, oo))) \
== (1 - x)**(beta - 1)*Heaviside(1 - x)
assert simp_pows(IMT(gamma(beta)*gamma(1 - beta - s)/gamma(1 - s),
s, x, (-oo, None))) \
== (x - 1)**(beta - 1)*Heaviside(x - 1)
assert simp_pows(IMT(gamma(s)*gamma(rho - s)/gamma(rho), s, x, (0, None))) \
== (1/(x + 1))**rho
assert simp_pows(IMT(d**c*d**(s - 1)*sin(pi*c)
*gamma(s)*gamma(s + c)*gamma(1 - s)*gamma(1 - s - c)/pi,
s, x, (Max(-re(c), 0), Min(1 - re(c), 1)))) \
== (x**c - d**c)/(x - d)
assert simplify(IMT(1/sqrt(pi)*(-c/2)*gamma(s)*gamma((1 - c)/2 - s)
*gamma(-c/2 - s)/gamma(1 - c - s),
s, x, (0, -re(c)/2))) == \
(1 + sqrt(x + 1))**c
assert simplify(IMT(2**(a + 2*s)*b**(a + 2*s - 1)*gamma(s)*gamma(1 - a - 2*s)
/gamma(1 - a - s), s, x, (0, (-re(a) + 1)/2))) == \
b**(a - 1)*(sqrt(1 + x/b**2) + 1)**(a - 1)*(b**2*sqrt(1 + x/b**2) +
b**2 + x)/(b**2 + x)
assert simplify(IMT(-2**(c + 2*s)*c*b**(c + 2*s)*gamma(s)*gamma(-c - 2*s)
/ gamma(-c - s + 1), s, x, (0, -re(c)/2))) == \
b**c*(sqrt(1 + x/b**2) + 1)**c
# Section 8.4.5
assert IMT(24/s**5, s, x, (0, oo)) == log(x)**4*Heaviside(1 - x)
assert expand(IMT(6/s**4, s, x, (-oo, 0)), force=True) == \
log(x)**3*Heaviside(x - 1)
assert IMT(pi/(s*sin(pi*s)), s, x, (-1, 0)) == log(x + 1)
assert IMT(pi/(s*sin(pi*s/2)), s, x, (-2, 0)) == log(x**2 + 1)
assert IMT(pi/(s*sin(2*pi*s)), s, x, (-S(1)/2, 0)) == log(sqrt(x) + 1)
assert IMT(pi/(s*sin(pi*s)), s, x, (0, 1)) == log(1 + 1/x)
# TODO
def mysimp(expr):
from sympy import expand, logcombine, powsimp
return expand(
powsimp(logcombine(expr, force=True), force=True, deep=True),
force=True).replace(exp_polar, exp)
assert mysimp(mysimp(IMT(pi/(s*tan(pi*s)), s, x, (-1, 0)))) in [
log(1 - x)*Heaviside(1 - x) + log(x - 1)*Heaviside(x - 1),
log(x)*Heaviside(x - 1) + log(1 - 1/x)*Heaviside(x - 1) + log(-x +
1)*Heaviside(-x + 1)]
# test passing cot
assert mysimp(IMT(pi*cot(pi*s)/s, s, x, (0, 1))) in [
log(1/x - 1)*Heaviside(1 - x) + log(1 - 1/x)*Heaviside(x - 1),
-log(x)*Heaviside(-x + 1) + log(1 - 1/x)*Heaviside(x - 1) + log(-x +
1)*Heaviside(-x + 1), ]
# 8.4.14
assert IMT(-gamma(s + S(1)/2)/(sqrt(pi)*s), s, x, (-S(1)/2, 0)) == \
erf(sqrt(x))
# 8.4.19
assert simplify(IMT(gamma(a/2 + s)/gamma(a/2 - s + 1), s, x, (-re(a)/2, S(3)/4))) \
== besselj(a, 2*sqrt(x))
assert simplify(IMT(2**a*gamma(S(1)/2 - 2*s)*gamma(s + (a + 1)/2)
/ (gamma(1 - s - a/2)*gamma(1 - 2*s + a)),
s, x, (-(re(a) + 1)/2, S(1)/4))) == \
sin(sqrt(x))*besselj(a, sqrt(x))
assert simplify(IMT(2**a*gamma(a/2 + s)*gamma(S(1)/2 - 2*s)
/ (gamma(S(1)/2 - s - a/2)*gamma(1 - 2*s + a)),
s, x, (-re(a)/2, S(1)/4))) == \
cos(sqrt(x))*besselj(a, sqrt(x))
# TODO this comes out as an amazing mess, but simplifies nicely
assert simplify(IMT(gamma(a + s)*gamma(S(1)/2 - s)
/ (sqrt(pi)*gamma(1 - s)*gamma(1 + a - s)),
s, x, (-re(a), S(1)/2))) == \
besselj(a, sqrt(x))**2
assert simplify(IMT(gamma(s)*gamma(S(1)/2 - s)
/ (sqrt(pi)*gamma(1 - s - a)*gamma(1 + a - s)),
s, x, (0, S(1)/2))) == \
besselj(-a, sqrt(x))*besselj(a, sqrt(x))
assert simplify(IMT(4**s*gamma(-2*s + 1)*gamma(a/2 + b/2 + s)
/ (gamma(-a/2 + b/2 - s + 1)*gamma(a/2 - b/2 - s + 1)
*gamma(a/2 + b/2 - s + 1)),
s, x, (-(re(a) + re(b))/2, S(1)/2))) == \
besselj(a, sqrt(x))*besselj(b, sqrt(x))
# Section 8.4.20
# TODO this can be further simplified!
assert simplify(IMT(-2**(2*s)*cos(pi*a/2 - pi*b/2 + pi*s)*gamma(-2*s + 1) *
gamma(a/2 - b/2 + s)*gamma(a/2 + b/2 + s) /
(pi*gamma(a/2 - b/2 - s + 1)*gamma(a/2 + b/2 - s + 1)),
s, x,
(Max(-re(a)/2 - re(b)/2, -re(a)/2 + re(b)/2), S(1)/2))) == \
besselj(a, sqrt(x))*-(besselj(-b, sqrt(x)) -
besselj(b, sqrt(x))*cos(pi*b))/sin(pi*b)
# TODO more
# for coverage
assert IMT(pi/cos(pi*s), s, x, (0, S(1)/2)) == sqrt(x)/(x + 1)
@slow
def test_laplace_transform():
from sympy import fresnels, fresnelc
LT = laplace_transform
a, b, c, = symbols('a b c', positive=True)
t = symbols('t')
w = Symbol("w")
f = Function("f")
# Test unevaluated form
assert laplace_transform(f(t), t, w) == LaplaceTransform(f(t), t, w)
assert inverse_laplace_transform(
f(w), w, t, plane=0) == InverseLaplaceTransform(f(w), w, t, 0)
# test a bug
spos = symbols('s', positive=True)
assert LT(exp(t), t, spos)[:2] == (1/(spos - 1), 1)
# basic tests from wikipedia
assert LT((t - a)**b*exp(-c*(t - a))*Heaviside(t - a), t, s) == \
((s + c)**(-b - 1)*exp(-a*s)*gamma(b + 1), -c, True)
assert LT(t**a, t, s) == (s**(-a - 1)*gamma(a + 1), 0, True)
assert LT(Heaviside(t), t, s) == (1/s, 0, True)
assert LT(Heaviside(t - a), t, s) == (exp(-a*s)/s, 0, True)
assert LT(1 - exp(-a*t), t, s) == (a/(s*(a + s)), 0, True)
assert LT((exp(2*t) - 1)*exp(-b - t)*Heaviside(t)/2, t, s, noconds=True) \
== exp(-b)/(s**2 - 1)
assert LT(exp(t), t, s)[:2] == (1/(s - 1), 1)
assert LT(exp(2*t), t, s)[:2] == (1/(s - 2), 2)
assert LT(exp(a*t), t, s)[:2] == (1/(s - a), a)
assert LT(log(t/a), t, s) == ((log(a*s) + EulerGamma)/s/-1, 0, True)
assert LT(erf(t), t, s) == (erfc(s/2)*exp(s**2/4)/s, 0, True)
assert LT(sin(a*t), t, s) == (a/(a**2 + s**2), 0, True)
assert LT(cos(a*t), t, s) == (s/(a**2 + s**2), 0, True)
# TODO would be nice to have these come out better
assert LT(exp(-a*t)*sin(b*t), t, s) == (b/(b**2 + (a + s)**2), -a, True)
assert LT(exp(-a*t)*cos(b*t), t, s) == \
((a + s)/(b**2 + (a + s)**2), -a, True)
assert LT(besselj(0, t), t, s) == (1/sqrt(1 + s**2), 0, True)
assert LT(besselj(1, t), t, s) == (1 - 1/sqrt(1 + 1/s**2), 0, True)
# TODO general order works, but is a *mess*
# TODO besseli also works, but is an even greater mess
# test a bug in conditions processing
# TODO the auxiliary condition should be recognised/simplified
assert LT(exp(t)*cos(t), t, s)[:-1] in [
((s - 1)/(s**2 - 2*s + 2), -oo),
((s - 1)/((s - 1)**2 + 1), -oo),
]
# Fresnel functions
assert laplace_transform(fresnels(t), t, s) == \
((-sin(s**2/(2*pi))*fresnels(s/pi) + sin(s**2/(2*pi))/2 -
cos(s**2/(2*pi))*fresnelc(s/pi) + cos(s**2/(2*pi))/2)/s, 0, True)
assert laplace_transform(fresnelc(t), t, s) == (
((2*sin(s**2/(2*pi))*fresnelc(s/pi) - 2*cos(s**2/(2*pi))*fresnels(s/pi)
+ sqrt(2)*cos(s**2/(2*pi) + pi/4))/(2*s), 0, True))
cond = Ne(1/s, 1) & (
S(0) < cos(Abs(periodic_argument(s, oo)))*Abs(s) - 1)
assert LT(Matrix([[exp(t), t*exp(-t)], [t*exp(-t), exp(t)]]), t, s) ==\
Matrix([
[(1/(s - 1), 1, True), ((s + 1)**(-2), 0, True)],
[((s + 1)**(-2), 0, True), (1/(s - 1), 1, True)]
])
def test_issue_8368_7173():
LT = laplace_transform
# hyperbolic
assert LT(sinh(x), x, s) == (1/(s**2 - 1), 1, True)
assert LT(cosh(x), x, s) == (s/(s**2 - 1), 1, True)
assert LT(sinh(x + 3), x, s) == (
(-s + (s + 1)*exp(6) + 1)*exp(-3)/(s - 1)/(s + 1)/2, 1, True)
assert LT(sinh(x)*cosh(x), x, s) == (
1/(s**2 - 4), 2, Ne(s/2, 1))
# trig (make sure they are not being rewritten in terms of exp)
assert LT(cos(x + 3), x, s) == ((s*cos(3) - sin(3))/(s**2 + 1), 0, True)
def test_inverse_laplace_transform():
from sympy import sinh, cosh, besselj, besseli, simplify, factor_terms
ILT = inverse_laplace_transform
a, b, c, = symbols('a b c', positive=True, finite=True)
t = symbols('t')
def simp_hyp(expr):
return factor_terms(expand_mul(expr)).rewrite(sin)
# just test inverses of all of the above
assert ILT(1/s, s, t) == Heaviside(t)
assert ILT(1/s**2, s, t) == t*Heaviside(t)
assert ILT(1/s**5, s, t) == t**4*Heaviside(t)/24
assert ILT(exp(-a*s)/s, s, t) == Heaviside(t - a)
assert ILT(exp(-a*s)/(s + b), s, t) == exp(b*(a - t))*Heaviside(-a + t)
assert ILT(a/(s**2 + a**2), s, t) == sin(a*t)*Heaviside(t)
assert ILT(s/(s**2 + a**2), s, t) == cos(a*t)*Heaviside(t)
# TODO is there a way around simp_hyp?
assert simp_hyp(ILT(a/(s**2 - a**2), s, t)) == sinh(a*t)*Heaviside(t)
assert simp_hyp(ILT(s/(s**2 - a**2), s, t)) == cosh(a*t)*Heaviside(t)
assert ILT(a/((s + b)**2 + a**2), s, t) == exp(-b*t)*sin(a*t)*Heaviside(t)
assert ILT(
(s + b)/((s + b)**2 + a**2), s, t) == exp(-b*t)*cos(a*t)*Heaviside(t)
# TODO sinh/cosh shifted come out a mess. also delayed trig is a mess
# TODO should this simplify further?
assert ILT(exp(-a*s)/s**b, s, t) == \
(t - a)**(b - 1)*Heaviside(t - a)/gamma(b)
assert ILT(exp(-a*s)/sqrt(1 + s**2), s, t) == \
Heaviside(t - a)*besselj(0, a - t) # note: besselj(0, x) is even
# XXX ILT turns these branch factor into trig functions ...
assert simplify(ILT(a**b*(s + sqrt(s**2 - a**2))**(-b)/sqrt(s**2 - a**2),
s, t).rewrite(exp)) == \
Heaviside(t)*besseli(b, a*t)
assert ILT(a**b*(s + sqrt(s**2 + a**2))**(-b)/sqrt(s**2 + a**2),
s, t).rewrite(exp) == \
Heaviside(t)*besselj(b, a*t)
assert ILT(1/(s*sqrt(s + 1)), s, t) == Heaviside(t)*erf(sqrt(t))
# TODO can we make erf(t) work?
assert ILT(1/(s**2*(s**2 + 1)),s,t) == (t - sin(t))*Heaviside(t)
assert ILT( (s * eye(2) - Matrix([[1, 0], [0, 2]])).inv(), s, t) ==\
Matrix([[exp(t)*Heaviside(t), 0], [0, exp(2*t)*Heaviside(t)]])
def test_inverse_laplace_transform_delta():
from sympy import DiracDelta
ILT = inverse_laplace_transform
t = symbols('t')
assert ILT(2, s, t) == 2*DiracDelta(t)
assert ILT(2*exp(3*s) - 5*exp(-7*s), s, t) == \
2*DiracDelta(t + 3) - 5*DiracDelta(t - 7)
a = cos(sin(7)/2)
assert ILT(a*exp(-3*s), s, t) == a*DiracDelta(t - 3)
assert ILT(exp(2*s), s, t) == DiracDelta(t + 2)
r = Symbol('r', real=True)
assert ILT(exp(r*s), s, t) == DiracDelta(t + r)
def test_inverse_laplace_transform_delta_cond():
from sympy import DiracDelta, Eq, im, Heaviside
ILT = inverse_laplace_transform
t = symbols('t')
r = Symbol('r', real=True)
assert ILT(exp(r*s), s, t, noconds=False) == (DiracDelta(t + r), True)
z = Symbol('z')
assert ILT(exp(z*s), s, t, noconds=False) == \
(DiracDelta(t + z), Eq(im(z), 0))
# inversion does not exist: verify it doesn't evaluate to DiracDelta
for z in (Symbol('z', real=False),
Symbol('z', imaginary=True, zero=False)):
f = ILT(exp(z*s), s, t, noconds=False)
f = f[0] if isinstance(f, tuple) else f
assert f.func != DiracDelta
# issue 15043
assert ILT(1/s + exp(r*s)/s, s, t, noconds=False) == (
Heaviside(t) + Heaviside(r + t), True)
def test_fourier_transform():
from sympy import simplify, expand, expand_complex, factor, expand_trig
FT = fourier_transform
IFT = inverse_fourier_transform
def simp(x):
return simplify(expand_trig(expand_complex(expand(x))))
def sinc(x):
return sin(pi*x)/(pi*x)
k = symbols('k', real=True)
f = Function("f")
# TODO for this to work with real a, need to expand abs(a*x) to abs(a)*abs(x)
a = symbols('a', positive=True)
b = symbols('b', positive=True)
posk = symbols('posk', positive=True)
# Test unevaluated form
assert fourier_transform(f(x), x, k) == FourierTransform(f(x), x, k)
assert inverse_fourier_transform(
f(k), k, x) == InverseFourierTransform(f(k), k, x)
# basic examples from wikipedia
assert simp(FT(Heaviside(1 - abs(2*a*x)), x, k)) == sinc(k/a)/a
# TODO IFT is a *mess*
assert simp(FT(Heaviside(1 - abs(a*x))*(1 - abs(a*x)), x, k)) == sinc(k/a)**2/a
# TODO IFT
assert factor(FT(exp(-a*x)*Heaviside(x), x, k), extension=I) == \
1/(a + 2*pi*I*k)
# NOTE: the ift comes out in pieces
assert IFT(1/(a + 2*pi*I*x), x, posk,
noconds=False) == (exp(-a*posk), True)
assert IFT(1/(a + 2*pi*I*x), x, -posk,
noconds=False) == (0, True)
assert IFT(1/(a + 2*pi*I*x), x, symbols('k', negative=True),
noconds=False) == (0, True)
# TODO IFT without factoring comes out as meijer g
assert factor(FT(x*exp(-a*x)*Heaviside(x), x, k), extension=I) == \
1/(a + 2*pi*I*k)**2
assert FT(exp(-a*x)*sin(b*x)*Heaviside(x), x, k) == \
b/(b**2 + (a + 2*I*pi*k)**2)
assert FT(exp(-a*x**2), x, k) == sqrt(pi)*exp(-pi**2*k**2/a)/sqrt(a)
assert IFT(sqrt(pi/a)*exp(-(pi*k)**2/a), k, x) == exp(-a*x**2)
assert FT(exp(-a*abs(x)), x, k) == 2*a/(a**2 + 4*pi**2*k**2)
# TODO IFT (comes out as meijer G)
# TODO besselj(n, x), n an integer > 0 actually can be done...
# TODO are there other common transforms (no distributions!)?
def test_sine_transform():
from sympy import EulerGamma
t = symbols("t")
w = symbols("w")
a = symbols("a")
f = Function("f")
# Test unevaluated form
assert sine_transform(f(t), t, w) == SineTransform(f(t), t, w)
assert inverse_sine_transform(
f(w), w, t) == InverseSineTransform(f(w), w, t)
assert sine_transform(1/sqrt(t), t, w) == 1/sqrt(w)
assert inverse_sine_transform(1/sqrt(w), w, t) == 1/sqrt(t)
assert sine_transform((1/sqrt(t))**3, t, w) == 2*sqrt(w)
assert sine_transform(t**(-a), t, w) == 2**(
-a + S(1)/2)*w**(a - 1)*gamma(-a/2 + 1)/gamma((a + 1)/2)
assert inverse_sine_transform(2**(-a + S(
1)/2)*w**(a - 1)*gamma(-a/2 + 1)/gamma(a/2 + S(1)/2), w, t) == t**(-a)
assert sine_transform(
exp(-a*t), t, w) == sqrt(2)*w/(sqrt(pi)*(a**2 + w**2))
assert inverse_sine_transform(
sqrt(2)*w/(sqrt(pi)*(a**2 + w**2)), w, t) == exp(-a*t)
assert sine_transform(
log(t)/t, t, w) == -sqrt(2)*sqrt(pi)*(log(w**2) + 2*EulerGamma)/4
assert sine_transform(
t*exp(-a*t**2), t, w) == sqrt(2)*w*exp(-w**2/(4*a))/(4*a**(S(3)/2))
assert inverse_sine_transform(
sqrt(2)*w*exp(-w**2/(4*a))/(4*a**(S(3)/2)), w, t) == t*exp(-a*t**2)
def test_cosine_transform():
from sympy import Si, Ci
t = symbols("t")
w = symbols("w")
a = symbols("a")
f = Function("f")
# Test unevaluated form
assert cosine_transform(f(t), t, w) == CosineTransform(f(t), t, w)
assert inverse_cosine_transform(
f(w), w, t) == InverseCosineTransform(f(w), w, t)
assert cosine_transform(1/sqrt(t), t, w) == 1/sqrt(w)
assert inverse_cosine_transform(1/sqrt(w), w, t) == 1/sqrt(t)
assert cosine_transform(1/(
a**2 + t**2), t, w) == sqrt(2)*sqrt(pi)*exp(-a*w)/(2*a)
assert cosine_transform(t**(
-a), t, w) == 2**(-a + S(1)/2)*w**(a - 1)*gamma((-a + 1)/2)/gamma(a/2)
assert inverse_cosine_transform(2**(-a + S(
1)/2)*w**(a - 1)*gamma(-a/2 + S(1)/2)/gamma(a/2), w, t) == t**(-a)
assert cosine_transform(
exp(-a*t), t, w) == sqrt(2)*a/(sqrt(pi)*(a**2 + w**2))
assert inverse_cosine_transform(
sqrt(2)*a/(sqrt(pi)*(a**2 + w**2)), w, t) == exp(-a*t)
assert cosine_transform(exp(-a*sqrt(t))*cos(a*sqrt(
t)), t, w) == a*exp(-a**2/(2*w))/(2*w**(S(3)/2))
assert cosine_transform(1/(a + t), t, w) == sqrt(2)*(
(-2*Si(a*w) + pi)*sin(a*w)/2 - cos(a*w)*Ci(a*w))/sqrt(pi)
assert inverse_cosine_transform(sqrt(2)*meijerg(((S(1)/2, 0), ()), (
(S(1)/2, 0, 0), (S(1)/2,)), a**2*w**2/4)/(2*pi), w, t) == 1/(a + t)
assert cosine_transform(1/sqrt(a**2 + t**2), t, w) == sqrt(2)*meijerg(
((S(1)/2,), ()), ((0, 0), (S(1)/2,)), a**2*w**2/4)/(2*sqrt(pi))
assert inverse_cosine_transform(sqrt(2)*meijerg(((S(1)/2,), ()), ((0, 0), (S(1)/2,)), a**2*w**2/4)/(2*sqrt(pi)), w, t) == 1/(t*sqrt(a**2/t**2 + 1))
def test_hankel_transform():
from sympy import gamma, sqrt, exp
r = Symbol("r")
k = Symbol("k")
nu = Symbol("nu")
m = Symbol("m")
a = symbols("a")
assert hankel_transform(1/r, r, k, 0) == 1/k
assert inverse_hankel_transform(1/k, k, r, 0) == 1/r
assert hankel_transform(
1/r**m, r, k, 0) == 2**(-m + 1)*k**(m - 2)*gamma(-m/2 + 1)/gamma(m/2)
assert inverse_hankel_transform(
2**(-m + 1)*k**(m - 2)*gamma(-m/2 + 1)/gamma(m/2), k, r, 0) == r**(-m)
assert hankel_transform(1/r**m, r, k, nu) == (
2*2**(-m)*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2))
assert inverse_hankel_transform(2**(-m + 1)*k**(
m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2), k, r, nu) == r**(-m)
assert hankel_transform(r**nu*exp(-a*r), r, k, nu) == \
2**(nu + 1)*a*k**(-nu - 3)*(a**2/k**2 + 1)**(-nu - S(
3)/2)*gamma(nu + S(3)/2)/sqrt(pi)
assert inverse_hankel_transform(
2**(nu + 1)*a*k**(-nu - 3)*(a**2/k**2 + 1)**(-nu - S(3)/2)*gamma(
nu + S(3)/2)/sqrt(pi), k, r, nu) == r**nu*exp(-a*r)
def test_issue_7181():
assert mellin_transform(1/(1 - x), x, s) != None
def test_issue_8882():
# This is the original test.
# from sympy import diff, Integral, integrate
# r = Symbol('r')
# psi = 1/r*sin(r)*exp(-(a0*r))
# h = -1/2*diff(psi, r, r) - 1/r*psi
# f = 4*pi*psi*h*r**2
# assert integrate(f, (r, -oo, 3), meijerg=True).has(Integral) == True
# To save time, only the critical part is included.
F = -a**(-s + 1)*(4 + 1/a**2)**(-s/2)*sqrt(1/a**2)*exp(-s*I*pi)* \
sin(s*atan(sqrt(1/a**2)/2))*gamma(s)
raises(IntegralTransformError, lambda:
inverse_mellin_transform(F, s, x, (-1, oo),
**{'as_meijerg': True, 'needeval': True}))
def test_issue_7173():
from sympy import cse
x0, x1, x2, x3 = symbols('x:4')
ans = laplace_transform(sinh(a*x)*cosh(a*x), x, s)
r, e = cse(ans)
assert r == [
(x0, pi/2),
(x1, arg(a)),
(x2, Abs(x1)),
(x3, Abs(x1 + pi))]
assert e == [
a/(-4*a**2 + s**2),
0,
((x0 >= x2) | (x2 < x0)) & ((x0 >= x3) | (x3 < x0))]
def test_issue_8514():
from sympy import simplify
a, b, c, = symbols('a b c', positive=True)
t = symbols('t', positive=True)
ft = simplify(inverse_laplace_transform(1/(a*s**2+b*s+c),s, t))
assert ft == (I*exp(t*cos(atan2(0, -4*a*c + b**2)/2)*sqrt(Abs(4*a*c -
b**2))/a)*sin(t*sin(atan2(0, -4*a*c + b**2)/2)*sqrt(Abs(
4*a*c - b**2))/(2*a)) + exp(t*cos(atan2(0, -4*a*c + b**2)
/2)*sqrt(Abs(4*a*c - b**2))/a)*cos(t*sin(atan2(0, -4*a*c
+ b**2)/2)*sqrt(Abs(4*a*c - b**2))/(2*a)) + I*sin(t*sin(
atan2(0, -4*a*c + b**2)/2)*sqrt(Abs(4*a*c - b**2))/(2*a))
- cos(t*sin(atan2(0, -4*a*c + b**2)/2)*sqrt(Abs(4*a*c -
b**2))/(2*a)))*exp(-t*(b + cos(atan2(0, -4*a*c + b**2)/2)
*sqrt(Abs(4*a*c - b**2)))/(2*a))/sqrt(-4*a*c + b**2)
def test_issue_12591():
x, y = symbols("x y", real=True)
assert fourier_transform(exp(x), x, y) == FourierTransform(exp(x), x, y)
def test_issue_14692():
b = Symbol('b', negative=True)
assert laplace_transform(1/(I*x - b), x, s) == \
(-I*exp(I*b*s)*expint(1, b*s*exp_polar(I*pi/2)), 0, True)
|
the-stack_106_22221 | from __future__ import print_function, absolute_import
from reid.snatch import *
from reid import datasets
from reid import models
import numpy as np
import torch
import argparse
import os
from reid.utils.logging import Logger
import os.path as osp
import sys
from torch.backends import cudnn
from reid.utils.serialization import load_checkpoint
from torch import nn
import time
import math
import pickle
import time
import matplotlib.pyplot as plt
import os
import codecs
from common_tool import *
def main(args):
cudnn.benchmark = True
cudnn.enabled = True
# get all the labeled and unlabeled data for training
dataset_all = datasets.create(args.dataset, osp.join(args.data_dir, args.dataset))
l_data, u_data = get_one_shot_in_cam1(dataset_all, load_path="./examples/oneshot_{}_used_in_paper.pickle".format(
dataset_all.name))
    # declare the pseudo-label data lists
    p_data = []
    s_data = []  # the pseudo-labeled samples selected so far
    new_train_data = []
    mv_num = math.ceil(len(u_data) / args.total_step)  # the last round will necessarily have fewer than add_num samples
    tagper_num = math.ceil(len(u_data) / args.train_tagper_step)
    # print experiment info
print("{}/{} is training with {}, the max_frames is {}, and will be saved to {}".format(args.exp_name,args.exp_order,args.dataset,args.max_frames,args.logs_dir))
    # print the hyperparameter settings
print("parameters are setted as follows")
print("\ttotal_step:\t{}".format(args.total_step))
print("\ttagper_step:\t{}".format(args.train_tagper_step))
print("\tepoch:\t{}".format(args.epoch))
print("\tstep_size:\t{}".format(args.step_size))
print("\tbatch_size:\t{}".format(args.batch_size))
    # specify the output files
    # the third part should describe how the key parameters are set
reid_path = osp.join(args.logs_dir, args.dataset, args.exp_name, args.exp_order)
sys.stdout = Logger(osp.join(reid_path, 'log' + time.strftime(".%m_%d_%H-%M-%S") + '.txt'))
data_file = codecs.open(osp.join(reid_path, 'data.txt'), mode='a')
time_file = codecs.open(osp.join(reid_path, 'time.txt'), mode='a')
tagper_file = codecs.open(osp.join(reid_path, "tagper_data.txt"), mode='a')
# initial the EUG algorithm
reid = EUG(model_name=args.arch, batch_size=args.batch_size, mode=args.mode, num_classes=dataset_all.num_train_ids,
data_dir=dataset_all.images_dir, l_data=l_data, u_data=u_data, save_path=reid_path,
max_frames=args.max_frames)
tagper = EUG(model_name=args.arch, batch_size=args.batch_size, mode=args.mode,
num_classes=dataset_all.num_train_ids,
data_dir=dataset_all.images_dir, l_data=l_data, u_data=u_data, save_path=reid_path,
max_frames=args.max_frames)
    # record the experiment start time
exp_start = time.time()
    for step in range(args.total_step+1):  # +1 so that all of the unlabeled data is eventually added to the training set
print("---------------------------------training step:{}/{}-------------------------------------".format(step+1,args.total_step+1))
        # start training
reid_start = time.time()
        train_reid_data = l_data+s_data  # the one_shot data is kept unchanged throughout this process
if (step == 0) and not args.is_baseline:
reid.resume(osp.join(reid_path, 'Dissimilarity_step_{}.ckpt'.format(step)), step)
else:
reid.train(train_reid_data, step, tagper=0, epochs=args.epoch, step_size=args.step_size, init_lr=0.1)
        # start evaluation
        # mAP, top1, top5, top10, top20 =0,0,0,0,0
        mAP, top1, top5, top10, top20 = reid.evaluate(dataset_all.query, dataset_all.gallery)
        # measure select_pre before training the tagper
        pred_y, pred_score, label_pre = reid.estimate_label(u_data, l_data)  # estimate labels for u_data
selected_idx = reid.select_top_data(pred_score, min(mv_num * (step + 1), len(u_data)))
select_pre = reid.get_select_pre(selected_idx, pred_y, u_data)
reid_end = time.time()
tagper_start = time.time()
        '''the first tagper could simply be resumed'''
# if step == 0 and not args.is_baseline:
# tagper.resume(osp.join(reid_path,'tagper','Dissimilarity_step_0.ckpt'), 0)
# else:
# tagper.resume(osp.join(reid_path, 'Dissimilarity_step_{}.ckpt'.format(step)), step)
        # selected_idx = tagper.select_top_data(pred_score, min(tagper_num*(step+1),len(u_data)))  # the number of samples used to train the tagper also grows each step
        # new_train_data = tagper.generate_new_train_data_only(selected_idx, pred_y, u_data)  # this selection accuracy should be the same as label_pre above.
# train_tagper_data = one_shot+l_data+new_train_data
# tagper.train(train_tagper_data, step, tagper=1, epochs=args.epoch, step_size=args.step_size, init_lr=0.1)
        '''instead, every tagper is retrained from scratch'''
if step != 0:
tagper.resume(osp.join(reid_path, 'Dissimilarity_step_{}.ckpt'.format(step)), step)
            selected_idx_for_tagper = tagper.select_top_data(pred_score, min(tagper_num * (step + 1), len(u_data)))  # the number of samples used to train the tagper also grows each step
            new_train_data = tagper.generate_new_train_data_only(selected_idx_for_tagper, pred_y,
                                                                  u_data)  # this selection accuracy should be the same as label_pre above.
train_tagper_data = l_data + new_train_data
tagper.train(train_tagper_data, step, tagper=1, epochs=args.epoch, step_size=args.step_size, init_lr=0.1)
        else:  # if step is 0, just resume the pretrained tagper directly
tagper.resume(osp.join(reid_path,'tagper1','Dissimilarity_step_{}.ckpt'.format(step)), step)
        # start evaluation
# mAP, top1, top5, top10, top20 =0,0,0,0,0
tmAP, ttop1, ttop5, ttop10, ttop20 = tagper.evaluate(dataset_all.query, dataset_all.gallery)
tpred_y, tpred_score, tlabel_pre = tagper.estimate_label(u_data, l_data)
        # now move data for the reid model
        selected_idx = tagper.select_top_data(tpred_score, min(mv_num * (step + 1), len(u_data)))  # select from all of u_data
s_data, tselect_pre = tagper.move_unlabel_to_label_cpu(selected_idx, tpred_y, u_data)
tapger_end = time.time()
data_file.write(
"step:{} mAP:{:.2%} top1:{:.2%} top5:{:.2%} top10:{:.2%} top20:{:.2%} len(l_data):{} label_pre:{:.2%} select_pre:{:.2%}\n".format(
int(step + 1), mAP, top1, top5, top10, top20, len(s_data), label_pre, select_pre))
print(
"reid step:{} mAP:{:.2%} top1:{:.2%} top5:{:.2%} top10:{:.2%} top20:{:.2%} len(l_data):{} label_pre:{:.2%} select_pre:{:.2%} \n".format(
int(step + 1), mAP, top1, top5, top10, top20, len(s_data), label_pre, select_pre))
tagper_file.write(
"step:{} mAP:{:.2%} top1:{:.2%} top5:{:.2%} top10:{:.2%} top20:{:.2%} len(l_data):{} label_pre:{:.2%} select_pre:{:.2%}\n".format(
int(step + 1), tmAP, ttop1, ttop5, ttop10, ttop20, len(s_data), tlabel_pre, tselect_pre))
print(
"tagper step:{} mAP:{:.2%} top1:{:.2%} top5:{:.2%} top10:{:.2%} top20:{:.2%} len(l_data):{} label_pre:{:.2%} select_pre:{:.2%}\n".format(
int(step + 1), tmAP, ttop1, ttop5, ttop10, ttop20, len(s_data), tlabel_pre, tselect_pre))
if args.clock:
reid_time = reid_end - reid_start
tagper_time = tapger_end - tagper_start
step_time = tapger_end - reid_start
time_file.write(
"step:{} reid_time:{} tagper_time:{} step_time:{}\n".format(int(step + 1), reid_time, tagper_time,
step_time))
h, m, s = changetoHSM(step_time)
print("this step is over, cost %02d:%02d:%02.6f" % (h, m, s))
data_file.close()
tagper_file.close()
if (args.clock):
exp_end = time.time()
exp_time = exp_end - exp_start
h, m, s = changetoHSM(exp_time)
print("experiment is over, cost %02d:%02d:%02.6f" % (h, m, s))
time_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='ATM')
parser.add_argument('-d', '--dataset', type=str, default='DukeMTMC-VideoReID', choices=datasets.names()) # s
    working_dir = os.path.dirname(os.path.abspath(__file__))  # use the absolute path
    parser.add_argument('--data_dir', type=str, metavar='PATH', default=os.path.join(working_dir, 'data'))  # root directory for loading the dataset
    parser.add_argument('--logs_dir', type=str, metavar='PATH', default=os.path.join(working_dir, 'logs'))  # root directory for saving logs
parser.add_argument('--exp_name', type=str, default="atm")
parser.add_argument('--exp_order', type=str, default="0")
parser.add_argument('--resume', type=bool, default=False)
parser.add_argument('--mode', type=str, choices=["Classification", "Dissimilarity"],
default="Dissimilarity") # 这个考虑要不要取消掉
parser.add_argument('--max_frames', type=int, default=400)
    parser.add_argument('--clock', type=bool, default=True)  # whether to time the run
    parser.add_argument('--is_baseline', type=bool, default=False)  # not a baseline run by default
    # the key parameters are as follows
    parser.add_argument('--total_step', type=int, default=5)  # five iterations in total by default
    parser.add_argument('--train_tagper_step', type=float, default=3)  # number of steps used to train the tagper
parser.add_argument('--epoch', type=int, default=70)
parser.add_argument('--step_size', type=int, default=55)
parser.add_argument('-b', '--batch_size', type=int, default=16)
    # the parameters below are kept but their purpose is unclear for now
parser.add_argument('-a', '--arch', type=str, default='avg_pool', choices=models.names()) # eug model_name
parser.add_argument('-i', '--iter-step', type=int, default=5)
parser.add_argument('-g', '--gamma', type=float, default=0.3)
parser.add_argument('-l', '--l', type=float)
parser.add_argument('--continuous', action="store_true")
main(parser.parse_args())
'''
python3.6 atm02.py --total_step 5 --train_tagper_step 3 --exp_order 1
'''
|
the-stack_106_22222 | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains view for /v1/cluster API."""
from decapod_api import auth
from decapod_api import exceptions as http_exceptions
from decapod_api import validators
from decapod_api.views import generic
from decapod_common import exceptions as base_exceptions
from decapod_common import log
from decapod_common.models import cluster
DATA_SCHEMA = {
"name": {"$ref": "#/definitions/cluster_name_string"},
"configuration": {
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"type": "object",
"required": ["server_id", "version", "server_name",
"version", "fqdn", "ip"],
"properties": {
"server_id": {"$ref": "#/definitions/dmidecode_uuid"},
"server_name": {"$ref": "#/definitions/non_empty_string"},
"version": {"$ref": "#/definitions/positive_integer"},
"fqdn": {"$ref": "#/definitions/non_empty_string"},
"ip": {"$ref": "#/definitions/non_empty_string"}
}
}
}
}
}
"""Data schema for the model."""
MODEL_SCHEMA = validators.create_model_schema(
cluster.ClusterModel.MODEL_NAME, DATA_SCHEMA
)
"""Schema for the model with optional data fields."""
POST_SCHEMA = validators.create_data_schema(
{"name": {"$ref": "#/definitions/cluster_name_string"}}, True)
LOG = log.getLogger(__name__)
"""Logger."""
class ClusterView(generic.VersionedCRUDView):
"""Implementation of view for /v1/cluster API."""
decorators = [
auth.AUTH.require_authorization("api", "view_cluster"),
auth.AUTH.require_authentication
]
NAME = "cluster"
MODEL_NAME = "cluster"
ENDPOINT = "/cluster/"
PARAMETER_TYPE = "uuid"
def get_all(self):
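        """Return a paginated listing of all clusters."""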
return cluster.ClusterModel.list_models(self.pagination)
@validators.with_model(cluster.ClusterModel)
def get_item(self, item_id, item, *args):
return item
@auth.AUTH.require_authorization("api", "view_cluster_versions")
def get_versions(self, item_id):
return cluster.ClusterModel.list_versions(
str(item_id), self.pagination)
@auth.AUTH.require_authorization("api", "view_cluster_versions")
def get_version(self, item_id, version):
model = cluster.ClusterModel.find_version(str(item_id), int(version))
if not model:
LOG.info("Cannot find model with ID %s", item_id)
raise http_exceptions.NotFound
return model
@auth.AUTH.require_authorization("api", "edit_cluster")
@validators.with_model(cluster.ClusterModel)
@validators.require_schema(MODEL_SCHEMA)
@validators.no_updates_on_default_fields
def put(self, item_id, item):
if "name" in self.request_json["data"]:
item.name = self.request_json["data"]["name"]
item.initiator_id = self.initiator_id
try:
item.save()
except base_exceptions.CannotUpdateDeletedModel as exc:
LOG.warning(
"Cannot update cluster %s (deleted at %s, "
"version %s)",
item_id, item.time_deleted, item.version
)
raise http_exceptions.CannotUpdateDeletedModel() from exc
except base_exceptions.UniqueConstraintViolationError as exc:
LOG.warning("Cannot update cluster %s (unique constraint "
"violation)", self.request_json["data"]["name"])
raise http_exceptions.CannotUpdateModelWithSuchParameters() \
from exc
LOG.info("Cluster %s was updated by %s", item_id, self.initiator_id)
return item
@auth.AUTH.require_authorization("api", "create_cluster")
@validators.require_schema(POST_SCHEMA)
def post(self):
try:
cluster_model = cluster.ClusterModel.create(
self.request_json["name"],
initiator_id=self.initiator_id
)
except base_exceptions.UniqueConstraintViolationError as exc:
LOG.warning(
"Cannot create cluster %s (unique constraint "
"violation)",
self.request_json["name"]
)
raise http_exceptions.ImpossibleToCreateSuchModel() from exc
LOG.info("Cluster %s (%s) created by %s",
self.request_json["name"], cluster_model.model_id,
self.initiator_id)
return cluster_model
@auth.AUTH.require_authorization("api", "delete_cluster")
@validators.with_model(cluster.ClusterModel)
def delete(self, item_id, item):
try:
item.delete()
except base_exceptions.CannotUpdateDeletedModel as exc:
LOG.warning("Cannot delete deleted role %s", item_id)
raise http_exceptions.CannotUpdateDeletedModel() from exc
except base_exceptions.CannotDeleteClusterWithServers as exc:
raise http_exceptions.CannotDeleteClusterWithServers from exc
LOG.info("Cluster %s was deleted by %s", item_id, self.initiator_id)
return item
|
the-stack_106_22223 | import os
from typing import Dict
import pymongo
from aiogram.types.input_file import InputFile
from bot.imagesstorage import MissingMongoDBClient
from bot.imagesstorage import LevelDoesNotExistError
class ImagesStorage:
"""
This class implements interaction with database
containing music scores. MongoDB is used as a
storage. Notice that this class assumes that
database is called `images-storage` and collection
is called `images`. For better performance this
class implements caching.
"""
def __init__(self, mongodb: pymongo.MongoClient, local_storage="images/"):
"""
:param mongodb: connection to MongoDB
:param local_storage: name of the folder where images should be cached
Default: `images/`
"""
if not mongodb:
raise MissingMongoDBClient
self.client = mongodb
self.images_cache: Dict[str, str] = {}
self.local_storage = local_storage
def level(self, level_id: int) -> InputFile:
"""
Loads image for a given level from the database or takes it from local cache.
:param level_id: numeric identifier of the level. Integer number from 1
to number of levels.
:returns: aiogram InputFile object describing the file that contains and image
:raises:
LevelDoesNotExistError: there is no image for a given level_id in MongoDB storage
"""
identifier = f"level-{level_id}"
if identifier in self.images_cache.keys():
filename = self.images_cache[identifier]
if os.path.exists(filename):
return InputFile(filename)
database = self.client["images-storage"]
images = database['images']
image = images.find_one({"identifier": identifier})
if not image:
raise LevelDoesNotExistError
image_content = image["content"]
image_filename = image["filename"]
if not os.path.exists(self.local_storage):
os.makedirs(self.local_storage)
with open(self.local_storage + image_filename, "wb") as file:
file.write(image_content)
self.images_cache[identifier] = image_filename
return InputFile(self.local_storage + image_filename)
|
the-stack_106_22224 | """Agenda related views."""
import datetime
import pytz
import vobject as vobject
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Q
from django.http import HttpResponse
from django.views import View
from django.views.generic import ListView
from icalendar import Calendar
from BookClub.helpers import *
from BookClub.models import Meeting, User
class AgendaView(LoginRequiredMixin, ListView):
"""Render all meetings in the user's agenda."""
template_name = 'meeting/agenda.html'
def get_queryset(self):
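        """Return the user's upcoming joined meetings, ordered by meeting time."""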
user = User.objects.get(id=self.request.user.id)
today = pytz.utc.localize(datetime.datetime.today())
subquery = Meeting.objects.filter(Q(members=user.id, meeting_time__gte=today)).order_by('meeting_time')
return subquery
def get_context_data(self, **kwargs):
"""Generate context data to be shown in the template."""
club_ids = get_memberships_with_access(self.request.user)
clubs = Club.objects.filter(id__in=club_ids)
context = super().get_context_data(**kwargs)
context['meetings'] = self.get_queryset()
today = datetime.date.today()
context['joined_today'] = Meeting.objects.filter(club__in=clubs, members=self.request.user.id, meeting_time__date=today).order_by('meeting_time')
context['joined_upcoming'] = Meeting.objects.filter(club__in=clubs, members=self.request.user.id, meeting_time__date__gt=today).order_by('meeting_time')
context['not_joined_today'] = Meeting.objects.filter(club__in=clubs, meeting_time__date=today).exclude(members=self.request.user.id).order_by('meeting_time')
context['not_joined_upcoming'] = Meeting.objects.filter(club__in=clubs, meeting_time__date__gt=today).exclude(members=self.request.user.id).order_by('meeting_time')
context['all_meetings_today'] = Meeting.objects.filter(club__in=clubs, meeting_time__date=today).order_by('meeting_time')
context['all_meetings_upcoming'] = Meeting.objects.filter(club__in=clubs, meeting_time__date__gt=today).order_by('meeting_time')
context['current_user'] = self.request.user
return context
class ExportCalendarView(View):
"""Allow the user to export their agenda."""
def get(self, *args, **kwargs):
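        """Serialise the user's upcoming meetings into an iCalendar (.ics) attachment."""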
user = User.objects.get(id=self.request.user.id)
today = pytz.utc.localize(datetime.datetime.today())
meetings = Meeting.objects.filter(Q(members=user.id, meeting_time__gte=today)).order_by('meeting_time')
cal = vobject.iCalendar()
cal.add('method').value = 'PUBLISH' # IE/Outlook needs this
for meeting in meetings:
vevent = cal.add('vevent')
vevent.add('dtstart').value = meeting.meeting_time
vevent.add('dtend').value = meeting.meeting_end_time
vevent.add('dtstamp').value = datetime.datetime.now(tz=pytz.UTC)
vevent.add('summary').value = meeting.title
vevent.add('uid').value = str(meeting.id)
vevent.add('location').value = meeting.location
vevent.add('description').value = meeting.description
icalstream = cal.serialize()
response = HttpResponse(icalstream, content_type='text/calendar')
response['Filename'] = 'agenda.ics' # IE needs this
response['Content-Disposition'] = 'attachment; filename=agenda.ics'
return response
|
the-stack_106_22225 | import asyncio
import logging
from bleak import BleakClient, BleakError, BleakScanner
class SensorTile():
def __init__(self, address):
self.address = address
self.client = BleakClient(self.address)
# A LiFo Queue will ensure that the most recent registered
# ST data is retrieved
self.data = asyncio.LifoQueue(maxsize=1)
async def BLE_connect(self):
# Connect to SensorTile
await self.client.connect()
# Ensure connection was established
assert self.client.is_connected, "ST is not connected"
print("\n\tConnected to SensorTile")
async def BLE_disconnect(self):
# Disconnect from SensorTile
await self.client.disconnect()
print("\tDisconnected from SensorTile.\n")
async def start_notification(self, char):
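        # Subscribe to notifications on the characteristic; incoming packets go to notification_handler.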
try:
await self.client.start_notify(char, self.notification_handler)
except Exception as e:
print(f"Error: {e}")
async def stop_notification(self, char):
try:
await self.client.stop_notify(char)
except Exception as e:
print(f"Error: {e}")
# Add data to Queue
async def notification_handler(self, handle, data):
# Add data to Queue
await self.data.put((handle, data))
async def find_ST():
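    # Scan repeatedly until a SensorTile is found or the user declines another scan.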
print("\n\t##### Scanning BLE Devices #####\n")
search_for_ST = True
while search_for_ST:
# Find SensorTile address
address = await scan_ST_address()
# Break scan if ST address was found.
if verify_ST_address(address):
return address
x = input("\tWould you like to scan again? (y/n) ")
if x.lower() == "n":
return None
async def scan_ST_address():
try:
# Scan BLE devices
devices = await BleakScanner.discover()
print(f"\n\tFound {str(len(devices))} devices.")
# Find SensorTile
for d in devices:
if 'AM1V310' in d.name:
print("\n\tFound SensorTile with AM1V310 firmware.")
print(f"\tName: {d.name}\tAddress: {d.address}")
return d.address
except BleakError:
print("\n\tPlease turn on your system's bluetooth device.\n")
# A return value of True will halt the search.
def verify_ST_address(address):
# Verify that an address was retrieved
if address is None:
print("""
No SensorTile was found.
Please make sure your SensorTile is on.
If that does not work, ensure you flashed the correct firmware.
""")
return False
else:
return True
async def read_characteristic(client, char):
try:
return await client.read_gatt_char(char)
except Exception as e:
print(f"Error: {e}")
async def write_characteristic(client, char, data):
try:
await client.write_gatt_char(char, data)
except Exception as e:
print(f"Error: {e}")
async def write_descriptor(client, desc, data):
try:
await client.write_gatt_descriptor(desc, data)
except Exception as e:
print(f"Error: {e}")
async def get_data(self):
"""
Read all services, characteristics, and descriptors,
as well as their properties, and values.
REF: bleak/examples/service_explorer.py
"""
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
for service in self.client.services:
logger.info(f"[Service] {service}")
for char in service.characteristics:
if "read" in char.properties:
try:
value = bytes(await self.client.read_gatt_char(char.uuid))
logger.info(
f"\t[Characteristic] {char} ({','.join(char.properties)}), Value: {value}"
)
except Exception as e:
logger.error(
f"\t[Characteristic] {char} ({','.join(char.properties)}), Value: {e}"
)
else:
value = None
logger.info(
f"\t[Characteristic] {char} ({','.join(char.properties)}), Value: {value}"
)
for descriptor in char.descriptors:
try:
value = bytes(
await self.client.read_gatt_descriptor(descriptor.handle)
)
logger.info(f"\t\t[Descriptor] {descriptor}) | Value: {value}")
except Exception as e:
logger.error(f"\t\t[Descriptor] {descriptor}) | Value: {e}")
# def notification_handler(sender, data):
# """Simple notification handler which prints the data received."""
# result = struct.unpack_from("<hhhhhhhhhh", data)
# print("")
# print(data)
# print(result)
# # Acceleration
# acc_x = struct.unpack_from("<h", data[2:4])
# acc_y = struct.unpack_from("<h", data[4:6])
# acc_z = struct.unpack_from("<h", data[6:8])
# print(f"ACC X: {acc_x} ACC Y: {acc_y} ACC Z: {acc_z}")
# # Gyroscope
# gyr_x = struct.unpack_from("<h", data[8:10])
# gyr_y = struct.unpack_from("<h", data[10:12])
# gyr_z = struct.unpack_from("<h", data[12:14])
# print(f"GYR X: {gyr_x} GYR Y: {gyr_y} GYR Z: {gyr_z}")
# # Magnetometer
# mag_x = struct.unpack_from("<h", data[14:16])
# mag_y = struct.unpack_from("<h", data[16:18])
# mag_z = struct.unpack_from("<h", data[18:20])
# print(f"MAG X: {mag_x} MAG Y: {mag_y} MAG Z: {mag_z}") |
the-stack_106_22227 | import os
import json
import warnings
from io import open
from os.path import dirname
from os.path import abspath
from os.path import join as pathjoin
from .bids_validator import BIDSValidator
from .utils import _merge_event_files
from grabbit import Layout, File
from grabbit.external import six
from grabbit.utils import listify
__all__ = ['BIDSLayout']
class BIDSLayout(Layout):
''' Layout class representing an entire BIDS project.
Args:
path (str): The path specifying the root directory of the BIDS project.
config (list, str): An optional specification of the config(s) to
apply to the layout. If passed, must be one of:
- A dictionary containing config information.
- A string giving either the name of a valid built-in config (e.g.,
'bids' or 'derivatives'), or the path to a JSON file containing
the config.
- A tuple with 2 elements, where the first element is a string
that names the built-in config to use, and the second is either:
* A list or tuple of paths to which that config should apply
* A partial dictionary to merge into the named built-in config,
and can contain any key normally found in the config.
- A list, where each element can be any of the above.
At present, built-in domains include 'bids' and 'derivatives'.
validate (bool): If True, all files are checked for BIDS compliance
when first indexed, and non-compliant files are ignored. This
provides a convenient way to restrict file indexing to only those
files defined in the "core" BIDS spec, as setting validate=True
will lead files in supplementary folders like derivatives/, code/,
etc. to be ignored.
index_associated (bool): Argument passed onto the BIDSValidator;
ignored if validate = False.
include (str, list): String or list of strings giving paths to files or
directories to include in indexing. Note that if this argument is
passed, *only* files and directories that match at least one of the
patterns in the include list will be indexed. Cannot be used
together with 'exclude'.
        exclude (str, list): String or list of strings giving paths to files or
            directories to exclude from indexing. If this argument is passed,
            all files and directories that match at least one of the patterns
            in the exclude list will be ignored. Cannot be used together with
            'include'.
kwargs: Optional keyword arguments to pass onto the Layout initializer
in grabbit.
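        Examples (illustrative; paths are hypothetical):
            >>> layout = BIDSLayout('/data/ds001')                 # default configs
            >>> layout = BIDSLayout('/data/ds001', config='bids')  # single built-in config
            >>> layout = BIDSLayout('/data/ds001',
            ...                     config=[('derivatives', ['/data/ds001/derivatives'])])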
'''
def __init__(self, path, config=None, validate=False,
index_associated=True, include=None, exclude=None, **kwargs):
self.validator = BIDSValidator(index_associated=index_associated)
self.validate = validate
# Determine which configs to load
conf_path = pathjoin(dirname(abspath(__file__)), 'config', '%s.json')
_all_doms = ['bids', 'derivatives']
if config is None:
config = ['bids', 'derivatives']
configs = []
def _load_config(conf):
if isinstance(conf, six.string_types):
if conf in _all_doms:
conf = conf_path % conf
conf = json.load(open(conf, 'r'))
return conf
for conf in listify(config):
if isinstance(conf, tuple):
_conf = _load_config(conf[0]).copy()
if isinstance(conf[1], dict):
_conf.update(conf[1])
else:
_conf['root'] = conf[1]
configs.append(_conf)
else:
configs.append(_load_config(conf))
# If 'bids' isn't in the list, the user probably made a mistake...
        if not any([c['name'] == 'bids' for c in configs]):
warnings.warn("The core BIDS configuration was not included in the"
" config list. If you override the default value for"
" config, you probably want to make sure 'bids' is "
"included in the list of values.")
super(BIDSLayout, self).__init__(path, config=configs,
dynamic_getters=True, include=include,
exclude=exclude, **kwargs)
def __repr__(self):
n_sessions = len([session for isub in self.get_subjects()
for session in self.get_sessions(subject=isub)])
n_runs = len([run for isub in self.get_subjects()
for run in self.get_runs(subject=isub)])
n_subjects = len(self.get_subjects())
root = self.root[-30:]
s = "BIDS Layout: ...{} | Subjects: {} | Sessions: {} | Runs: {}".format(
root, n_subjects, n_sessions, n_runs)
return s
def _validate_file(self, f):
# If validate=True then checks files according to BIDS and
# returns False if file doesn't fit BIDS specification
if not self.validate:
return True
to_check = f.path
to_check = to_check.split(os.path.abspath(self.root), maxsplit=1)[1]
sep = os.path.sep
        if to_check[:len(sep)] != sep:
            to_check = sep + to_check
return self.validator.is_bids(to_check)
def _get_nearest_helper(self, path, extension, type=None, **kwargs):
""" Helper function for grabbit get_nearest """
path = abspath(path)
if not type:
if 'type' not in self.files[path].entities:
raise ValueError(
"File '%s' does not have a valid type definition, most "
"likely because it is not a valid BIDS file." % path
)
type = self.files[path].entities['type']
tmp = self.get_nearest(path, extensions=extension, all_=True,
type=type, ignore_strict_entities=['type'],
**kwargs)
if len(tmp):
return tmp
else:
return None
def get_metadata(self, path, include_entities=False, **kwargs):
''' Returns metadata found in JSON sidecars for the specified file.
Args:
path (str): Path to the file to get metadata for.
kwargs (dict): Optional keyword arguments to pass onto
get_nearest().
Notes:
A dictionary containing metadata extracted from all matching .json
files is returned. In cases where the same key is found in multiple
files, the values in files closer to the input filename will take
precedence, per the inheritance rules in the BIDS specification.
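            For example (hypothetical path), calling
            ``layout.get_metadata('sub-01/func/sub-01_task-rest_bold.nii.gz')``
            merges the dataset-level and run-level JSON sidecars that apply to
            that image into a single dictionary.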
'''
if include_entities:
entities = self.files[abspath(path)].entities
merged_param_dict = entities
else:
merged_param_dict = {}
potentialJSONs = self._get_nearest_helper(path, '.json', **kwargs)
if potentialJSONs is None:
return merged_param_dict
for json_file_path in reversed(potentialJSONs):
if os.path.exists(json_file_path):
param_dict = json.load(open(json_file_path, "r",
encoding='utf-8'))
merged_param_dict.update(param_dict)
return merged_param_dict
def get_bvec(self, path, **kwargs):
tmp = self._get_nearest_helper(path, 'bvec', type='dwi', **kwargs)[0]
if isinstance(tmp, list):
return tmp[0]
else:
return tmp
def get_bval(self, path, **kwargs):
tmp = self._get_nearest_helper(path, 'bval', type='dwi', **kwargs)[0]
if isinstance(tmp, list):
return tmp[0]
else:
return tmp
def get_events(self, path, return_type='file', derivatives='both',
**kwargs):
""" For a given file in a BIDS project, finds corresponding event files
and optionally returns merged dataframe containing all variables.
Args:
path (str): Path to a file to match to events.
return_type (str): Type of output to return.
'file' returns list of files,
'df' merges events into a single DataFrame, giving precedence
to events closer to the file.
derivatives (str): How to handle derivative events.
'ignore' - Ignore any event files outside of root directory.
'only' - Only include event files from outside directories.
'both' - Include both. Derivative events have precedence.
Returns:
List of file or merged Pandas dataframe.
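            For example (hypothetical path),
            ``layout.get_events('sub-01/func/sub-01_task-rest_bold.nii.gz', return_type='df')``
            merges every matching *_events.tsv file into one DataFrame.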
"""
path = abspath(path)
# Get events in base Layout directory (ordered)
root_events = self._get_nearest_helper(
path, '.tsv', type='events', **kwargs) or []
entities = self.files[path].entities.copy()
if 'type' in entities:
entities.pop('type')
if 'modality' in entities and entities['modality'] == 'func':
entities.pop('modality')
entities.update(kwargs)
# Get all events
events = self.get(extensions='tsv', type='events',
return_type='file', **entities) or []
deriv_events = list(set(events) - set(root_events))
if derivatives == 'only':
events = deriv_events
elif derivatives == 'ignore':
events = root_events
else: # Combine with order
events = deriv_events + root_events
if return_type == 'df':
events = _merge_event_files(events)
elif not events:
return None
elif len(events) == 1:
return events[0]
return events
def get_fieldmap(self, path, return_list=False):
fieldmaps = self._get_fieldmaps(path)
if return_list:
return fieldmaps
else:
if len(fieldmaps) == 1:
return fieldmaps[0]
elif len(fieldmaps) > 1:
raise ValueError("More than one fieldmap found, but the "
"'return_list' argument was set to False. "
"Either ensure that there is only one "
"fieldmap for this image, or set the "
"'return_list' argument to True and handle "
"the result as a list.")
else: # len(fieldmaps) == 0
return None
def _get_fieldmaps(self, path):
sub = os.path.split(path)[1].split("_")[0].split("sub-")[1]
fieldmap_set = []
type_ = '(phase1|phasediff|epi|fieldmap)'
files = self.get(subject=sub, type=type_, extensions=['nii.gz', 'nii'])
for file in files:
metadata = self.get_metadata(file.filename)
if metadata and "IntendedFor" in metadata.keys():
if isinstance(metadata["IntendedFor"], list):
intended_for = metadata["IntendedFor"]
else:
intended_for = [metadata["IntendedFor"]]
if any([path.endswith(suffix) for suffix in intended_for]):
cur_fieldmap = {}
if file.type == "phasediff":
cur_fieldmap = {"phasediff": file.filename,
"magnitude1": file.filename.replace(
"phasediff", "magnitude1"),
"type": "phasediff"}
magnitude2 = file.filename.replace(
"phasediff", "magnitude2")
if os.path.isfile(magnitude2):
cur_fieldmap['magnitude2'] = magnitude2
elif file.type == "phase1":
cur_fieldmap["phase1"] = file.filename
cur_fieldmap["magnitude1"] = \
file.filename.replace("phase1", "magnitude1")
cur_fieldmap["phase2"] = \
file.filename.replace("phase1", "phase2")
cur_fieldmap["magnitude2"] = \
file.filename.replace("phase1", "magnitude2")
cur_fieldmap["type"] = "phase"
elif file.type == "epi":
cur_fieldmap["epi"] = file.filename
cur_fieldmap["type"] = "epi"
elif file.type == "fieldmap":
cur_fieldmap["fieldmap"] = file.filename
cur_fieldmap["magnitude"] = \
file.filename.replace("fieldmap", "magnitude")
cur_fieldmap["type"] = "fieldmap"
fieldmap_set.append(cur_fieldmap)
return fieldmap_set
def get_collections(self, level, types=None, variables=None, merge=False,
sampling_rate=None, **kwargs):
from bids.variables import load_variables
index = load_variables(self, types=types, levels=level, **kwargs)
return index.get_collections(level, variables, merge,
sampling_rate=sampling_rate)
def parse_entities(self, filelike):
if not isinstance(filelike, File):
filelike = File(filelike)
for ent in self.entities.values():
ent.matches(filelike)
return filelike.entities
|
the-stack_106_22230 | import torch
import torch.nn as nn
from ..utils.hyperparams import ACTIVATION_FN_FACTORY
class DenseDiscriminator(nn.Module):
def __init__(self, params):
"""
This class specifies the discriminator of an AAE.
It can be trained to distinguish real samples from a target distr.
(e.g. Gaussian, Uniform, Gaussian Mixture ...) from fake samples
constructed through the generator.
Args:
params (dict): A dict with the model parameters.
"""
super(DenseDiscriminator, self).__init__()
# Retrieve discriminator architecture
self.disc_hidden_sizes = params['discriminator_hidden_sizes']
self.input_size = params['input_size']
self.disc_activation_fn = ACTIVATION_FN_FACTORY[
params.get('discriminator_activation_fn', 'relu')]
        self.disc_dropout = (
            [params.get('discriminator_dropout', 0.0)] *
            len(self.disc_hidden_sizes)
            if isinstance(params.get('discriminator_dropout', 0.0),
                          (int, float))
            else params.get('discriminator_dropout', 0.0)
        )
        self._assertion_tests()
# Build discriminator
num_units = [self.input_size] + self.disc_hidden_sizes
ops = []
for index in range(1, len(num_units)):
ops.append(nn.Linear(num_units[index - 1], num_units[index]))
ops.append(self.disc_activation_fn)
if self.disc_dropout[index - 1] > 0.0:
ops.append(nn.Dropout(p=self.disc_dropout[index - 1]))
ops.append(nn.Linear(num_units[-1], 1))
ops.append(nn.Sigmoid())
self.discriminator = nn.Sequential(*ops)
def forward(self, x):
"""The discriminator aiming to classify true and fake latent samples.
Args:
data (torch.Tensor) : Input data of shape batch_size x input_size.
Returns:
torch.Tensor: Logits, i.e. for each score a probability of being
from the real target distribution p(z)
of shape `[batch_size, 1]`.
"""
return self.discriminator(x)
def loss(self, real, fake):
"""
The discriminator loss is fixed to be the binary cross entropy of the
real and fake samples.
Args:
real (torch.Tensor): Discriminator logits for target distribution
samples. Vector of length `batch_size`.
fake (torch.Tensor): Discriminator logits for generator samples
(ideally 0.0). Vector of length `batch_size`.
Returns:
torch.Tensor: binary_cross_entropy(real, fake)
"""
return -torch.mean(torch.cat([torch.log(real), torch.log(1 - fake)]))
def _assertion_tests(self):
pass
def load(self, path):
"""Load model from path."""
weights = torch.load(path)
self.load_state_dict(weights)
def save(self, path):
"""Save model to path."""
torch.save(self.state_dict(), path)
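# Illustrative sketch (not part of the original module): build a small discriminator
# from a hypothetical params dict and evaluate its loss on random latent codes. The
# keys mirror the ones consumed in __init__ above.
def _example_usage():
    params = {
        'input_size': 16,
        'discriminator_hidden_sizes': [32, 16],
        'discriminator_dropout': [0.1, 0.1],
    }
    disc = DenseDiscriminator(params)
    real_scores = disc(torch.randn(8, 16))  # scores for samples drawn from p(z)
    fake_scores = disc(torch.randn(8, 16))  # scores for generator samples
    return disc.loss(real_scores, fake_scores)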
|
the-stack_106_22232 | import math
angulo = float(input("Enter the desired angle in degrees: "))
# Convert the angle from degrees to radians, since the trigonometric functions in
# the math module work with radians (0.01745 is an approximation of pi/180).
anguloRad = angulo * 0.01745
# Another way to convert the angle is the math.radians function, which is more precise.
anguloNovo = math.radians(angulo)
seno = math.sin(anguloNovo)
coseno = math.cos(anguloRad)
tangente = math.tan(anguloRad)
print(f"The angle of {angulo:.1f} degrees has a SINE of {seno:.2f}")
print(f"The angle of {angulo:.1f} degrees has a COSINE of {coseno:.2f}")
print(f"The angle of {angulo:.1f} degrees has a TANGENT of {tangente:.2f}")
|
the-stack_106_22233 | #!/usr/bin/env python3
# Copyright (c) 2015-2018 TurboCoin
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid transactions.
In this test we connect to one node over p2p, and test tx requests."""
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
)
from test_framework.mininode import P2PDataStore
from test_framework.test_framework import TurbocoinTestFramework
from test_framework.util import (
assert_equal,
wait_until,
)
from data import invalid_txs
class InvalidTxRequestTest(TurbocoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def bootstrap_p2p(self, *, num_connections=1):
"""Add a P2P connection to the node.
Helper to connect and wait for version handshake."""
for _ in range(num_connections):
self.nodes[0].add_p2p_connection(P2PDataStore())
def reconnect_p2p(self, **kwargs):
"""Tear down and bootstrap the P2P connection to the node.
The node gets disconnected several times in this test. This helper
method reconnects the p2p and restarts the network thread."""
self.nodes[0].disconnect_p2ps()
self.bootstrap_p2p(**kwargs)
def run_test(self):
node = self.nodes[0] # convenience reference to the node
self.bootstrap_p2p() # Add one p2p connection to the node
best_block = self.nodes[0].getbestblockhash()
tip = int(best_block, 16)
best_block_time = self.nodes[0].getblock(best_block)['time']
block_time = best_block_time + 1
self.log.info("Create a new block with an anyone-can-spend coinbase.")
height = 1
block = create_block(tip, create_coinbase(height), block_time)
block.solve()
# Save the coinbase for later
block1 = block
tip = block.sha256
node.p2p.send_blocks_and_test([block], node, success=True)
self.log.info("Mature the block.")
self.nodes[0].generatetoaddress(100, self.nodes[0].get_deterministic_priv_key().address)
# Iterate through a list of known invalid transaction types, ensuring each is
# rejected. Some are consensus invalid and some just violate policy.
for BadTxTemplate in invalid_txs.iter_all_templates():
self.log.info("Testing invalid transaction: %s", BadTxTemplate.__name__)
template = BadTxTemplate(spend_block=block1)
tx = template.get_tx()
node.p2p.send_txs_and_test(
[tx], node, success=False,
expect_disconnect=template.expect_disconnect,
reject_reason=template.reject_reason,
)
if template.expect_disconnect:
self.log.info("Reconnecting to peer")
self.reconnect_p2p()
# Make two p2p connections to provide the node with orphans
# * p2ps[0] will send valid orphan txs (one with low fee)
# * p2ps[1] will send an invalid orphan tx (and is later disconnected for that)
self.reconnect_p2p(num_connections=2)
self.log.info('Test orphan transaction handling ... ')
# Create a root transaction that we withhold until all dependent transactions
# are sent out and in the orphan cache
SCRIPT_PUB_KEY_OP_TRUE = b'\x51\x75' * 15 + b'\x51'
tx_withhold = CTransaction()
tx_withhold.vin.append(CTxIn(outpoint=COutPoint(block1.vtx[0].sha256, 0)))
tx_withhold.vout.append(CTxOut(nValue=50 * COIN - 12000, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
tx_withhold.calc_sha256()
# Our first orphan tx with some outputs to create further orphan txs
tx_orphan_1 = CTransaction()
tx_orphan_1.vin.append(CTxIn(outpoint=COutPoint(tx_withhold.sha256, 0)))
tx_orphan_1.vout = [CTxOut(nValue=10 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE)] * 3
tx_orphan_1.calc_sha256()
# A valid transaction with low fee
tx_orphan_2_no_fee = CTransaction()
tx_orphan_2_no_fee.vin.append(CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 0)))
tx_orphan_2_no_fee.vout.append(CTxOut(nValue=10 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
# A valid transaction with sufficient fee
tx_orphan_2_valid = CTransaction()
tx_orphan_2_valid.vin.append(CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 1)))
tx_orphan_2_valid.vout.append(CTxOut(nValue=10 * COIN - 12000, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
tx_orphan_2_valid.calc_sha256()
# An invalid transaction with negative fee
tx_orphan_2_invalid = CTransaction()
tx_orphan_2_invalid.vin.append(CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 2)))
tx_orphan_2_invalid.vout.append(CTxOut(nValue=11 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
self.log.info('Send the orphans ... ')
# Send valid orphan txs from p2ps[0]
node.p2p.send_txs_and_test([tx_orphan_1, tx_orphan_2_no_fee, tx_orphan_2_valid], node, success=False)
# Send invalid tx from p2ps[1]
node.p2ps[1].send_txs_and_test([tx_orphan_2_invalid], node, success=False)
assert_equal(0, node.getmempoolinfo()['size']) # Mempool should be empty
assert_equal(2, len(node.getpeerinfo())) # p2ps[1] is still connected
self.log.info('Send the withhold tx ... ')
with node.assert_debug_log(expected_msgs=["bad-txns-in-belowout"]):
node.p2p.send_txs_and_test([tx_withhold], node, success=True)
# Transactions that should end up in the mempool
expected_mempool = {
t.hash
for t in [
tx_withhold, # The transaction that is the root for all orphans
tx_orphan_1, # The orphan transaction that splits the coins
tx_orphan_2_valid, # The valid transaction (with sufficient fee)
]
}
# Transactions that do not end up in the mempool
        # tx_orphan_2_no_fee, because it has too low fee (p2ps[0] is not disconnected for relaying that tx)
        # tx_orphan_2_invalid, because it has negative fee (p2ps[1] is disconnected for relaying that tx)
wait_until(lambda: 1 == len(node.getpeerinfo()), timeout=12) # p2ps[1] is no longer connected
assert_equal(expected_mempool, set(node.getrawmempool()))
if __name__ == '__main__':
InvalidTxRequestTest().main()
|
the-stack_106_22234 | import os
from azure.storage.blob import BlobServiceClient
# Test connection string for Azurite (local development)
TEST_CONN_STR = (
    "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;"
    "AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;"
    "BlobEndpoint=http://azurite:10000/devstoreaccount1;"
)
# The Apache beam BlobStorageFileSystem expects the AZURE_STORAGE_CONNECTION_STRING environment variable
# to be set to the correct Azure Blob Storage connection string.
AZURE_BLOB_CONNECTION_STRING = (
os.environ.get("CODALAB_AZURE_BLOB_CONNECTION_STRING") or TEST_CONN_STR
)
os.environ['AZURE_STORAGE_CONNECTION_STRING'] = AZURE_BLOB_CONNECTION_STRING
client = BlobServiceClient.from_connection_string(AZURE_BLOB_CONNECTION_STRING)
# This is the name of the storage account, which determines the first part of Azure URLs. For example,
# if AZURE_BLOB_ACCOUNT_NAME is equal to "devstoreaccount1", all Azure URLs for objects within that account
# will start with "azfs://devstoreaccount1".
AZURE_BLOB_ACCOUNT_NAME = client.account_name
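# Illustrative sketch (not part of the original module): how a full azfs:// URL for a
# blob could be assembled from the account name above. Container and blob names are
# placeholders.
def example_azfs_url(container_name, blob_name):
    return f"azfs://{AZURE_BLOB_ACCOUNT_NAME}/{container_name}/{blob_name}"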
|
the-stack_106_22236 | # coding: utf-8
import numpy as np
from kerasy.ML.sampling import GibbsMsphereSampler
def test_gibbs_msphere_sampling(target=0.15):
radius = 10
num_samples = 10000
dimension = 6
sampler = GibbsMsphereSampler(dimension=dimension, radius=radius)
sample = sampler.sample(num_samples, verbose=-1)
norm = np.sum(np.square(sample), axis=1)
actual = np.count_nonzero(norm <= (radius/2)**2)
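    # A ball of half the radius occupies (1/2)**dimension of the full ball's volume,
    # so under uniform sampling that fraction of points should land inside it.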
ideal = ((1/2)**dimension) * num_samples
assert np.all(norm <= radius**2)
assert abs(actual/ideal-1) <= target
|
the-stack_106_22238 | import re
from unittest.mock import patch
import numpy as np
import pytest
from netcdf_scm.weights import (
CubeWeightCalculator,
InvalidWeights,
get_land_weights,
get_nh_weights,
get_weights_for_area,
multiply_weights,
subtract_weights,
)
@pytest.mark.parametrize("inp", ["fail string", np.array([[1, 2], [3, 4]])])
def test_get_land_mask_input_type_errors(test_all_cubes, inp):
error_msg = re.escape(r"cube must be an SCMCube instance")
masker = CubeWeightCalculator(test_all_cubes)
with pytest.raises(TypeError, match=error_msg):
get_land_weights(masker, test_all_cubes, sftlf_cube=inp)
def test_get_nh_mask(test_all_cubes):
masker = CubeWeightCalculator(test_all_cubes)
result = get_nh_weights(masker, test_all_cubes)
expected = np.array([[1, 1, 1, 1], [1, 1, 1, 1], [0, 0, 0, 0]])
np.testing.assert_array_equal(result, expected)
def test_unknown_mask_error(test_all_cubes):
masker = CubeWeightCalculator(test_all_cubes)
with pytest.raises(InvalidWeights, match="Unknown weights: junk"):
masker.get_weights_array("junk")
@patch(
"netcdf_scm.weights.WEIGHTS_FUNCTIONS_WITHOUT_AREA_WEIGHTING",
{
"Junk": multiply_weights(get_weights_for_area(0, 0, 30, 50), "World|Land"),
"World|Land": get_land_weights,
"Inverse": subtract_weights("Junk", 1),
},
)
def test_no_match_error(test_all_cubes):
tmask_name = "Junk"
error_msg = re.escape(r"All weights are zero for region: `{}`".format(tmask_name))
weighter = CubeWeightCalculator(test_all_cubes)
for i in range(3): # make sure multiple asks still raises
# should be accessible without issue
weighter.get_weights_array("World|Land")
with pytest.raises(ValueError, match=error_msg):
weighter.get_weights_array("Junk")
# should be able to get inverse without problem
res = weighter.get_weights_array("Inverse")
# inverse of Junk should all be non-zero
assert not np.isclose(res, 0).any()
|
the-stack_106_22240 | """
Provide user facing operators for doing the split part of the
split-apply-combine paradigm.
"""
from typing import Tuple
import warnings
import numpy as np
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
ensure_categorical, is_categorical_dtype, is_datetime64_dtype, is_hashable,
is_list_like, is_scalar, is_timedelta64_dtype)
from pandas.core.dtypes.generic import ABCSeries
import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical, ExtensionArray
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.groupby.ops import BaseGrouper
from pandas.core.index import CategoricalIndex, Index, MultiIndex
from pandas.core.series import Series
from pandas.io.formats.printing import pprint_thing
class Grouper:
"""
A Grouper allows the user to specify a groupby instruction for a target
object
This specification will select a column via the key parameter, or if the
level and/or axis parameters are given, a level of the index of the target
object.
These are local specifications and will override 'global' settings,
that is the parameters axis and level which are passed to the groupby
itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
freq : string / frequency object, defaults to None
This will groupby the specified frequency if the target selection
(via key or level) is a datetime-like object. For full specification
of available frequencies, please see `here
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`_.
axis : number/name of the axis, defaults to 0
sort : boolean, default to False
whether to sort the resulting labels
closed : {'left' or 'right'}
Closed end of interval. Only when `freq` parameter is passed.
label : {'left' or 'right'}
Interval boundary to use for labeling.
Only when `freq` parameter is passed.
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex and `freq` parameter is passed.
base : int, default 0
Only when `freq` parameter is passed.
loffset : string, DateOffset, timedelta object
Only when `freq` parameter is passed.
Returns
-------
A specification for a groupby instruction
Examples
--------
Syntactic sugar for ``df.groupby('A')``
>>> df.groupby(Grouper(key='A'))
Specify a resample operation on the column 'date'
>>> df.groupby(Grouper(key='date', freq='60s'))
Specify a resample operation on the level 'date' on the columns axis
with a frequency of 60s
>>> df.groupby(Grouper(level='date', freq='60s', axis=1))
"""
_attributes = ('key', 'level', 'freq', 'axis',
'sort') # type: Tuple[str, ...]
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.core.resample import TimeGrouper
cls = TimeGrouper
return super().__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=0, sort=False):
self.key = key
self.level = level
self.freq = freq
self.axis = axis
self.sort = sort
self.grouper = None
self.obj = None
self.indexer = None
self.binner = None
self._grouper = None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj, validate=True):
"""
Parameters
----------
obj : the subject object
validate : boolean, default True
if True, validate the grouper
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
self.grouper, exclusions, self.obj = _get_grouper(self.obj, [self.key],
axis=self.axis,
level=self.level,
sort=self.sort,
validate=validate)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
given an object and the specifications, setup the internal grouper
for this particular specification
Parameters
----------
obj : the subject object
sort : bool, default False
whether the resulting grouper should be sorted
"""
if self.key is not None and self.level is not None:
raise ValueError(
"The Grouper cannot specify both a key and a level!")
# Keep self.grouper value before overriding
if self._grouper is None:
self._grouper = self.grouper
# the key must be a valid info item
if self.key is not None:
key = self.key
# The 'on' is already defined
if (getattr(self.grouper, 'name', None) == key and
isinstance(obj, ABCSeries)):
ax = self._grouper.take(obj.index)
else:
if key not in obj._info_axis:
raise KeyError(
"The grouper name {0} is not found".format(key))
ax = Index(obj[key], name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
ax = Index(ax._get_level_values(level),
name=ax.names[level])
else:
if level not in (0, ax.name):
raise ValueError(
"The level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
# use stable sort to support first, last, nth
indexer = self.indexer = ax.argsort(kind='mergesort')
ax = ax.take(indexer)
obj = obj._take(indexer, axis=self.axis, is_copy=False)
self.obj = obj
self.grouper = ax
return self.grouper
@property
def groups(self):
return self.grouper.groups
def __repr__(self):
attrs_list = ("{}={!r}".format(attr_name, getattr(self, attr_name))
for attr_name in self._attributes
if getattr(self, attr_name) is not None)
attrs = ", ".join(attrs_list)
cls_name = self.__class__.__name__
return "{}({})".format(cls_name, attrs)
class Grouping:
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
observed : boolean, default False
If we are a Categorical, use the observed values
in_axis : if the Grouping is a column in self.obj and hence among
Groupby.exclusions list
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mapping of label -> group
* counts : array of group counts
* group_index : unique groups
* groups : dict of {group -> label_list}
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True, observed=False, in_axis=False):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.all_grouper = None
self.index = index
self.sort = sort
self.obj = obj
self.observed = observed
self.in_axis = in_axis
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# we have a single grouper which may be a myriad of things,
# some of which are dependent on the passing in level
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError('Level {} not in index'.format(level))
level = index.names.index(level)
if self.name is None:
self.name = index.names[level]
self.grouper, self._labels, self._group_index = \
index._get_grouper_for_level(self.grouper, level)
# a passed Grouper like, directly get the grouper in the same way
# as single grouper groupby, use the group_info to get labels
elif isinstance(self.grouper, Grouper):
# get the new grouper; we already have disambiguated
# what key/level refer to exactly, don't need to
# check again as we have by this point converted these
# to an actual value (rather than a pd.Grouper)
_, grouper, _ = self.grouper._get_grouper(self.obj, validate=False)
if self.name is None:
self.name = grouper.result_index.name
self.obj = self.grouper.obj
self.grouper = grouper._get_grouper()
else:
if self.grouper is None and self.name is not None:
self.grouper = self.obj[self.name]
elif isinstance(self.grouper, (list, tuple)):
self.grouper = com.asarray_tuplesafe(self.grouper)
# a passed Categorical
elif is_categorical_dtype(self.grouper):
from pandas.core.groupby.categorical import recode_for_groupby
self.grouper, self.all_grouper = recode_for_groupby(
self.grouper, self.sort, observed)
categories = self.grouper.categories
# we make a CategoricalIndex out of the cat grouper
# preserving the categories / ordered attributes
self._labels = self.grouper.codes
if observed:
codes = algorithms.unique1d(self.grouper.codes)
codes = codes[codes != -1]
if sort or self.grouper.ordered:
codes = np.sort(codes)
else:
codes = np.arange(len(categories))
self._group_index = CategoricalIndex(
Categorical.from_codes(
codes=codes,
categories=categories,
ordered=self.grouper.ordered))
# we are done
if isinstance(self.grouper, Grouping):
self.grouper = self.grouper.grouper
# no level passed
elif not isinstance(self.grouper,
(Series, Index, ExtensionArray, np.ndarray)):
if getattr(self.grouper, 'ndim', 1) != 1:
t = self.name or str(type(self.grouper))
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(t))
self.grouper = self.index.map(self.grouper)
if not (hasattr(self.grouper, "__len__") and
len(self.grouper) == len(self.index)):
errmsg = ('Grouper result violates len(labels) == '
'len(data)\nresult: %s' %
pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have
# Timestamps like
if getattr(self.grouper, 'dtype', None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from pandas import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping({0})'.format(self.name)
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return len(self.group_index)
@cache_readonly
def indices(self):
# we have a list of groupers
if isinstance(self.grouper, BaseGrouper):
return self.grouper.indices
values = ensure_categorical(self.grouper)
return values._reverse_indexer()
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@cache_readonly
def result_index(self):
if self.all_grouper is not None:
from pandas.core.groupby.categorical import recode_from_groupby
return recode_from_groupby(self.all_grouper,
self.sort, self.group_index)
return self.group_index
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._labels is None or self._group_index is None:
# we have a list of groupers
if isinstance(self.grouper, BaseGrouper):
labels = self.grouper.label_info
uniques = self.grouper.result_index
else:
labels, uniques = algorithms.factorize(
self.grouper, sort=self.sort)
uniques = Index(uniques, name=self.name)
self._labels = labels
self._group_index = uniques
@cache_readonly
def groups(self):
return self.index.groupby(Categorical.from_codes(self.labels,
self.group_index))
def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
observed=False, mutated=False, validate=True):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis,level,sort, while
the passed in axis, level, and sort are 'global'.
This routine tries to figure out what the passing in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
If observed & we have a categorical grouper, only show the observed
values
If validate, then check for key/level overlaps
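    For example (illustrative), ``df.groupby(['A', pd.Grouper(key='date', freq='M')])``
    reaches this routine with two keys -- the column 'A' and a Grouper -- and one
    Grouping is built for each before they are wrapped in a single BaseGrouper.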
"""
group_axis = obj._get_axis(axis)
# validate that the passed single level is compatible with the passed
# axis of the object
if level is not None:
# TODO: These if-block and else-block are almost same.
# MultiIndex instance check is removable, but it seems that there are
# some processes only for non-MultiIndex in else-block,
# eg. `obj.index.name != level`. We have to consider carefully whether
# these are applicable for MultiIndex. Even if these are applicable,
# we need to check if it makes no side effect to subsequent processes
# on the outside of this condition.
# (GH 17621)
if isinstance(group_axis, MultiIndex):
if is_list_like(level) and len(level) == 1:
level = level[0]
if key is None and is_scalar(level):
# Get the level values from group_axis
key = group_axis.get_level_values(level)
level = None
else:
# allow level to be a length-one list-like object
# (e.g., level=[0])
# GH 13901
if is_list_like(level):
nlevels = len(level)
if nlevels == 1:
level = level[0]
elif nlevels == 0:
raise ValueError('No group keys passed!')
else:
raise ValueError('multiple levels only valid with '
'MultiIndex')
if isinstance(level, str):
if obj.index.name != level:
raise ValueError('level name {} is not the name of the '
'index'.format(level))
elif level > 0 or level < -1:
raise ValueError(
'level > 0 or level < -1 only valid with MultiIndex')
# NOTE: `group_axis` and `group_axis.get_level_values(level)`
# are same in this section.
level = None
key = group_axis
# a passed-in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj, validate=False)
if key.key is None:
return grouper, [], obj
else:
return grouper, {key.key}, obj
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
return key, [], obj
# In the future, a tuple key will always mean an actual key,
# not an iterable of keys. In the meantime, we attempt to provide
# a warning. We can assume that the user wanted a list of keys when
# the key is not in the index. We just have to be careful with
# unhashble elements of `key`. Any unhashable elements implies that
# they wanted a list of keys.
# https://github.com/pandas-dev/pandas/issues/18314
is_tuple = isinstance(key, tuple)
all_hashable = is_tuple and is_hashable(key)
if is_tuple:
if ((all_hashable and key not in obj and set(key).issubset(obj))
or not all_hashable):
# column names ('a', 'b') -> ['a', 'b']
# arrays like (a, b) -> [a, b]
msg = ("Interpreting tuple 'by' as a list of keys, rather than "
"a single key. Use 'by=[...]' instead of 'by=(...)'. In "
"the future, a tuple will always mean a single key.")
warnings.warn(msg, FutureWarning, stacklevel=5)
key = list(key)
if not isinstance(key, list):
keys = [key]
match_axis_length = False
else:
keys = key
match_axis_length = len(keys) == len(group_axis)
# what are we after, exactly?
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_groupers = any(isinstance(g, Grouper) for g in keys)
any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
for g in keys)
# is this an index replacement?
if (not any_callable and not any_arraylike and not any_groupers and
match_axis_length and level is None):
if isinstance(obj, DataFrame):
all_in_columns_index = all(g in obj.columns or g in
obj.index.names for g in keys)
elif isinstance(obj, Series):
all_in_columns_index = all(g in obj.index.names for g in keys)
if not all_in_columns_index:
keys = [com.asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
# if the actual grouper should be obj[key]
def is_in_axis(key):
if not _is_label_like(key):
try:
obj._data.items.get_loc(key)
except Exception:
return False
return True
# if the grouper is obj[name]
def is_in_obj(gpr):
try:
return id(gpr) == id(obj[gpr.name])
except Exception:
return False
for i, (gpr, level) in enumerate(zip(keys, levels)):
if is_in_obj(gpr): # df.groupby(df['name'])
in_axis, name = True, gpr.name
exclusions.append(name)
elif is_in_axis(gpr): # df.groupby('name')
if gpr in obj:
if validate:
obj._check_label_or_level_ambiguity(gpr)
in_axis, name, gpr = True, gpr, obj[gpr]
exclusions.append(name)
elif obj._is_level_reference(gpr):
in_axis, name, level, gpr = False, None, gpr, None
else:
raise KeyError(gpr)
elif isinstance(gpr, Grouper) and gpr.key is not None:
# Add key to exclusions
exclusions.append(gpr.key)
in_axis, name = False, None
else:
in_axis, name = False, None
if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]:
raise ValueError(
("Length of grouper ({len_gpr}) and axis ({len_axis})"
" must be same length"
.format(len_gpr=len(gpr), len_axis=obj.shape[axis])))
# create the Grouping
# allow us to passing the actual Grouping as the gpr
ping = (Grouping(group_axis,
gpr,
obj=obj,
name=name,
level=level,
sort=sort,
observed=observed,
in_axis=in_axis)
if not isinstance(gpr, Grouping) else gpr)
groupings.append(ping)
if len(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated)
return grouper, exclusions, obj
def _is_label_like(val):
return (isinstance(val, (str, tuple)) or
(val is not None and is_scalar(val)))
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper._values
else:
return grouper.reindex(axis)._values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise ValueError('Grouper and axis must be same length')
return grouper
else:
return grouper
|
the-stack_106_22242 | # -*- coding: utf-8 -*-
"""
Authors: Tim Hessels and Gonzalo Espinoza
UNESCO-IHE 2016
Contact: [email protected]
[email protected]
Repository: https://github.com/wateraccounting/watools
Module: Collect/CMRSET
Restrictions:
The data and this python file may not be distributed to others without
permission of the WA+ team due data restriction of the CMRSET developers.
Description:
This script collects CMRSET data from the UNESCO-IHE FTP server. The data has a
monthly temporal resolution and a spatial resolution of 0.01 degree. The
resulting tiff files are in the WGS84 projection.
The data is available between 2000-01-01 till 2012-12-31.
Example:
from watools.Collect import CMRSET
CMRSET.monthly(Dir='C:/Temp/', Startdate='2003-02-24', Enddate='2003-03-09',
latlim=[50,54], lonlim=[3,7])
"""
from __future__ import print_function
# General modules
import numpy as np
import os
import pandas as pd
from ftplib import FTP
# Water Accounting Modules
import watools.WebAccounts as WebAccounts
import watools.General.raster_conversions as RC
def DownloadData(Dir, Startdate, Enddate, latlim, lonlim, Waitbar):
"""
This scripts downloads CMRSET ET data from the UNESCO-IHE ftp server.
The output files display the total ET in mm for a period of one month.
The name of the file corresponds to the first day of the month.
Keyword arguments:
Dir -- 'C:/file/to/path/'
Startdate -- 'yyyy-mm-dd'
Enddate -- 'yyyy-mm-dd'
    latlim -- [ymin, ymax] (values must be between -90 and 90)
    lonlim -- [xmin, xmax] (values must be between -180 and 180)
"""
# Check the latitude and longitude and otherwise set lat or lon on greatest extent
if latlim[0] < -90 or latlim[1] > 90:
print('Latitude above 90N or below -90S is not possible. Value set to maximum')
        latlim[0] = np.max([latlim[0], -90])
        latlim[1] = np.min([latlim[1], 90])
if lonlim[0] < -180 or lonlim[1] > 180:
print('Longitude must be between 180E and 180W. Now value is set to maximum')
        lonlim[0] = np.max([lonlim[0], -180])
        lonlim[1] = np.min([lonlim[1], 180])
# Check Startdate and Enddate
if not Startdate:
Startdate = pd.Timestamp('2000-01-01')
if not Enddate:
Enddate = pd.Timestamp('2012-12-31')
# Creates dates library
Dates = pd.date_range(Startdate, Enddate, freq = "MS")
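    # "MS" gives month-start frequency, so one file is downloaded per month and the
    # output name carries the first day of that month.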
# Create Waitbar
if Waitbar == 1:
import watools.Functions.Start.WaitbarConsole as WaitbarConsole
total_amount = len(Dates)
amount = 0
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
# Define directory and create it if not exists
output_folder = os.path.join(Dir, 'Evaporation', 'CMRSET', 'Monthly')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
for Date in Dates:
# Define year and month
year = Date.year
month = Date.month
# Date as printed in filename
Filename_out= os.path.join(output_folder,'ETa_CMRSET_mm-month-1_monthly_%s.%02s.%02s.tif' %(Date.strftime('%Y'), Date.strftime('%m'), Date.strftime('%d')))
# Define end filename
Filename_in = os.path.join("M01CMRSETGlobalY%dM%02d.tif" %(year, month))
# Temporary filename for the downloaded global file
local_filename = os.path.join(output_folder, Filename_in)
# Download the data from FTP server if the file not exists
if not os.path.exists(Filename_out):
try:
Download_CMRSET_from_WA_FTP(local_filename, Filename_in)
# Clip dataset
RC.clip_data(local_filename, latlim, lonlim, output_name=Filename_out)
os.remove(local_filename)
except:
print("Was not able to download file with date %s" %Date)
# Adjust waitbar
if Waitbar == 1:
amount += 1
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
return
def Download_CMRSET_from_WA_FTP(local_filename, Filename_in):
"""
This function retrieves CMRSET data for a given date from the
ftp.wateraccounting.unesco-ihe.org server.
Restrictions:
The data and this python file may not be distributed to others without
permission of the WA+ team due data restriction of the CMRSET developers.
Keyword arguments:
local_filename -- name of the temporary file which contains global CMRSET data
Filename_in -- name of the end file with the monthly CMRSET data
"""
# Collect account and FTP information
username, password = WebAccounts.Accounts(Type = 'FTP_WA')
ftpserver = "ftp.wateraccounting.unesco-ihe.org"
# Download data from FTP
ftp=FTP(ftpserver)
ftp.login(username,password)
directory="/WaterAccounting/Data_Satellite/Evaporation/CMRSET/Global/"
ftp.cwd(directory)
lf = open(local_filename, "wb")
ftp.retrbinary("RETR " + Filename_in, lf.write)
lf.close()
return
|
the-stack_106_22243 | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet."""
from decimal import Decimal
import time
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_array_result,
assert_equal,
assert_fee_amount,
assert_raises_rpc_error,
connect_nodes,
wait_until,
)
from test_framework.wallet_util import test_address
class WalletTest(SyscoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.extra_args = [[
"-acceptnonstdtxn=1",
]] * self.num_nodes
self.setup_clean_chain = True
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
# Only need nodes 0-2 running at start of test
self.stop_node(3)
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[0], 2)
self.sync_all(self.nodes[0:3])
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
assert_fee_amount(fee, tx_size, fee_per_byte * 1000)
return curr_balance
def get_vsize(self, txn):
return self.nodes[0].decoderawtransaction(txn)['vsize']
def run_test(self):
# Check that there's no UTXO on none of the nodes
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 50)
assert_equal(walletinfo['balance'], 0)
self.sync_all(self.nodes[0:3])
self.nodes[1].generate(101)
self.sync_all(self.nodes[0:3])
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
utxos = self.nodes[0].listunspent()
assert_equal(len(utxos), 1)
assert_equal(len(self.nodes[1].listunspent()), 1)
assert_equal(len(self.nodes[2].listunspent()), 0)
self.log.info("test gettxout")
confirmed_txid, confirmed_index = utxos[0]["txid"], utxos[0]["vout"]
# First, outputs that are unspent both in the chain and in the
# mempool should appear with or without include_mempool
txout = self.nodes[0].gettxout(txid=confirmed_txid, n=confirmed_index, include_mempool=False)
assert_equal(txout['value'], 50)
txout = self.nodes[0].gettxout(txid=confirmed_txid, n=confirmed_index, include_mempool=True)
assert_equal(txout['value'], 50)
# Send 21 SYS from 0 to 2 using sendtoaddress call.
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
mempool_txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.log.info("test gettxout (second part)")
# utxo spent in mempool should be visible if you exclude mempool
# but invisible if you include mempool
txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index, False)
assert_equal(txout['value'], 50)
txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index, True)
assert txout is None
# new utxo from mempool should be invisible if you exclude mempool
# but visible if you include mempool
txout = self.nodes[0].gettxout(mempool_txid, 0, False)
assert txout is None
txout1 = self.nodes[0].gettxout(mempool_txid, 0, True)
txout2 = self.nodes[0].gettxout(mempool_txid, 1, True)
# note the mempool tx will have randomly assigned indices
# but 10 will go to node2 and the rest will go to node0
balance = self.nodes[0].getbalance()
assert_equal(set([txout1['value'], txout2['value']]), set([10, balance]))
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all(self.nodes[0:3])
# Exercise locking of unspent outputs
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
assert_raises_rpc_error(-8, "Invalid parameter, expected locked output", self.nodes[2].lockunspent, True, [unspent_0])
self.nodes[2].lockunspent(False, [unspent_0])
assert_raises_rpc_error(-8, "Invalid parameter, output already locked", self.nodes[2].lockunspent, False, [unspent_0])
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
assert_raises_rpc_error(-8, "txid must be of length 64 (not 34, for '0000000000000000000000000000000000')",
self.nodes[2].lockunspent, False,
[{"txid": "0000000000000000000000000000000000", "vout": 0}])
assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')",
self.nodes[2].lockunspent, False,
[{"txid": "ZZZ0000000000000000000000000000000000000000000000000000000000000", "vout": 0}])
assert_raises_rpc_error(-8, "Invalid parameter, unknown transaction",
self.nodes[2].lockunspent, False,
[{"txid": "0000000000000000000000000000000000000000000000000000000000000000", "vout": 0}])
assert_raises_rpc_error(-8, "Invalid parameter, vout index out of bounds",
self.nodes[2].lockunspent, False,
[{"txid": unspent_0["txid"], "vout": 999}])
# An output should be unlocked when spent
unspent_0 = self.nodes[1].listunspent()[0]
self.nodes[1].lockunspent(False, [unspent_0])
tx = self.nodes[1].createrawtransaction([unspent_0], { self.nodes[1].getnewaddress() : 1 })
tx = self.nodes[1].fundrawtransaction(tx)['hex']
tx = self.nodes[1].signrawtransactionwithwallet(tx)["hex"]
self.nodes[1].sendrawtransaction(tx)
assert_equal(len(self.nodes[1].listlockunspent()), 0)
# Have node1 generate 100 blocks (so node0 can recover the fee)
self.nodes[1].generate(100)
self.sync_all(self.nodes[0:3])
# node0 should end up with 100 sys in block rewards plus fees, but
# minus the 21 plus fees sent to node2
assert_equal(self.nodes[0].getbalance(), 100 - 21)
assert_equal(self.nodes[2].getbalance(), 21)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
outputs[self.nodes[2].getnewaddress()] = utxo["amount"] - 3
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransactionwithwallet(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(hexstring=txns_to_send[0]["hex"], maxfeerate=0)
self.nodes[1].sendrawtransaction(hexstring=txns_to_send[1]["hex"], maxfeerate=0)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all(self.nodes[0:3])
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 94)
# Verify that a spent output cannot be locked anymore
spent_0 = {"txid": node0utxos[0]["txid"], "vout": node0utxos[0]["vout"]}
assert_raises_rpc_error(-8, "Invalid parameter, expected unspent output", self.nodes[0].lockunspent, False, [spent_0])
# Send 10 SYS normal
address = self.nodes[0].getnewaddress("test")
fee_per_byte = Decimal('0.001') / 1000
self.nodes[2].settxfee(fee_per_byte * 1000)
txid = self.nodes[2].sendtoaddress(address, 10, "", "", False)
self.nodes[2].generate(1)
self.sync_all(self.nodes[0:3])
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('84'), fee_per_byte, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
assert_equal(self.nodes[0].getbalance(), Decimal('10'))
# Send 10 SYS with subtract fee from amount
txid = self.nodes[2].sendtoaddress(address, 10, "", "", True)
self.nodes[2].generate(1)
self.sync_all(self.nodes[0:3])
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('20'), fee_per_byte, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
# Sendmany 10 SYS
txid = self.nodes[2].sendmany('', {address: 10}, 0, "", [])
self.nodes[2].generate(1)
self.sync_all(self.nodes[0:3])
node_0_bal += Decimal('10')
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Sendmany 10 SYS with subtract fee from amount
txid = self.nodes[2].sendmany('', {address: 10}, 0, "", [address])
self.nodes[2].generate(1)
self.sync_all(self.nodes[0:3])
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
self.start_node(3)
connect_nodes(self.nodes[0], 3)
self.sync_all()
# check if we can list zero value tx as available coins
# 1. create raw_tx
# 2. hex-changed one output to 0.0
# 3. sign and send
# 4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent(query_options={'minimumAmount': '49.998'})[0]
inputs = [{"txid": usp['txid'], "vout": usp['vout']}]
outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11}
raw_tx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") # replace 11.11 with 0.0 (int32)
signed_raw_tx = self.nodes[1].signrawtransactionwithwallet(raw_tx)
decoded_raw_tx = self.nodes[1].decoderawtransaction(signed_raw_tx['hex'])
zero_value_txid = decoded_raw_tx['txid']
self.nodes[1].sendrawtransaction(signed_raw_tx['hex'])
self.sync_all()
self.nodes[1].generate(1) # mine a block
self.sync_all()
unspent_txs = self.nodes[0].listunspent() # zero value tx must be in listunspents output
found = False
for uTx in unspent_txs:
if uTx['txid'] == zero_value_txid:
found = True
assert_equal(uTx['amount'], Decimal('0'))
assert found
# do some -walletbroadcast tests
self.stop_nodes()
self.start_node(0, ["-walletbroadcast=0"])
self.start_node(1, ["-walletbroadcast=0"])
self.start_node(2, ["-walletbroadcast=0"])
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[0], 2)
self.sync_all(self.nodes[0:3])
txid_not_broadcast = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
tx_obj_not_broadcast = self.nodes[0].gettransaction(txid_not_broadcast)
self.nodes[1].generate(1) # mine a block, tx should not be in there
self.sync_all(self.nodes[0:3])
assert_equal(self.nodes[2].getbalance(), node_2_bal) # should not be changed because tx was not broadcast
# now broadcast from another node, mine a block, sync, and check the balance
self.nodes[1].sendrawtransaction(tx_obj_not_broadcast['hex'])
self.nodes[1].generate(1)
self.sync_all(self.nodes[0:3])
node_2_bal += 2
tx_obj_not_broadcast = self.nodes[0].gettransaction(txid_not_broadcast)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
# create another tx
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
# restart the nodes with -walletbroadcast=1
self.stop_nodes()
self.start_node(0)
self.start_node(1)
self.start_node(2)
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[0], 2)
self.sync_blocks(self.nodes[0:3])
self.nodes[0].generate(1)
self.sync_blocks(self.nodes[0:3])
node_2_bal += 2
# tx should be added to balance because after restarting the nodes tx should be broadcast
assert_equal(self.nodes[2].getbalance(), node_2_bal)
# send a tx with value in a string (PR#6380 +)
txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
tx_obj = self.nodes[0].gettransaction(txid)
assert_equal(tx_obj['amount'], Decimal('-2'))
txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
tx_obj = self.nodes[0].gettransaction(txid)
assert_equal(tx_obj['amount'], Decimal('-0.0001'))
# check if JSON parser can handle scientific notation in strings
txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
tx_obj = self.nodes[0].gettransaction(txid)
assert_equal(tx_obj['amount'], Decimal('-0.0001'))
# General checks for errors from incorrect inputs
# This will raise an exception because the amount type is wrong
assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].sendtoaddress, self.nodes[2].getnewaddress(), "1f-4")
# This will raise an exception since generate does not accept a string
assert_raises_rpc_error(-1, "not an integer", self.nodes[0].generate, "2")
# This will raise an exception for the invalid private key format
assert_raises_rpc_error(-5, "Invalid private key encoding", self.nodes[0].importprivkey, "invalid")
# This will raise an exception for importing an address with the P2SH flag
temp_address = self.nodes[1].getnewaddress("", "p2sh-segwit")
assert_raises_rpc_error(-5, "Cannot use the p2sh flag with an address - use a script instead", self.nodes[0].importaddress, temp_address, "label", False, True)
# This will raise an exception for attempting to dump the private key of an address you do not own
assert_raises_rpc_error(-3, "Address does not refer to a key", self.nodes[0].dumpprivkey, temp_address)
# This will raise an exception for attempting to get the private key of an invalid Syscoin address
assert_raises_rpc_error(-5, "Invalid Syscoin address", self.nodes[0].dumpprivkey, "invalid")
# This will raise an exception for attempting to set a label for an invalid Syscoin address
assert_raises_rpc_error(-5, "Invalid Syscoin address", self.nodes[0].setlabel, "invalid address", "label")
# This will raise an exception for importing an invalid address
assert_raises_rpc_error(-5, "Invalid Syscoin address or script", self.nodes[0].importaddress, "invalid")
# This will raise an exception for attempting to import a pubkey that isn't in hex
assert_raises_rpc_error(-5, "Pubkey must be a hex string", self.nodes[0].importpubkey, "not hex")
# This will raise an exception for importing an invalid pubkey
assert_raises_rpc_error(-5, "Pubkey is not a valid public key", self.nodes[0].importpubkey, "5361746f736869204e616b616d6f746f")
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
txid = self.nodes[0].sendtoaddress(address_to_import, 1)
self.nodes[0].generate(1)
self.sync_all(self.nodes[0:3])
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert self.nodes[1].getaddressinfo(address_to_import)["iswatchonly"]
# 4. Check that the unspents after import are not spendable
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
# Mine a block from node0 to an address from node1
coinbase_addr = self.nodes[1].getnewaddress()
block_hash = self.nodes[0].generatetoaddress(1, coinbase_addr)[0]
coinbase_txid = self.nodes[0].getblock(block_hash)['tx'][0]
self.sync_all(self.nodes[0:3])
# Check that the txid and balance is found by node1
self.nodes[1].gettransaction(coinbase_txid)
# check if wallet or blockchain maintenance changes the balance
self.sync_all(self.nodes[0:3])
blocks = self.nodes[0].generate(2)
self.sync_all(self.nodes[0:3])
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
# Check modes:
# - True: unicode escaped as \u....
# - False: unicode directly as UTF-8
for mode in [True, False]:
self.nodes[0].rpc.ensure_ascii = mode
# unicode check: Basic Multilingual Plane, Supplementary Plane respectively
for label in [u'рыба', u'𝅘𝅥𝅯']:
addr = self.nodes[0].getnewaddress()
self.nodes[0].setlabel(addr, label)
test_address(self.nodes[0], addr, labels=[label])
assert label in self.nodes[0].listlabels()
self.nodes[0].rpc.ensure_ascii = True # restore to default
# maintenance tests
maintenance = [
'-rescan',
'-reindex',
'-zapwallettxes=1',
'-zapwallettxes=2',
# disabled until issue is fixed: https://github.com/syscoin/syscoin/issues/7463
# '-salvagewallet',
]
chainlimit = 6
for m in maintenance:
self.log.info("check " + m)
self.stop_nodes()
# set lower ancestor limit for later
self.start_node(0, [m, "-limitancestorcount=" + str(chainlimit)])
self.start_node(1, [m, "-limitancestorcount=" + str(chainlimit)])
self.start_node(2, [m, "-limitancestorcount=" + str(chainlimit)])
if m == '-reindex':
# reindex will leave rpc warm-up "early"; wait for it to finish
wait_until(lambda: [block_count] * 3 == [self.nodes[i].getblockcount() for i in range(3)])
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
assert_equal(coinbase_tx_1["lastblock"], blocks[1])
assert_equal(len(coinbase_tx_1["transactions"]), 1)
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
# ==Check that wallet prefers to use coins that don't exceed mempool limits =====
# Get all non-zero utxos together
chain_addrs = [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()]
singletxid = self.nodes[0].sendtoaddress(chain_addrs[0], self.nodes[0].getbalance(), "", "", True)
self.nodes[0].generate(1)
node0_balance = self.nodes[0].getbalance()
# Split into two chains
rawtx = self.nodes[0].createrawtransaction([{"txid": singletxid, "vout": 0}], {chain_addrs[0]: node0_balance / 2 - Decimal('0.01'), chain_addrs[1]: node0_balance / 2 - Decimal('0.01')})
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx)
singletxid = self.nodes[0].sendrawtransaction(hexstring=signedtx["hex"], maxfeerate=0)
self.nodes[0].generate(1)
# Make a long chain of unconfirmed payments without hitting mempool limit
# Each tx we make leaves only one output of change on a chain 1 longer
# Since the amount to send is always much less than the outputs, we only ever need one output
# So we should be able to generate exactly chainlimit txs for each original output
sending_addr = self.nodes[1].getnewaddress()
txid_list = []
for i in range(chainlimit * 2):
txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001')))
assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit * 2)
assert_equal(len(txid_list), chainlimit * 2)
# Without walletrejectlongchains, we will still generate a txid
# The tx will be stored in the wallet but not accepted to the mempool
extra_txid = self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001'))
assert extra_txid not in self.nodes[0].getrawmempool()
assert extra_txid in [tx["txid"] for tx in self.nodes[0].listtransactions()]
self.nodes[0].abandontransaction(extra_txid)
total_txs = len(self.nodes[0].listtransactions("*", 99999))
# Try with walletrejectlongchains
# Double chain limit but require combining inputs, so we pass SelectCoinsMinConf
self.stop_node(0)
self.start_node(0, extra_args=["-walletrejectlongchains", "-limitancestorcount=" + str(2 * chainlimit)])
# wait for loadmempool
timeout = 10
while (timeout > 0 and len(self.nodes[0].getrawmempool()) < chainlimit * 2):
time.sleep(0.5)
timeout -= 0.5
assert_equal(len(self.nodes[0].getrawmempool()), chainlimit * 2)
node0_balance = self.nodes[0].getbalance()
# With walletrejectlongchains we will not create the tx and store it in our wallet.
assert_raises_rpc_error(-4, "Transaction has too long of a mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01'))
# Verify nothing new in wallet
assert_equal(total_txs, len(self.nodes[0].listtransactions("*", 99999)))
# Test getaddressinfo on external address. Note that these addresses are taken from disablewallet.py
assert_raises_rpc_error(-5, "Invalid address", self.nodes[0].getaddressinfo, "3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy")
address_info = self.nodes[0].getaddressinfo("mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ")
assert_equal(address_info['address'], "mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ")
assert_equal(address_info["scriptPubKey"], "76a9144e3854046c7bd1594ac904e4793b6a45b36dea0988ac")
assert not address_info["ismine"]
assert not address_info["iswatchonly"]
assert not address_info["isscript"]
assert not address_info["ischange"]
# Test getaddressinfo 'ischange' field on change address.
self.nodes[0].generate(1)
destination = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(destination, 0.123)
tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(txid)['hex'])
output_addresses = [vout['scriptPubKey']['addresses'][0] for vout in tx["vout"]]
assert len(output_addresses) > 1
for address in output_addresses:
ischange = self.nodes[0].getaddressinfo(address)['ischange']
assert_equal(ischange, address != destination)
if ischange:
change = address
self.nodes[0].setlabel(change, 'foobar')
assert_equal(self.nodes[0].getaddressinfo(change)['ischange'], False)
# Test gettransaction response with different arguments.
self.log.info("Testing gettransaction response with different arguments...")
self.nodes[0].setlabel(change, 'baz')
baz = self.nodes[0].listtransactions(label="baz", count=1)[0]
expected_receive_vout = {"label": "baz",
"address": baz["address"],
"amount": baz["amount"],
"category": baz["category"],
"vout": baz["vout"]}
expected_fields = frozenset({'amount', 'bip125-replaceable', 'confirmations', 'details', 'fee',
'hex', 'time', 'timereceived', 'trusted', 'txid', 'walletconflicts'})
verbose_field = "decoded"
expected_verbose_fields = expected_fields | {verbose_field}
self.log.debug("Testing gettransaction response without verbose")
tx = self.nodes[0].gettransaction(txid=txid)
assert_equal(set([*tx]), expected_fields)
assert_array_result(tx["details"], {"category": "receive"}, expected_receive_vout)
self.log.debug("Testing gettransaction response with verbose set to False")
tx = self.nodes[0].gettransaction(txid=txid, verbose=False)
assert_equal(set([*tx]), expected_fields)
assert_array_result(tx["details"], {"category": "receive"}, expected_receive_vout)
self.log.debug("Testing gettransaction response with verbose set to True")
tx = self.nodes[0].gettransaction(txid=txid, verbose=True)
assert_equal(set([*tx]), expected_verbose_fields)
assert_array_result(tx["details"], {"category": "receive"}, expected_receive_vout)
assert_equal(tx[verbose_field], self.nodes[0].decoderawtransaction(tx["hex"]))
if __name__ == '__main__':
WalletTest().main()
|
the-stack_106_22244 | #!/usr/bin/env python
import os
import sys
import glob
from functools import lru_cache
from django.db import transaction
import click
from openstates import metadata
from openstates.utils.django import init_django
from utils import (
get_data_dir,
get_jurisdiction_id,
get_all_abbreviations,
load_yaml,
legacy_districts,
role_is_active,
load_municipalities,
MAJOR_PARTIES,
)
class CancelTransaction(Exception):
pass
@lru_cache(128)
def cached_lookup(ModelCls, **kwargs):
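""" Memoized ORM lookup: repeated (ModelCls, **kwargs) queries are served from the LRU cache instead of hitting the database. """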
return ModelCls.objects.get(**kwargs)
def update_subobjects(person, fieldname, objects, read_manager=None):
""" returns True if there are any updates """
# we need the default manager for this field in case we need to do updates
manager = getattr(person, fieldname)
# if a read_manager is passed, we'll use that for all read operations
# this is used for Person.memberships to ensure we don't wipe out committee memberships
if read_manager is None:
read_manager = manager
current_count = read_manager.count()
updated = False
# if counts differ, we need to do an update for sure
if current_count != len(objects):
updated = True
# check if all objects exist
if not updated:
qs = read_manager
for obj in objects:
qs = qs.exclude(**obj)
if qs.exists():
updated = True
# if there's been an update, wipe the old & insert the new
if updated:
if current_count:
read_manager.all().delete()
for obj in objects:
manager.create(**obj)
# save to bump updated_at timestamp
person.save()
return updated
def get_update_or_create(ModelCls, data, lookup_keys):
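""" Fetch ModelCls by the lookup_keys subset of data, update any fields that changed, or create the object if it does not exist. Returns (obj, created, updated). """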
updated = created = False
kwargs = {k: data[k] for k in lookup_keys}
try:
obj = ModelCls.objects.get(**kwargs)
for field, value in data.items():
if getattr(obj, field) != value:
setattr(obj, field, value)
updated = True
if updated:
obj.save()
except ModelCls.DoesNotExist:
obj = ModelCls.objects.create(**data)
created = True
return obj, created, updated
def load_person(data):
# import has to be here so that Django is set up
from openstates.data.models import Person, Organization, Post
fields = dict(
id=data["id"],
name=data["name"],
given_name=data.get("given_name", ""),
family_name=data.get("family_name", ""),
gender=data.get("gender", ""),
biography=data.get("biography", ""),
birth_date=data.get("birth_date", ""),
death_date=data.get("death_date", ""),
image=data.get("image", ""),
extras=data.get("extras", {}),
)
person, created, updated = get_update_or_create(Person, fields, ["id"])
updated |= update_subobjects(person, "other_names", data.get("other_names", []))
updated |= update_subobjects(person, "links", data.get("links", []))
updated |= update_subobjects(person, "sources", data.get("sources", []))
identifiers = []
for scheme, value in data.get("ids", {}).items():
identifiers.append({"scheme": scheme, "identifier": value})
for identifier in data.get("other_identifiers", []):
identifiers.append(identifier)
updated |= update_subobjects(person, "identifiers", identifiers)
contact_details = []
for cd in data.get("contact_details", []):
for type in ("address", "email", "voice", "fax"):
if cd.get(type):
contact_details.append(
{"note": cd.get("note", ""), "type": type, "value": cd[type]}
)
updated |= update_subobjects(person, "contact_details", contact_details)
memberships = []
primary_party = ""
current_jurisdiction_id = None
current_role = None
for party in data.get("party", []):
party_name = party["name"]
try:
org = cached_lookup(Organization, classification="party", name=party["name"])
except Organization.DoesNotExist:
click.secho(f"no such party {party['name']}", fg="red")
raise CancelTransaction()
memberships.append(
{
"organization": org,
"start_date": party.get("start_date", ""),
"end_date": party.get("end_date", ""),
}
)
if role_is_active(party):
if primary_party in MAJOR_PARTIES and party_name in MAJOR_PARTIES:
raise ValueError(f"two primary parties for ({data['name']} {data['id']})")
elif primary_party in MAJOR_PARTIES:
# already set correct primary party, so do nothing
pass
else:
primary_party = party_name
for role in data.get("roles", []):
if role["type"] in ("mayor",):
role_name = "Mayor"
org_type = "government"
use_district = False
elif role["type"] == "governor":
role_name = "Governor"
if role["jurisdiction"] == "ocd-jurisdiction/country:us/district:dc/government":
role_name = "Mayor"
org_type = "executive"
use_district = False
elif role["type"] in ("secretary of state", "chief election officer"):
role_name = role["type"].title()
org_type = "executive"
use_district = False
elif role["type"] in ("upper", "lower", "legislature"):
org_type = role["type"]
use_district = True
else:
raise ValueError(f"unsupported role type: {role['type']}")
try:
org = cached_lookup(
Organization, classification=org_type, jurisdiction_id=role["jurisdiction"]
)
if use_district:
post = org.posts.get(label=role["district"])
else:
post = None
except Organization.DoesNotExist:
click.secho(
f"{person} no such organization {role['jurisdiction']} {org_type}", fg="red"
)
raise CancelTransaction()
except Post.DoesNotExist:
# if this is a legacy district, be quiet
lds = legacy_districts(jurisdiction_id=role["jurisdiction"])
if role["district"] not in lds[role["type"]]:
click.secho(f"no such post {role}", fg="red")
raise CancelTransaction()
else:
post = None
if role_is_active(role):
current_jurisdiction_id = role["jurisdiction"]
current_role = {"org_classification": org_type, "district": None, "division_id": None}
if use_district:
state_metadata = metadata.lookup(jurisdiction_id=role["jurisdiction"])
district = state_metadata.lookup_district(
name=str(role["district"]), chamber=role["type"]
)
assert district
current_role["division_id"] = district.division_id
current_role["title"] = getattr(state_metadata, role["type"]).title
# try to force district to an int for sorting, but allow strings for non-numeric districts
try:
current_role["district"] = int(role["district"])
except ValueError:
current_role["district"] = str(role["district"])
else:
current_role["title"] = role_name
elif not current_jurisdiction_id:
current_jurisdiction_id = role["jurisdiction"]
membership = {
"organization": org,
"post": post,
"start_date": role.get("start_date", ""),
"end_date": role.get("end_date", ""),
}
if not use_district:
membership["role"] = role_name
memberships.append(membership)
# note that we don't manage committee memberships here
updated |= update_subobjects(
person,
"memberships",
memberships,
read_manager=person.memberships.exclude(organization__classification="committee"),
)
# set computed fields (avoid extra save)
if (
person.primary_party != primary_party
or person.current_role != current_role
or person.current_jurisdiction_id != current_jurisdiction_id
):
person.primary_party = primary_party
person.current_role = current_role
person.current_jurisdiction_id = current_jurisdiction_id
person.save()
return created, updated
def _echo_org_status(org, created, updated):
if created:
click.secho(f"{org} created", fg="green")
elif updated:
click.secho(f"{org} updated", fg="yellow")
def load_directory(files, type, jurisdiction_id, purge):
ids = set()
merged = {}
created_count = 0
updated_count = 0
if type == "person":
from openstates.data.models import Person, BillSponsorship, PersonVote
existing_ids = set(
Person.objects.filter(
memberships__organization__jurisdiction_id=jurisdiction_id
).values_list("id", flat=True)
)
ModelCls = Person
load_func = load_person
else:
raise ValueError(type)
all_data = []
for filename in files:
with open(filename) as f:
data = load_yaml(f)
all_data.append((data, filename))
for data, filename in all_data:
ids.add(data["id"])
created, updated = load_func(data)
if created:
click.secho(f"created {type} from {filename}", fg="cyan", bold=True)
created_count += 1
elif updated:
click.secho(f"updated {type} from {filename}", fg="cyan")
updated_count += 1
missing_ids = existing_ids - ids
# check if missing ids are in need of a merge
for missing_id in missing_ids:
try:
found = ModelCls.objects.get(
identifiers__identifier=missing_id, identifiers__scheme="openstates"
)
merged[missing_id] = found.id
except ModelCls.DoesNotExist:
pass
if merged:
click.secho(f"{len(merged)} removed via merge", fg="yellow")
for old, new in merged.items():
click.secho(f" {old} => {new}", fg="yellow")
BillSponsorship.objects.filter(person_id=old).update(person_id=new)
PersonVote.objects.filter(voter_id=old).update(voter_id=new)
ModelCls.objects.filter(id=old).delete()
missing_ids.remove(old)
# ids that are still missing would need to be purged
if missing_ids and not purge:
click.secho(f"{len(missing_ids)} went missing, run with --purge to remove", fg="red")
for id in missing_ids:
mobj = ModelCls.objects.get(pk=id)
click.secho(f" {id}: {mobj}")
raise CancelTransaction()
elif missing_ids and purge:
click.secho(f"{len(missing_ids)} purged", fg="yellow")
ModelCls.objects.filter(id__in=missing_ids).delete()
click.secho(
f"processed {len(ids)} {type} files, {created_count} created, " f"{updated_count} updated",
fg="green",
)
def create_parties():
from openstates.data.models import Organization
settings_file = os.path.join(os.path.dirname(__file__), "../settings.yml")
with open(settings_file) as f:
settings = load_yaml(f)
parties = settings["parties"]
for party in parties:
org, created = Organization.objects.get_or_create(name=party, classification="party")
if created:
click.secho(f"created party: {party}", fg="green")
def create_municipalities(jurisdictions):
from openstates.data.models import Jurisdiction, Organization
for jurisdiction in jurisdictions:
j, created = Jurisdiction.objects.get_or_create(
id=jurisdiction["id"], name=jurisdiction["name"], classification="municipality"
)
if created:
click.secho(f"created jurisdiction: {j.name}", fg="green")
o, created = Organization.objects.get_or_create(
jurisdiction=j, classification="government", name=f"{jurisdiction['name']} Government"
)
if created:
click.secho(f"created organization: {o.name}", fg="green")
@click.command()
@click.argument("abbreviations", nargs=-1)
@click.option(
"--purge/--no-purge", default=False, help="Purge all legislators from DB that aren't in YAML."
)
@click.option(
"--safe/--no-safe",
default=False,
help="Operate in safe mode, no changes will be written to database.",
)
def to_database(abbreviations, purge, safe):
"""
Sync YAML files to DB.
"""
init_django()
create_parties()
if not abbreviations:
abbreviations = get_all_abbreviations()
for abbr in abbreviations:
click.secho("==== {} ====".format(abbr), bold=True)
directory = get_data_dir(abbr)
jurisdiction_id = get_jurisdiction_id(abbr)
municipalities = load_municipalities(abbr)
with transaction.atomic():
create_municipalities(municipalities)
person_files = (
glob.glob(os.path.join(directory, "legislature/*.yml"))
+ glob.glob(os.path.join(directory, "executive/*.yml"))
+ glob.glob(os.path.join(directory, "municipalities/*.yml"))
+ glob.glob(os.path.join(directory, "retired/*.yml"))
)
if safe:
click.secho("running in safe mode, no changes will be made", fg="magenta")
try:
with transaction.atomic():
load_directory(person_files, "person", jurisdiction_id, purge=purge)
if safe:
click.secho("ran in safe mode, no changes were made", fg="magenta")
raise CancelTransaction()
except CancelTransaction:
sys.exit(1)
if __name__ == "__main__":
to_database()
|
the-stack_106_22245 | from core.helpers import Url
from core.helpers import Comparisons
from core.library import Manage
import json
import core
import datetime
from core import searcher
import xml.etree.cElementTree as ET
import re
import logging
logging = logging.getLogger(__name__)
searcher = searcher
date_format = '%a, %d %b %Y %H:%M:%S'
trakt_date_format = '%Y-%m-%dT%H:%M:%S'
def sync():
''' Syncs all enabled Trakt lists and rss lists
Gets list of movies from each enabled Trakt lists
Adds missing movies to library as Waiting/Default
Returns bool for success/failure
'''
logging.info('Syncing Trakt lists.')
success = True
min_score = core.CONFIG['Search']['Watchlists']['traktscore']
length = core.CONFIG['Search']['Watchlists']['traktlength']
movies = []
if core.CONFIG['Search']['Watchlists']['traktrss']:
sync_rss()
for k, v in core.CONFIG['Search']['Watchlists']['Traktlists'].items():
if v is False:
continue
movies += [i for i in get_list(k, min_score=min_score, length=length) if i not in movies]
library = [i['imdbid'] for i in core.sql.get_user_movies()]
movies = [i for i in movies if ((i['ids']['imdb'] not in library) and (i['ids']['imdb'] != 'N/A'))]
logging.info('Found {} new movies from Trakt lists.'.format(len(movies)))
for i in movies:
imdbid = i['ids']['imdb']
logging.info('Adding movie {} {} from Trakt'.format(i['title'], imdbid))
added = Manage.add_movie({'id': i['ids']['tmdb'],
'imdbid': i['ids']['imdb'],
'title': i['title'],
'origin': 'Trakt'})
try:
if added['response'] and core.CONFIG['Search']['searchafteradd'] and i['year'] != 'N/A':
searcher.search(imdbid, i['title'], i['year'], core.config.default_profile())
except Exception as e:
logging.error('Movie {} did not get added.'.format(i['title']), exc_info=False)
return success
def sync_rss():
''' Gets list of new movies in user's rss feed(s)
Returns list of movie dicts
'''
try:
record = json.loads(core.sql.system('trakt_sync_record'))
except Exception as e:
record = {}
for url in core.CONFIG['Search']['Watchlists']['traktrss'].split(','):
list_id = url.split('.atom')[0].split('/')[-1]
last_sync = record.get(list_id) or 'Sat, 01 Jan 2000 00:00:00'
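# default far in the past so a list that has never been synced pulls every entry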
last_sync = datetime.datetime.strptime(last_sync, date_format)
logging.info('Syncing Trakt RSS watchlist {}. Last sync: {}'.format(list_id, last_sync))
try:
feed = Url.open(url).text
feed = re.sub(r'xmlns=".*?"', r'', feed)
root = ET.fromstring(feed)
except Exception as e:
logging.error('Trakt rss request:\n{}'.format(feed), exc_info=True)
continue
d = root.find('updated').text[:19]
do = datetime.datetime.strptime(d, trakt_date_format)
record[list_id] = datetime.datetime.strftime(do, date_format)
for entry in root.iter('entry'):
try:
pub = datetime.datetime.strptime(entry.find('published').text[:19], trakt_date_format)
if last_sync >= pub:
break
else:
t = entry.find('title').text
title = ' ('.join(t.split(' (')[:-1])
year = ''
for i in t.split(' (')[-1]:
if i.isdigit():
year += i
year = int(year)
logging.info('Searching TheMovieDatabase for {} {}'.format(title, year))
movie = Manage.tmdb._search_title('{} {}'.format(title, year))[0]
if movie:
movie['origin'] = 'Trakt'
logging.info('Found new watchlist movie {} {}'.format(title, year))
r = Manage.add_movie(movie)
if r['response'] and core.CONFIG['Search']['searchafteradd'] and movie['year'] != 'N/A':
searcher.search(movie['imdbid'], movie['title'], movie['year'], core.config.default_profile())
else:
logging.warning('Unable to find {} {} on TheMovieDatabase'.format(title, year))
except Exception as e:
logging.error('Unable to parse Trakt RSS list entry.', exc_info=True)
logging.info('Storing last synced date.')
if core.sql.row_exists('SYSTEM', name='trakt_sync_record'):
core.sql.update('SYSTEM', 'data', json.dumps(record), 'name', 'trakt_sync_record')
else:
core.sql.write('SYSTEM', {'data': json.dumps(record), 'name': 'trakt_sync_record'})
logging.info('Trakt RSS sync complete.')
def get_list(list_name, min_score=0, length=10):
''' Gets list of trending movies from Trakt
list_name (str): name of Trakt list. Must be one of ('trending', 'popular', 'watched', 'collected', 'anticipated', 'boxoffice')
min_score (float): minimum score to accept (max 10) <optional - default 0>
length (int): how many results to get from Trakt <optional - default 10>
Length is applied before min_score, so actual result count
can be less than length
Returns list of dicts of movie info
'''
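# Illustrative call (values are made up): get_list('trending', min_score=7, length=20)
# returns up to 20 movie dicts from Trakt's trending list, keeping only entries rated >= 7.0.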
logging.info('Getting Trakt list {}'.format(list_name))
headers = {'Content-Type': 'application/json',
'trakt-api-version': '2',
'trakt-api-key': Comparisons._k(b'trakt')
}
if list_name not in ('trending', 'popular', 'watched', 'collected', 'anticipated', 'boxoffice'):
logging.error('Invalid list_name {}'.format(list_name))
return []
url = 'https://api.trakt.tv/movies/{}/?extended=full'.format(list_name)
try:
r = Url.open(url, headers=headers)
if r.status_code != 200:
return []
m = json.loads(r.text)[:length]
if list_name == 'popular':
return [i for i in m if i['rating'] >= min_score]
return [i['movie'] for i in m if i['movie']['rating'] >= min_score]
except Exception as e:
logging.error('Unable to get Trakt list.', exc_info=True)
return []
|
the-stack_106_22249 | from __future__ import print_function
import os
import shutil
import sys
import tempfile
from tempfile import NamedTemporaryFile
import uuid
import yaml
from subprocess import Popen, PIPE
class Tpm2(object):
def __init__(self, tmp):
self._tmp = tmp
def createprimary(self, ownerauth, objauth):
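# Shells out to tpm2_createprimary: creates an RSA primary (SHA-256 name algorithm) protected by objauth,
# optionally authorizing the hierarchy with ownerauth, and saves its context to context.out in the temp dir.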
ctx = os.path.join(self._tmp, "context.out")
cmd = [
'tpm2_createprimary', '-p', 'hex:%s' % objauth.decode(), '-o', ctx,
'-g', 'sha256', '-G', 'rsa'
]
if ownerauth and len(ownerauth) > 0:
cmd.extend(['-P', ownerauth])
p = Popen(cmd, stdout=PIPE, stderr=PIPE, env=os.environ)
_, stderr = p.communicate()
if (p.wait()):
raise RuntimeError("Could not execute tpm2_createprimary: %s" %
stderr)
return ctx
@staticmethod
def evictcontrol(ownerauth, ctx):
cmd = ['tpm2_evictcontrol', '-c', str(ctx)]
if ownerauth and len(ownerauth) > 0:
cmd.extend(['-P', ownerauth])
p = Popen(cmd, stdout=PIPE, stderr=PIPE, env=os.environ)
stdout, stderr = p.communicate()
y = yaml.safe_load(stdout)
rc = p.wait()
handle = y['persistent-handle'] if rc == 0 else None
if rc:
raise RuntimeError("Could not execute tpm2_evictcontrol: %s" % stderr)
return handle
def load(self, pctx, pauth, priv, pub):
if priv != None and not isinstance(priv, str):
sealprivf = NamedTemporaryFile()
sealprivf.write(priv)
sealprivf.flush()
priv = sealprivf.name
if not isinstance(pub, str):
sealpubf = NamedTemporaryFile()
sealpubf.write(pub)
sealpubf.flush()
pub = sealpubf.name
ctx = os.path.join(self._tmp, uuid.uuid4().hex + '.out')
#tpm2_load -C $file_primary_key_ctx -u $file_load_key_pub -r $file_load_key_priv -n $file_load_key_name -o $file_load_key_ctx
if priv != None:
cmd = [
'tpm2_load', '-C', str(pctx), '-P', 'hex:' + pauth.decode(), '-u',
pub, '-r', priv, '-n', '/dev/null', '-o', ctx
]
else:
cmd = [
'tpm2_loadexternal', '-u', pub
]
p = Popen(cmd, stdout=PIPE, stderr=PIPE, env=os.environ)
_, stderr = p.communicate()
rc = p.wait()
if rc:
raise RuntimeError("Could not execute tpm2_load: %s", stderr)
return ctx
def unseal(self, ctx, auth):
# tpm2_unseal -Q -c $file_unseal_key_ctx
cmd = ['tpm2_unseal', '-c', ctx, '-p', 'hex:' + auth.decode()]
p = Popen(cmd, stdout=PIPE, stderr=PIPE, env=os.environ)
stdout, stderr = p.communicate()
rc = p.wait()
if rc:
raise RuntimeError("Could not execute tpm2_unseal: %s", stderr)
return stdout
def _encryptdecrypt(self, ctx, auth, data, decrypt=False):
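# Shared helper for encrypt()/decrypt(): runs tpm2_encryptdecrypt with the object auth,
# streams the data via stdin, and adds -D when decrypting.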
cmd = ['tpm2_encryptdecrypt', '-c', ctx, '-p', 'hex:' + auth.decode()]
if decrypt:
cmd.extend(['-D'])
p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE, env=os.environ)
stdout, stderr = p.communicate(input=data)
rc = p.wait()
if rc:
raise RuntimeError("Could not execute tpm2_encryptdecrypt: %s",
stderr)
return stdout
def encrypt(self, ctx, auth, data):
return self._encryptdecrypt(ctx, auth, data)
def decrypt(self, ctx, auth, data):
return self._encryptdecrypt(ctx, auth, data, decrypt=True)
def create(self,
phandle,
pauth,
objauth,
objattrs=None,
seal=None,
alg=None):
# tpm2_create -Q -C context.out -g $gAlg -G $GAlg -u key.pub -r key.priv
_, priv = tempfile.mkstemp(prefix='', suffix='.priv', dir=self._tmp)
_, pub = tempfile.mkstemp(prefix='', suffix='.pub', dir=self._tmp)
cmd = ['tpm2_create', '-C', str(phandle), '-u', pub, '-r', priv]
if pauth and len(pauth) > 0:
cmd.extend(['-P', 'hex:%s' % pauth.decode()])
if objauth and len(objauth) > 0:
cmd.extend(['-p', 'hex:%s' % objauth.decode()])
if objattrs != None:
cmd.extend(['-b', objattrs])
if seal != None:
cmd.extend(['-i', '-'])
if alg != None:
cmd.extend(['-G', alg])
p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE, env=os.environ)
stdout, stderr = p.communicate(input=seal)
rc = p.wait()
if (rc != 0):
os.remove(pub)
os.remove(priv)
raise RuntimeError("Could not execute tpm2_create: %s" %
str(stderr))
return priv, pub, stdout
def getcap(self, cap):
# tpm2_getcap -c $cap
cmd = ['tpm2_getcap', '-c', cap]
p = Popen(cmd, stdout=PIPE, stderr=PIPE, env=os.environ)
stdout, stderr = p.communicate()
rc = p.wait()
if rc:
raise RuntimeError("Could not execute tpm2_getcap: %s", stderr)
return stdout
def importkey(self,
phandle,
pauth,
objauth,
privkey,
objattrs=None,
seal=None,
alg=None):
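# Wraps tpm2_import: imports the external private key file under the parent object and
# returns the new (priv, pub) blob paths plus the tool output.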
if privkey and len(privkey) > 0:
exists = os.path.isfile(privkey)
if not exists:
raise RuntimeError("File '%s' path is invalid or is missing",
privkey)
else:
sys.exit("Invalid file path")
_, priv = tempfile.mkstemp(prefix='', suffix='.priv', dir=self._tmp)
_, pub = tempfile.mkstemp(prefix='', suffix='.pub', dir=self._tmp)
parent_path = "file:" + str(phandle)
cmd = [
'tpm2_import', '-V', '-C', parent_path, '-i', privkey, '-u', pub,
'-r', priv
]
if pauth and len(pauth) > 0:
cmd.extend(['-P', 'hex:%s' % pauth.decode()])
if objauth and len(objauth) > 0:
cmd.extend(['-p', 'hex:%s' % objauth.decode()])
if objattrs != None:
cmd.extend(['-b', objattrs])
if seal != None:
cmd.extend(['-i', '-'])
if alg != None:
cmd.extend(['-G', alg])
p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE, env=os.environ)
stdout, stderr = p.communicate(input=seal)
rc = p.wait()
if (rc != 0):
os.remove(pub)
os.remove(priv)
raise RuntimeError("Could not execute tpm2_import: %s" %
str(stderr))
return priv, pub, stdout
def changeauth(self, pctx, objctx, oldobjauth, newobjauth):
newpriv = os.path.join(self._tmp, uuid.uuid4().hex + '.priv')
# tpm2_changeauth -C <parent ctx> -c <object ctx> -P <old auth> -p <new auth> -r <new priv blob>
cmd = [
'tpm2_changeauth', '-C', str(pctx), '-c', str(objctx), '-P',
'hex:' + oldobjauth.decode(), '-p', 'hex:' + newobjauth.decode(),
'-r', newpriv
]
p = Popen(cmd, stdout=PIPE, stderr=PIPE, env=os.environ)
_, stderr = p.communicate()
rc = p.wait()
if rc:
raise RuntimeError("Could not execute tpm2_load: %s", stderr)
return newpriv
|
the-stack_106_22250 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from iminuit.color import Gradient
__all__ = ['LatexTable']
class LatexTable:
"""Latex table output.
"""
float_format = '%10.5g'
int_format = '%d'
latex_kwd = [
'alpha', 'beta', 'gamma',
'delta', 'epsilon', 'zeta',
'eta', 'theta', 'iota',
'kappa', 'lambda', 'mu',
'nu', 'xi', 'omicron',
'pi', 'rho', 'sigma',
'tau', 'upsilon', 'phi',
'chi', 'psi', 'omega',
'Alpha', 'Beta', 'Gamma',
'Delta', 'Epsilon', 'Zeta',
'Eta', 'Theta', 'Iota',
'Kappa', 'Lambda', 'Mu',
'Nu', 'Xi', 'Omicron',
'Pi', 'Rho', 'Sigma',
'Tau', 'Upsilon', 'Phi',
'Chi', 'Psi', 'Omega',
]
def __init__(self, data, headers=None, smart_latex=True,
escape_under_score=True, alignment=None, rotate_header=False,
latex_map=None):
# Make sure #data columns matches #header columns, using any non-zero
# length if data or headers are missing
if len(data) > 0:
num_col = len(data[0])
if headers:
assert num_col == len(headers)
else:
if headers is not None:
num_col = len(headers)
else:
# LaTeX requires at least one column
num_col = 1
self.headers = headers
self.data = data
self.num_col = num_col
self.smart_latex = smart_latex
self.escape_under_score = escape_under_score
self.alignment = self._auto_align() if alignment is None else alignment
self.rotate_header = rotate_header
self.latex_map = {} if latex_map is None else latex_map
self.cell_color = {} # map of tuple (i,j)=>(r, g, b) #i,j include header
def _auto_align(self):
return '|' + 'c|' * self.num_col
def _format(self, s):
if s in self.latex_map:
return self.latex_map[s]
elif isinstance(s, float):
return self.float_format % s
elif isinstance(s, int):
return self.int_format % s
elif self.smart_latex:
return self._convert_smart_latex(s)
elif self.escape_under_score:
return s.replace('_', r'\_')
else:
return s
def _convert_smart_latex(self, s):
"""convert greek symbol to latex one
transform
a to $a$ if a is greek letter else just a
a_xxx to $a_{xxx}$ and
a_xxx_yyy_zzz to a xxx $yyy_{zzz}$
"""
# FIXME: implement this
parts = s.split('_')
if len(parts) == 1: # a to $a$ if a is greek letter else just a
if parts[0] in self.latex_kwd:
return r'$\%s$' % str(parts[0])
else:
return str(parts[0])
elif len(parts) == 2: # a_xxx to $a_{xxx}$ and
first = '\\%s' % parts[0] if parts[0] in self.latex_kwd else parts[0]
second = '\\%s' % parts[1] if parts[1] in self.latex_kwd else parts[1]
return r'$%s_{%s}$' % (first, second)
else: # a_xxx_yyy_zzz to a xxx $yyy_{zzz}$
textpart = map(self._convert_smart_latex, parts[:-2])
textpart = ' '.join(textpart)
latexpart = self._convert_smart_latex('_'.join(parts[-2:]))
return textpart + ' ' + latexpart
def set_cell_color(self, i, j, c):
"""colorize i,j cell with rgb color tuple c
Note that i,j index includes header.
i=0 is header if header is present. If header is not present then
i=0 refer to first data row.
"""
self.cell_color[(i, j)] = c
def _prepare(self): # return list of list
ret = []
if self.headers:
tmp = list(map(self._format, self.headers))
if self.rotate_header:
tmp = list(map(lambda x: '\\rotatebox{90}{%s}' % x, tmp))
ret.append(tmp)
for x in self.data:
ret.append(list(map(self._format, x)))
return ret
def __str__(self):
hline = '\\hline\n'
ret = ''
if len(self.cell_color) != 0:
ret += '%\\usepackage[table]{xcolor} % include this for color\n'
ret += '%\\usepackage{rotating} % include this for rotate header\n'
ret += '%\\documentclass[xcolor=table]{beamer} % for beamer\n'
ret += '\\begin{tabular}{%s}\n' % self.alignment
ret += hline
tdata = self._prepare()
# decorate it
for (i, j), c in self.cell_color.items():
xcolor = '[RGB]{%d,%d,%d}' % (c[0], c[1], c[2])
tdata[i][j] = '\\cellcolor' + xcolor + ' ' + tdata[i][j]
for line in tdata:
ret += ' & '.join(line) + '\\\\\n'
ret += hline
ret += '\\end{tabular}\n'
return ret.strip()
class LatexFactory:
@classmethod
def build_matrix(cls, vnames, matrix, latex_map=None):
"""build latex correlation matrix"""
# ret_link = '<a onclick="$(\'#%s\').toggle()" href="#">Show Latex</a>'%uid
headers = [''] + list(vnames)
data = []
color = {}
for i, v1 in enumerate(vnames):
tmp = [v1]
for j, v2 in enumerate(vnames):
m = matrix[i][j]
tmp.append(m)
color[(i + 1, j + 1)] = Gradient.color_for(abs(m))
# +1 for header on the side and top
data.append(tmp)
table = LatexTable(headers=headers, data=data, rotate_header=True,
latex_map=latex_map)
table.float_format = '%.2g'
for (i, j), c in color.items():
table.set_cell_color(i, j, c)
return table
@classmethod
def build_param_table(cls, mps, merr=None, float_format='%5.3e',
smart_latex=True, latex_map=None):
"""build latex parameter table"""
headers = ['', 'Name', 'Value', 'Hesse Error', 'Minos Error-',
'Minos Error+', 'Limit-', 'Limit+', 'Fixed?', ]
data = []
for i, mp in enumerate(mps):
minos_p, minos_m = ('', '') if merr is None or mp.name not in merr else \
('%g' % merr[mp.name].upper, '%g' % merr[mp.name].lower)
limit_p = '' if mp.upper_limit is None else '%g' % mp.upper_limit
limit_m = '' if mp.lower_limit is None else '%s' % mp.lower_limit
fixed = 'Yes' if mp.is_fixed else 'No'
tmp = [
i,
mp.name,
'%g' % mp.value,
'%g' % mp.error,
minos_m,
minos_p,
limit_m,
limit_p,
fixed,
]
data.append(tmp)
alignment = '|c|r|r|r|r|r|r|r|c|'
ret = LatexTable(data, headers=headers, alignment=alignment,
smart_latex=smart_latex, latex_map=latex_map)
ret.float_format = float_format
return ret
|
the-stack_106_22253 | # -*- coding: utf-8 -*-
"""
Created on Nov 24, 2014
@author: moloch
Copyright 2014 Root the Box
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Handlers for user-related tasks.
"""
# pylint: disable=unused-wildcard-import
# pylint: disable=no-member
import logging
from models.Team import Team
from models.Box import Box
from models.User import User, ADMIN_PERMISSION
from models.Permission import Permission
from models.GameLevel import GameLevel
from handlers.BaseHandlers import BaseHandler
from libs.SecurityDecorators import *
from libs.ValidationError import ValidationError
from libs.EventManager import EventManager
from libs.Identicon import identicon
from builtins import str
from tornado.options import options
from netaddr import IPAddress
class AdminManageUsersHandler(BaseHandler):
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def get(self, *args, **kwargs):
self.render("admin/view/users.html", errors=None)
class AdminUserStatsHandler(BaseHandler):
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def get(self, *args, **kwargs):
uuid = self.get_argument("uuid", None)
user = User.by_uuid(uuid)
self.render("admin/view/user_stats.html", user=user, errors=None)
class AdminEditTeamsHandler(BaseHandler):
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def post(self, *args, **kwargs):
try:
group = self.get_argument("team_uuid", "all")
message = self.get_argument("message", "")
value = int(self.get_argument("money", 0))
if group == "all":
teams = Team.all()
for team in teams:
team.money += value
self.dbsession.add(team)
else:
team = Team.by_uuid(group)
team.money += value
self.dbsession.add(team)
self.dbsession.commit()
self.event_manager.admin_score_update(team, message, value)
self.redirect("/admin/users")
except ValidationError as error:
self.render("admin/view/users.html", errors=[str(error)])
class AdminEditUsersHandler(BaseHandler):
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def post(self, *args, **kwargs):
uri = {"user": self.edit_user, "team": self.edit_team}
if len(args) and args[0] in uri:
uri[args[0]]()
else:
self.redirect("/admin/users")
def edit_team(self):
""" Edits the team object """
try:
team = Team.by_uuid(self.get_argument("uuid", ""))
if team is None:
raise ValidationError("Team does not exist")
team.name = self.get_argument("name", team.name)
team.motto = self.get_argument("motto", team.motto)
team.money = self.get_argument("money", team.money)
team.notes = self.get_argument("notes", "")
if hasattr(self.request, "files") and "avatarfile" in self.request.files:
team.avatar = self.request.files["avatarfile"][0]["body"]
else:
avatar = self.get_argument("avatar", team.avatar)
if team.avatar != avatar and avatar != "":
# allow for default without setting
team._avatar = avatar
self.dbsession.add(team)
self.dbsession.commit()
self.event_manager.push_score_update()
self.redirect("/admin/users")
except ValidationError as error:
self.render("admin/view/users.html", errors=[str(error)])
def edit_user(self):
""" Update user objects in the database """
try:
user = User.by_uuid(self.get_argument("uuid", ""))
if user is None:
raise ValidationError("User does not exist")
handle = self.get_argument("handle", "")
if user.handle != handle:
if User.by_handle(handle) is None:
logging.info("Updated user handle %s -> %s" % (user.handle, handle))
user.handle = handle
else:
raise ValidationError("Handle is already in use")
name = self.get_argument("name", "")
email = self.get_argument("email", "")
notes = self.get_argument("notes", "")
if user.name != name:
logging.info("Updated user Name %s -> %s" % (user.name, name))
user.name = name
if user.email != email:
logging.info("Updated user Email %s -> %s" % (user.email, email))
user.email = email
if user.notes != notes:
logging.info("Updated user Notes %s -> %s" % (user.notes, notes))
user.notes = notes
if options.banking:
hash_algorithm = self.get_argument("hash_algorithm", "")
if hash_algorithm != user.algorithm:
if hash_algorithm in user.algorithms:
if 0 < len(self.get_argument("bank_password", "")):
logging.info(
"Updated %s's hashing algorithm %s -> %s"
% (user.handle, user.algorithm, hash_algorithm)
)
user.algorithm = hash_algorithm
else:
raise ValidationError(
"You must provide a new bank password when updating the hashing algorithm"
)
else:
raise ValidationError("Not a valid hash algorithm")
if len(self.get_argument("bank_password", "")):
user.bank_password = self.get_argument("bank_password", "")
password = self.get_argument("password", "")
if password and len(password) > 0:
user.password = password
if hasattr(self.request, "files") and "avatarfile" in self.request.files:
user.avatar = self.request.files["avatarfile"][0]["body"]
else:
avatar = self.get_argument("avatar", user.avatar)
# allow for default without setting
user._avatar = avatar
admin = self.get_argument("admin", "false")
team = Team.by_uuid(self.get_argument("team_uuid", ""))
if team is not None:
if user not in team.members:
logging.info(
"Updated %s's team %s -> %s"
% (user.handle, user.team_id, team.name)
)
user.team_id = team.id
elif options.teams and admin != "true":
raise ValidationError("Please select a valid Team.")
if admin == "true" and not user.is_admin():
logging.info("Promoted user %s to Admin" % user.handle)
permission = Permission()
permission.name = ADMIN_PERMISSION
permission.user_id = user.id
user.team_id = None
self.dbsession.add(permission)
elif admin == "false" and user.is_admin():
logging.info("Demoted user %s to Player" % user.handle)
if user == self.get_current_user():
self.render(
"admin/view/users.html", errors=["You cannont demote yourself."]
)
return
if team is None:
team = Team.by_name(user.handle)
if team is None:
team = self.create_team(user)
user.team_id = team.id
permissions = Permission.by_user_id(user.id)
for permission in permissions:
if permission.name == ADMIN_PERMISSION:
self.dbsession.delete(permission)
self.dbsession.add(user)
self.dbsession.commit()
self.event_manager.push_score_update()
self.redirect("/admin/users")
except ValidationError as error:
self.render("admin/view/users.html", errors=[str(error)])
def create_team(self, user):
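# Build a one-player team named after the user's handle: identicon avatar, starting money
# from config when banking is enabled (otherwise 0), and access to game level 0.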
team = Team()
team.name = user.handle
team.motto = ""
team._avatar = identicon(team.name, 6)
if self.config.banking:
team.money = self.config.starting_team_money
else:
team.money = 0
level_0 = GameLevel.by_number(0)
if not level_0:
level_0 = GameLevel.all()[0]
team.game_levels.append(level_0)
self.dbsession.add(team)
return team
class AdminDeleteUsersHandler(BaseHandler):
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def post(self, *args, **kwargs):
uri = {"user": self.del_user, "team": self.del_team}
if len(args) and args[0] in uri:
uri[args[0]]()
else:
self.redirect("/admin/users")
def del_user(self):
"""
Delete user objects from the database; you cannot delete yourself.
"""
user = User.by_uuid(self.get_argument("uuid", ""))
if user is None:
self.render("admin/view/users.html", errors=["User does not exist"])
elif user == self.get_current_user():
self.render("admin/view/users.html", errors=["You cannot delete yourself."])
else:
logging.info("Deleted User: '%s'" % str(user.handle))
EventManager.instance().deauth(user)
self.dbsession.delete(user)
self.dbsession.commit()
self.event_manager.push_score_update()
self.redirect("/admin/users")
def del_team(self):
"""
Delete team objects in the database.
"""
team = Team.by_uuid(self.get_argument("uuid", ""))
if team is not None:
logging.info("Deleted Team: '%s'" % str(team.name))
for user in team.members:
if user == self.get_current_user():
self.render(
"admin/view/users.html",
errors=["Unable to delete user %s" % user.handle],
)
return
EventManager.instance().deauth(user)
self.dbsession.delete(team)
self.dbsession.commit()
self.event_manager.push_score_update()
self.redirect("/admin/users")
else:
self.render("admin/view/users.html", errors=["Team does not exist"])
class AdminBanHammerHandler(BaseHandler):
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def post(self, *args, **kwargs):
uri = {"add": self.ban_add, "clear": self.ban_clear, "config": self.ban_config}
if len(args) and args[0] in uri:
uri[args[0]]()
self.redirect("/user")
def ban_config(self):
""" Configure the automatic ban settings """
if self.get_argument("automatic_ban", "") == "true":
self.application.settings["automatic_ban"] = True
try:
threshold = abs(int(self.get_argument("threshold_size", "10")))
except ValueError:
threshold = 10
logging.info("Automatic ban enabled, with threshold of %d" % threshold)
self.application.settings["blacklist_threshold"] = threshold
else:
logging.info("Automatic ban disabled")
self.application.settings["automatic_ban"] = False
def ban_add(self):
""" Add an ip address to the banned list """
try:
ip = self.get_argument("ip", "")
if not IPAddress(ip).is_loopback():
logging.info("Banned new ip: %s" % ip)
self.application.settings["blacklisted_ips"].append(ip)
except:
pass # Don't care about exceptions here
def ban_clear(self):
""" Remove an ip from the banned list """
ip = self.get_argument("ip", "")
if ip in self.application.settings["blacklisted_ips"]:
logging.info("Removed ban on ip: %s" % ip)
self.application.settings["blacklisted_ips"].remove(ip)
self.application.settings["failed_logins"][ip] = 0
class AdminLockHandler(BaseHandler):
""" Used to manually lock/unlocked accounts """
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def post(self, *args, **kwargs):
""" Calls an lock based on URL """
uri = {"user": self.lock_user, "box": self.lock_box}
if len(args) and args[0] in uri:
uri[args[0]]()
else:
self.render("public/404.html")
def lock_user(self):
""" Toggle account lock """
user = User.by_uuid(self.get_argument("uuid", ""))
if user is not None:
user.locked = False if user.locked else True
self.dbsession.add(user)
self.dbsession.commit()
self.redirect("/admin/users")
def lock_box(self):
uuid = self.get_argument("uuid", "")
box = Box.by_uuid(uuid)
if box is not None:
box.locked = False if box.locked else True
self.dbsession.add(box)
self.dbsession.commit()
self.redirect("/admin/view/game_objects#%s" % str(uuid))
class AdminAjaxUserHandler(BaseHandler):
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def post(self, *args, **kwargs):
uri = {"user": self.user_details, "team": self.team_details}
if len(args) and args[0] in uri:
uri[args[0]]()
def team_details(self):
# print(self.get_argument('uuid', ''))
team = Team.by_uuid(self.get_argument("uuid", ""))
if team is not None:
self.write(team.to_dict())
else:
self.write({})
def user_details(self):
user = User.by_uuid(self.get_argument("uuid", ""))
# print(user)
if user is not None:
self.write(user.to_dict())
else:
self.write({})
|
the-stack_106_22254 | import src.model.tlform as tlform
import src.model.term as TERM
import src.model.rpython as rpy
import src.model.pattern as pattern
from src.codegen.pattern import PatternCodegen
from src.codegen.term import TermCodegen
from src.util import SymGen
from src.context import CompilationContext
from src.codegen.common import TermHelperFuncs, MatchHelperFuncs, \
MatchMethodTable, TermKind, \
TermMethodTable, ReadFromStdinAndParse
#------------------------------
# Top-level form codegen
#------------------------------
class TopLevelFormCodegen(tlform.TopLevelFormVisitor):
def __init__(self, module, context):
assert isinstance(module, tlform.Module)
assert isinstance(context, CompilationContext)
self.module = module
self.context = context
self.symgen = SymGen()
self.modulebuilder = rpy.BlockBuilder()
self.main_procedurecalls = []
def run(self):
self.modulebuilder.IncludeFromPythonSource('runtime/term.py')
self.modulebuilder.IncludeFromPythonSource('runtime/parser.py')
self.modulebuilder.IncludeFromPythonSource('runtime/fresh.py')
self.modulebuilder.IncludeFromPythonSource('runtime/match.py')
# parse all term literals.
# ~~ 26.07.2020 disable lit terms for now, need to implement
# nt caching acceleration technique first.
"""
tmp0, tmp1 = rpy.gen_pyid_temporaries(2, self.symgen)
for trm, sym1 in self.context._litterms.items():
sym1 = rpy.gen_pyid_for(sym1)
self.modulebuilder.AssignTo(tmp0).New('Parser', rpy.PyString(repr(trm)))
self.modulebuilder.AssignTo(sym1).MethodCall(tmp0, 'parse')
"""
# variable-not-otherwise-mentioned of given define language
for ident, variables in self.context.get_variables_mentioned_all():
ident = rpy.gen_pyid_for(ident)
variables = map(lambda v: rpy.PyString(v), variables)
self.modulebuilder.AssignTo(ident).PySet(*variables)
for form in self.module.tlforms:
self._visit(form)
# generate main
fb = rpy.BlockBuilder()
symgen = SymGen()
## emit some dummy Terms to aid RPython with type inference.
tmp0, tmp1, tmp2, tmp3, tmp4 = rpy.gen_pyid_temporaries(5, symgen)
fb.AssignTo(tmp0).New('Integer', rpy.PyInt(0))
fb.AssignTo(tmp1).New('Float', rpy.PyFloat(0.0))
fb.AssignTo(tmp2).New('String', rpy.PyString("\"hello world!\""))
fb.AssignTo(tmp3).New('Boolean', rpy.PyString("#f"))
fb.AssignTo(tmp4).New('Variable', rpy.PyString("x"))
for procedure in self.main_procedurecalls:
tmpi = rpy.gen_pyid_temporaries(1, symgen)
fb.AssignTo(tmpi).FunctionCall(procedure)
fb.Return(rpy.PyInt(0))
self.modulebuilder.Function('entrypoint').WithParameters(rpy.PyId('argv')).Block(fb)
#required entry procedure for Rpython.
fb = rpy.BlockBuilder()
fb.Return(rpy.PyTuple(rpy.PyId('entrypoint'), rpy.PyNone()))
self.modulebuilder.Function('target').WithParameters(rpy.PyVarArg('args')).Block(fb)
# if __name__ == '__main__': entrypoint()
# for python2.7 compatibility.
ifb = rpy.BlockBuilder()
tmp = rpy.gen_pyid_temporaries(1, self.symgen)
ifb.AssignTo(tmp).FunctionCall('entrypoint', rpy.PyList())
self.modulebuilder.If.Equal(rpy.PyId('__name__'), rpy.PyString('__main__')).ThenBlock(ifb)
return rpy.Module(self.modulebuilder.build())
def _codegenNtDefinition(self, languagename, ntdef):
assert isinstance(ntdef, tlform.DefineLanguage.NtDefinition)
for pat in ntdef.patterns:
if self.context.get_toplevel_function_for_pattern(languagename, repr(pat)) is None:
PatternCodegen(self.modulebuilder, pat, self.context, languagename, self.symgen).run()
nameof_this_func = 'lang_{}_isa_nt_{}'.format(languagename, ntdef.nt.prefix)
term, match, matches = rpy.gen_pyid_for('term', 'match', 'matches')
# for each pattern in ntdefinition
# match = Match(...)
# matches = matchpat(term, match, 0, 1)
# if len(matches) != 0:
# return True
fb = rpy.BlockBuilder()
for pat in ntdef.patterns:
func2call = self.context.get_toplevel_function_for_pattern(languagename, repr(pat))
ifb = rpy.BlockBuilder()
ifb.Return(rpy.PyBoolean(True))
fb.AssignTo(matches).FunctionCall(func2call, term)
fb.If.LengthOf(matches).NotEqual(rpy.PyInt(0)).ThenBlock(ifb)
fb.Return(rpy.PyBoolean(False))
self.modulebuilder.Function(nameof_this_func).WithParameters(term).Block(fb)
def _visitDefineLanguage(self, form):
assert isinstance(form, tlform.DefineLanguage)
# generate hole for each language. Need this for term annotation.
hole = rpy.gen_pyid_for('{}_hole'.format(form.name))
self.modulebuilder.AssignTo(hole).New('Hole')
# first insert isa_nt functions into the context
for ntsym, ntdef in form.nts.items():
nameof_this_func = 'lang_{}_isa_nt_{}'.format(form.name, ntsym)
self.context.add_isa_function_name(form.name, ntdef.nt.prefix, nameof_this_func)
for nt in form.nts.values():
self._codegenNtDefinition(form.name, nt)
def _visitRequirePythonSource(self, form):
assert isinstance(form, tlform.RequirePythonSource)
self.modulebuilder.IncludeFromPythonSource(form.filename)
def _visitRedexMatch(self, form, callself=True):
assert isinstance(form, tlform.RedexMatch)
assert False
if self.context.get_toplevel_function_for_pattern(form.languagename, repr(form.pat)) is None:
PatternCodegen(self.modulebuilder, form.pat, self.context, form.languagename, self.symgen).run()
TermCodegen(self.modulebuilder, self.context).transform(form.termstr)
termfunc = self.context.get_function_for_term_template(form.termstr)
matchfunc = self.context.get_toplevel_function_for_pattern(form.languagename, repr(form.pat))
symgen = SymGen()
matches, match, term = rpy.gen_pyid_for('matches', 'match', 'term')
tmp0 = rpy.gen_pyid_temporaries(1, symgen)
fb = rpy.BlockBuilder()
fb.AssignTo(tmp0).New('Match')
fb.AssignTo(term).FunctionCall(termfunc, tmp0)
fb.AssignTo(matches).FunctionCall(matchfunc, term)
fb.Print(matches)
fb.Return(matches)
# call redex-match itself.
nameof_this_func = self.symgen.get('redexmatch')
self.context.add_redexmatch_for(form, nameof_this_func)
self.modulebuilder.Function(nameof_this_func).Block(fb)
if callself:
tmp0 = rpy.gen_pyid_temporaries(1, self.symgen)
self.modulebuilder.AssignTo(tmp0).FunctionCall(nameof_this_func)
def _visitRedexMatchAssertEqual(self, form):
def gen_matches(expectedmatches, fb, symgen):
processedmatches = []
for m in expectedmatches:
tmp0 = rpy.gen_pyid_temporaries(1, symgen)
fb.AssignTo(tmp0).New('Match')
processedmatches.append(tmp0)
for sym, termx in m.bindings:
tmp1, tmp2, tmp3, tmp4 = rpy.gen_pyid_temporaries(4, symgen)
TermCodegen(self.modulebuilder, self.context).transform(termx)
termfunc = self.context.get_function_for_term_template(termx)
fb.AssignTo(tmp1).New('Match')
fb.AssignTo(tmp2).FunctionCall(termfunc, tmp1)
fb.AssignTo(tmp3).MethodCall(tmp0, MatchMethodTable.AddKey, rpy.PyString(sym))
fb.AssignTo(tmp4).MethodCall(tmp0, MatchMethodTable.AddToBinding, rpy.PyString(sym), tmp2)
tmpi = rpy.gen_pyid_temporaries(1, symgen)
fb.AssignTo(tmpi).PyList(*processedmatches)
return tmpi
assert isinstance(form, tlform.RedexMatchAssertEqual)
if self.context.get_toplevel_function_for_pattern(form.languagename, repr(form.pat)) is None:
PatternCodegen(self.modulebuilder, form.pat, self.context, form.languagename, self.symgen).run()
TermCodegen(self.modulebuilder, self.context).transform(form.termtemplate)
matchfunc = self.context.get_toplevel_function_for_pattern(form.languagename, repr(form.pat))
termfunc = self.context.get_function_for_term_template(form.termtemplate)
symgen = SymGen()
matches, match, term = rpy.gen_pyid_for('matches', 'match', 'term')
fb = rpy.BlockBuilder()
expectedmatches = gen_matches(form.expectedmatches, fb, symgen)
tmp0, tmp1, tmp2 = rpy.gen_pyid_temporaries(3, symgen)
fb.AssignTo(tmp0).New('Match')
fb.AssignTo(term).FunctionCall(termfunc, tmp0)
fb.AssignTo(matches).FunctionCall(matchfunc, term)
fb.AssignTo(tmp1).FunctionCall('assert_compare_match_lists', matches, expectedmatches)
fb.AssignTo(tmp2).FunctionCall(MatchHelperFuncs.PrintMatchList, matches)
fb.Return(matches)
nameof_this_func = self.symgen.get('redexmatchassertequal')
self.context.add_redexmatch_for(form, nameof_this_func)
self.modulebuilder.Function(nameof_this_func).Block(fb)
self.main_procedurecalls.append(nameof_this_func)
def _visitTermLetAssertEqual(self, form):
assert isinstance(form, tlform.TermLetAssertEqual)
template = form.template
TermCodegen(self.modulebuilder, self.context).transform(template)
templatetermfunc = self.context.get_function_for_term_template(template)
TermCodegen(self.modulebuilder, self.context).transform(form.expected)
expectedtermfunc = self.context.get_function_for_term_template(form.expected)
fb = rpy.BlockBuilder()
symgen = SymGen()
expected, match = rpy.gen_pyid_for('expected', 'match')
tmp0 = rpy.gen_pyid_temporaries(1, symgen)
fb.AssignTo(tmp0).New('Match')
fb.AssignTo(expected).FunctionCall(expectedtermfunc, tmp0)
fb.AssignTo(match).New('Match')
for variable, term in form.variableassignments.items():
tmp1, tmp2, tmp3, tmp4 = rpy.gen_pyid_temporaries(4, symgen)
TermCodegen(self.modulebuilder, self.context).transform(term)
termfunc = self.context.get_function_for_term_template(term)
fb.AssignTo(tmp1).New('Match')
fb.AssignTo(tmp2).FunctionCall(termfunc, tmp1)
fb.AssignTo(tmp3).MethodCall(match, MatchMethodTable.AddKey, rpy.PyString(variable))
fb.AssignTo(tmp4).MethodCall(match, MatchMethodTable.AddToBinding, rpy.PyString(variable), tmp2)
tmp0, tmp1, tmp2 = rpy.gen_pyid_temporaries(3, symgen)
fb.AssignTo(tmp0).FunctionCall(templatetermfunc, match)
fb.AssignTo(tmp1).FunctionCall('asserttermsequal', tmp0, expected)
fb.AssignTo(tmp2).FunctionCall(TermHelperFuncs.PrintTerm, tmp0)
nameof_this_func = self.symgen.get('asserttermequal')
self.modulebuilder.Function(nameof_this_func).Block(fb)
self.main_procedurecalls.append(nameof_this_func)
def _codegenReductionCase(self, rc, languagename, reductionrelationname, nameof_domaincheck=None):
assert isinstance(rc, tlform.DefineReductionRelation.ReductionCase)
if self.context.get_toplevel_function_for_pattern(languagename, repr(rc.pattern)) is None:
PatternCodegen(self.modulebuilder, rc.pattern, self.context, languagename, self.symgen).run()
TermCodegen(self.modulebuilder, self.context).transform(rc.termtemplate)
nameof_matchfn = self.context.get_toplevel_function_for_pattern(languagename, repr(rc.pattern))
nameof_termfn = self.context.get_function_for_term_template(rc.termtemplate)
nameof_rc = self.symgen.get('{}_{}_case'.format(languagename, reductionrelationname))
symgen = SymGen()
# terms = []
# matches = match(term)
# if len(matches) != 0:
# for match in matches:
# tmp0 = gen_term(match)
# tmp2 = match_domain(tmp0)
# if len(tmp2) == 0:
# raise Exception('reduction-relation {}: term reduced from {} to {} via rule {} and is outside domain')
# tmp1 = terms.append(tmp0)
# return terms
terms, term, matches, match = rpy.gen_pyid_for('terms', 'term', 'matches', 'match')
tmp0, tmp1, tmp2 = rpy.gen_pyid_temporaries(3, symgen)
forb = rpy.BlockBuilder()
forb.AssignTo(tmp0).FunctionCall(nameof_termfn, match)
if nameof_domaincheck is not None:
ifb = rpy.BlockBuilder()
tmpa, tmpb = rpy.gen_pyid_temporaries(2, symgen)
ifb.AssignTo(tmpa).MethodCall(term, TermMethodTable.ToString)
ifb.AssignTo(tmpb).MethodCall(tmp0, TermMethodTable.ToString)
ifb.RaiseException('reduction-relation \\"{}\\": term reduced from %s to %s via rule \\"{}\\" is outside domain' \
.format(reductionrelationname, rc.name),
tmpa, tmpb)
forb.AssignTo(tmp2).FunctionCall(nameof_domaincheck, tmp0)
forb.If.LengthOf(tmp2).Equal(rpy.PyInt(0)).ThenBlock(ifb)
forb.AssignTo(tmp1).MethodCall(terms, 'append', tmp0)
ifb = rpy.BlockBuilder()
ifb.For(match).In(matches).Block(forb)
fb = rpy.BlockBuilder()
fb.AssignTo(terms).PyList()
fb.AssignTo(matches).FunctionCall(nameof_matchfn, term)
fb.If.LengthOf(matches).NotEqual(rpy.PyInt(0)).ThenBlock(ifb)
fb.Return(terms)
self.modulebuilder.Function(nameof_rc).WithParameters(term).Block(fb)
return nameof_rc
def _visitDefineReductionRelation(self, form):
assert isinstance(form, tlform.DefineReductionRelation)
# def reduction_relation_name(term):
# outterms = []
# {for each case}
# tmpi = rc(term)
# outterms = outterms + tmp{i}
# return outterms
if form.domain != None:
if self.context.get_toplevel_function_for_pattern(form.languagename, repr(form.domain)) is None:
PatternCodegen(self.modulebuilder, form.domain, self.context, form.languagename, self.symgen).run()
nameof_domaincheck = None
if form.domain != None:
nameof_domaincheck = self.context.get_toplevel_function_for_pattern(form.languagename, repr(form.domain))
rcfuncs = []
for rc in form.reductioncases:
rcfunc = self._codegenReductionCase(rc, form.languagename, form.name, nameof_domaincheck)
rcfuncs.append(rcfunc)
terms, term = rpy.gen_pyid_for('terms', 'term')
symgen = SymGen()
fb = rpy.BlockBuilder()
if nameof_domaincheck != None:
tmp0 = rpy.gen_pyid_temporaries(1, symgen)
ifb = rpy.BlockBuilder()
tmpa = rpy.gen_pyid_temporaries(1, symgen)
ifb.AssignTo(tmpa).MethodCall(term, TermMethodTable.ToString)
ifb.RaiseException('reduction-relation not defined for %s', tmpa)
fb.AssignTo(tmp0).FunctionCall(nameof_domaincheck, term)
fb.If.LengthOf(tmp0).Equal(rpy.PyInt(0)).ThenBlock(ifb)
fb.AssignTo(terms).PyList()
for rcfunc in rcfuncs:
tmpi = rpy.gen_pyid_temporaries(1, symgen)
fb.AssignTo(tmpi).FunctionCall(rcfunc, term)
fb.AssignTo(terms).Add(terms, tmpi)
fb.Return(terms)
nameof_function = '{}_{}'.format(form.languagename, form.name)
self.context.add_reduction_relation(form.name, nameof_function)
self.modulebuilder.Function(nameof_function).WithParameters(term).Block(fb)
return form
    # This generates a call to the reduction relation. Used by multiple other tlforms.
def _genreductionrelation(self, fb, symgen, nameof_reductionrelation, term):
TermCodegen(self.modulebuilder, self.context).transform(term)
termfunc = self.context.get_function_for_term_template(term)
term, terms = rpy.gen_pyid_for('term', 'terms')
tmp0, tmp1 = rpy.gen_pyid_temporaries(2, symgen)
fb.AssignTo(tmp0).New('Match')
fb.AssignTo(term).FunctionCall(termfunc, tmp0)
fb.AssignTo(terms).FunctionCall(nameof_reductionrelation, term)
fb.AssignTo(tmp1).FunctionCall(TermHelperFuncs.PrintTermList, terms)
return terms
def _visitApplyReductionRelationAssertEqual(self, form):
assert isinstance(form, tlform.ApplyReductionRelationAssertEqual)
def gen_terms(termtemplates, fb, symgen):
processed = []
for expectedterm in termtemplates:
TermCodegen(self.modulebuilder, self.context).transform(expectedterm)
expectedtermfunc = self.context.get_function_for_term_template(expectedterm)
tmpi, tmpj = rpy.gen_pyid_temporaries(2, symgen)
fb.AssignTo(tmpi).New('Match')
fb.AssignTo(tmpj).FunctionCall(expectedtermfunc, tmpi)
processed.append(tmpj)
tmpi = rpy.gen_pyid_temporaries(1, symgen)
fb.AssignTo(tmpi).PyList(*processed)
return tmpi
nameof_reductionrelation = self.context.get_reduction_relation(form.reductionrelationname)
fb = rpy.BlockBuilder()
symgen = SymGen()
tmp0 = rpy.gen_pyid_temporaries(1, symgen)
expectedterms = gen_terms(form.expected_termtemplates, fb, symgen)
terms = self._genreductionrelation(fb, symgen, nameof_reductionrelation, form.term)
fb.AssignTo(tmp0).FunctionCall(TermHelperFuncs.AssertTermListsEqual, terms, expectedterms)
nameof_function = self.symgen.get('applyreductionrelationassertequal')
self.modulebuilder.Function(nameof_function).Block(fb)
self.main_procedurecalls.append(nameof_function)
def _visitApplyReductionRelation(self, form):
assert isinstance(form, tlform.ApplyReductionRelation)
nameof_reductionrelation = self.context.get_reduction_relation(form.reductionrelationname)
assert nameof_reductionrelation != None
fb = rpy.BlockBuilder()
symgen = SymGen()
self._genreductionrelation(fb, symgen, nameof_reductionrelation, form.term)
tmp1 = rpy.gen_pyid_temporaries(1, symgen)
nameof_function = self.symgen.get('applyreductionrelation')
self.modulebuilder.Function(nameof_function).Block(fb)
self.modulebuilder.AssignTo(tmp1).FunctionCall(nameof_function)
    # A metafunction case may produce multiple matches, but after term plugging all
    # resulting terms must be the same.
def _codegenMetafunctionCase(self, metafunction, case, caseid, mfname):
assert isinstance(metafunction, tlform.DefineMetafunction)
assert isinstance(case, tlform.DefineMetafunction.MetafunctionCase)
#def mfcase(argterm):
# tmp0 = matchfunc(argterm)
# tmp1 = []
# if len(tmp0) == 0:
# return tmp1
# for tmp2 in tmp0:
# tmp3 = termfunc(tmp2)
# if tmp3 == None: continue
# tmp4 = tmp1.append(tmp3)
# tmp5 = aretermsequalpairwise(tmp1)
# if tmp5 != True:
# raise Exception('mfcase 1 matched (term) in len(tmp{i}) ways, single match is expected')
# tmp6 = tmp1[0]
# return tmp6
if self.context.get_toplevel_function_for_pattern(metafunction.languagename, repr(case.patternsequence)) is None:
PatternCodegen(self.modulebuilder, case.patternsequence, self.context, metafunction.languagename, self.symgen).run()
TermCodegen(self.modulebuilder, self.context).transform(case.termtemplate)
matchfunc = self.context.get_toplevel_function_for_pattern(metafunction.languagename, repr(case.patternsequence))
termfunc = self.context.get_function_for_term_template(case.termtemplate)
symgen = SymGen()
argterm = rpy.gen_pyid_for('argterm')
tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6 = rpy.gen_pyid_temporaries(7, symgen)
ifb1 = rpy.BlockBuilder()
ifb1.Return(tmp1)
ifbx = rpy.BlockBuilder()
ifbx.Continue
forb = rpy.BlockBuilder()
forb.AssignTo(tmp3).FunctionCall(termfunc, tmp2)
##FIXME temporary hack to simulate side-conditions in metafunctions.
        ## You'd use a hand-written Python function that returns None upon side-condition
        ## failure. Python functions are not allowed to return None otherwise.
forb.If.IsNone(tmp3).ThenBlock(ifbx)
forb.AssignTo(tmp4).MethodCall(tmp1, 'append', tmp3)
tmpa = rpy.gen_pyid_temporaries(1, symgen)
ifb2 = rpy.BlockBuilder()
ifb2.AssignTo(tmpa).MethodCall(argterm, TermMethodTable.ToString)
ifb2.RaiseException('meta-function {}: clause {} produced multiple terms when matching term %s' \
.format(metafunction.contract.name, caseid), tmpa)
fb = rpy.BlockBuilder()
fb.AssignTo(tmp0).FunctionCall(matchfunc, argterm)
fb.AssignTo(tmp1).PyList()
fb.For(tmp2).In(tmp0).Block(forb)
fb.If.LengthOf(tmp1).Equal(rpy.PyInt(0)).ThenBlock(ifb1)
fb.AssignTo(tmp5).FunctionCall(TermHelperFuncs.AreTermsEqualPairwise, tmp1)
fb.If.NotEqual(tmp5, rpy.PyBoolean(True)).ThenBlock(ifb2)
fb.AssignTo(tmp6).ArrayGet(tmp1, rpy.PyInt(0))
fb.Return(rpy.PyList(tmp6))
nameof_function = self.symgen.get('{}_case'.format(mfname))
self.modulebuilder.Function(nameof_function).WithParameters(argterm).Block(fb)
return nameof_function
def _visitDefineMetafunction(self, form):
assert isinstance(form, tlform.DefineMetafunction)
#def mf(argterm):
# tmp0 = domaincheck(argterm)
# if len(tmp0) == 0:
# raise Exception('mfname: term is not in my domain')
# { foreach reductioncase
# tmp{i} = mfcase(term)
# if len(tmp{i}) == 1:
# tmp{j} = tmp{i}[0]
# tmp{k} = codomaincheck(tmp{j})
# if len(tmp{k}) == 0:
# raise Exception('mfname: term not in my codomain')
# return tmp{j}
# }
        # raise Exception('no metafunction cases matched for term')
mfname = form.contract.name
nameof_function = self.symgen.get('metafunction')
self.context.add_metafunction(mfname, nameof_function)
if self.context.get_toplevel_function_for_pattern(form.languagename, repr(form.contract.domain)) is None:
PatternCodegen(self.modulebuilder, form.contract.domain, self.context, form.languagename, self.symgen).run()
domainmatchfunc = self.context.get_toplevel_function_for_pattern(form.languagename, repr(form.contract.domain))
if self.context.get_toplevel_function_for_pattern(form.languagename, repr(form.contract.codomain)) is None:
PatternCodegen(self.modulebuilder, form.contract.codomain, self.context, form.languagename, self.symgen).run()
codomainmatchfunc = self.context.get_toplevel_function_for_pattern(form.languagename, repr(form.contract.codomain))
symgen = SymGen()
argterm = rpy.gen_pyid_for('argterm')
tmp0, tmp1, tmp2 = rpy.gen_pyid_temporaries(3, symgen)
tmpa = rpy.gen_pyid_temporaries(1, symgen)
ifbd = rpy.BlockBuilder()
ifbd.AssignTo(tmpa).MethodCall(argterm, TermMethodTable.ToString)
ifbd.RaiseException('meta-function {}: term %s not in my domain'.format(mfname), tmpa)
fb = rpy.BlockBuilder()
fb.AssignTo(tmp0).FunctionCall(domainmatchfunc, argterm)
fb.If.LengthOf(tmp0).Equal(rpy.PyInt(0)).ThenBlock(ifbd)
for i, mfcase in enumerate(form.cases):
tmpi, tmpj, tmpk = rpy.gen_pyid_temporaries(3, symgen)
mfcasefunc = self._codegenMetafunctionCase(form, mfcase, i, nameof_function)
tmpa = rpy.gen_pyid_temporaries(1, symgen)
ifbi1 = rpy.BlockBuilder()
ifbi1.AssignTo(tmpa).MethodCall(tmpj, TermMethodTable.ToString)
ifbi1.RaiseException('meta-function {}: term %s not in my codomain'.format(mfname), tmpa)
ifbi2 = rpy.BlockBuilder()
ifbi2.AssignTo(tmpj).ArrayGet(tmpi, rpy.PyInt(0))
ifbi2.AssignTo(tmpk).FunctionCall(codomainmatchfunc, tmpj)
ifbi2.If.LengthOf(tmpk).Equal(rpy.PyInt(0)).ThenBlock(ifbi1)
ifbi2.Return(tmpj)
fb.AssignTo(tmpi).FunctionCall(mfcasefunc, argterm)
fb.If.LengthOf(tmpi).Equal(rpy.PyInt(1)).ThenBlock(ifbi2)
        fb.RaiseException('meta-function \\"{}\\": no clause matched'.format(mfname))
self.modulebuilder.Function(nameof_function).WithParameters(argterm).Block(fb)
return nameof_function
def _visitParseAssertEqual(self, form):
assert isinstance(form, tlform.ParseAssertEqual)
TermCodegen(self.modulebuilder, self.context).transform(form.expected_termtemplate)
termfunc = self.context.get_function_for_term_template(form.expected_termtemplate)
symgen = SymGen()
argterm = rpy.gen_pyid_for('argterm')
tmp0, tmp1, tmp2, tmp3, tmp4, tmp5 = rpy.gen_pyid_temporaries(6, symgen)
fb = rpy.BlockBuilder()
fb.AssignTo(tmp0).New('Parser', rpy.PyString(form.string2parse))
fb.AssignTo(tmp1).MethodCall(tmp0, 'parse')
fb.AssignTo(tmp2).New('Match')
fb.AssignTo(tmp3).FunctionCall(termfunc, tmp2)
fb.AssignTo(tmp4).FunctionCall('asserttermsequal', tmp1, tmp3)
fb.AssignTo(tmp5).FunctionCall(TermHelperFuncs.PrintTerm, tmp1)
nameof_this_func = self.symgen.get('parseassertequal')
self.modulebuilder.Function(nameof_this_func).Block(fb)
self.main_procedurecalls.append(nameof_this_func)
def _visitReadFromStdinAndApplyReductionRelation(self, form):
assert isinstance(form, tlform.ReadFromStdinAndApplyReductionRelation)
reduction_relation_func = self.context.get_reduction_relation(form.reductionrelationname)
symgen = SymGen()
term = rpy.gen_pyid_for('term')
tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7 = rpy.gen_pyid_temporaries(8, symgen)
forb = rpy.BlockBuilder()
forb.AssignTo(tmp3).FunctionCall(reduction_relation_func, term)
forb.AssignTo(tmp2).Add(tmp2, tmp3)
wh = rpy.BlockBuilder()
wh.AssignTo(tmp2).PyList()
wh.For(term).In(tmp1).Block(forb)
wh.AssignTo(tmp1).PyId(tmp2)
wh.AssignTo(tmp4).FunctionCall(TermHelperFuncs.PrintTermList, tmp1)
fb = rpy.BlockBuilder()
fb.AssignTo(tmp0).FunctionCall(ReadFromStdinAndParse)
if form.metafunctionname != None:
mfname = self.context.get_metafunction(form.metafunctionname)
fb.AssignTo(tmp5).New('Variable', rpy.PyString(form.metafunctionname))
fb.AssignTo(tmp6).New('Sequence', rpy.PyList(tmp5, tmp0))
fb.AssignTo(tmp0).FunctionCall(mfname, tmp6)
fb.AssignTo(tmp1).PyList(tmp0)
fb.AssignTo(tmp4).FunctionCall(TermHelperFuncs.PrintTermList, tmp1)
fb.While.LengthOf(tmp1).NotEqual(rpy.PyInt(0)).Block(wh)
nameof_this_func = self.symgen.get('readfromstdinandeval')
self.modulebuilder.Function(nameof_this_func).Block(fb)
self.main_procedurecalls.append(nameof_this_func)
return form
|
the-stack_106_22255 | import calendar
import logging
import time
from typing import Any, Dict, List, Optional, Tuple
from django.conf import settings
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect, render
from django.urls import reverse
from django.utils import translation
from django.utils.cache import patch_cache_control
from two_factor.utils import default_device
from zerver.decorator import zulip_login_required
from zerver.forms import ToSForm
from zerver.lib.actions import do_change_tos_version, realm_user_count
from zerver.lib.events import do_events_register
from zerver.lib.i18n import (
get_language_list,
get_language_list_for_templates,
get_language_name,
get_language_translation_data,
)
from zerver.lib.push_notifications import num_push_devices_for_user
from zerver.lib.streams import access_stream_by_name
from zerver.lib.subdomains import get_subdomain
from zerver.lib.users import compute_show_invites_and_add_streams
from zerver.lib.utils import generate_random_token, statsd
from zerver.models import Message, PreregistrationUser, Realm, Stream, UserProfile
from zerver.views.compatibility import is_outdated_desktop_app, is_unsupported_browser
from zerver.views.messages import get_latest_update_message_flag_activity
from zerver.views.portico import hello_view
def need_accept_tos(user_profile: Optional[UserProfile]) -> bool:
if user_profile is None: # nocoverage
return False
if settings.TERMS_OF_SERVICE is None: # nocoverage
return False
if settings.TOS_VERSION is None:
return False
return int(settings.TOS_VERSION.split('.')[0]) > user_profile.major_tos_version()
@zulip_login_required
def accounts_accept_terms(request: HttpRequest) -> HttpResponse:
if request.method == "POST":
form = ToSForm(request.POST)
if form.is_valid():
do_change_tos_version(request.user, settings.TOS_VERSION)
return redirect(home)
else:
form = ToSForm()
email = request.user.delivery_email
special_message_template = None
if request.user.tos_version is None and settings.FIRST_TIME_TOS_TEMPLATE is not None:
special_message_template = 'zerver/' + settings.FIRST_TIME_TOS_TEMPLATE
return render(
request,
'zerver/accounts_accept_terms.html',
context={'form': form,
'email': email,
'special_message_template': special_message_template},
)
def detect_narrowed_window(request: HttpRequest,
user_profile: Optional[UserProfile]) -> Tuple[List[List[str]],
Optional[Stream],
Optional[str]]:
"""This function implements Zulip's support for a mini Zulip window
that just handles messages from a single narrow"""
if user_profile is None: # nocoverage
return [], None, None
narrow: List[List[str]] = []
narrow_stream = None
narrow_topic = request.GET.get("topic")
if request.GET.get("stream"):
try:
# TODO: We should support stream IDs and PMs here as well.
narrow_stream_name = request.GET.get("stream")
(narrow_stream, ignored_rec, ignored_sub) = access_stream_by_name(
user_profile, narrow_stream_name)
narrow = [["stream", narrow_stream.name]]
except Exception:
logging.warning("Invalid narrow requested, ignoring", extra=dict(request=request))
if narrow_stream is not None and narrow_topic is not None:
narrow.append(["topic", narrow_topic])
return narrow, narrow_stream, narrow_topic
def update_last_reminder(user_profile: Optional[UserProfile]) -> None:
"""Reset our don't-spam-users-with-email counter since the
user has since logged in
"""
if user_profile is None: # nocoverage
return
if user_profile.last_reminder is not None: # nocoverage
# TODO: Look into the history of last_reminder; we may have
# eliminated that as a useful concept for non-bot users.
user_profile.last_reminder = None
user_profile.save(update_fields=["last_reminder"])
def get_furthest_read_time(user_profile: Optional[UserProfile]) -> Optional[float]:
if user_profile is None:
return time.time()
user_activity = get_latest_update_message_flag_activity(user_profile)
if user_activity is None:
return None
return calendar.timegm(user_activity.last_visit.utctimetuple())
def get_bot_types(user_profile: Optional[UserProfile]) -> List[Dict[str, object]]:
bot_types: List[Dict[str, object]] = []
if user_profile is None: # nocoverage
return bot_types
for type_id, name in UserProfile.BOT_TYPES.items():
bot_types.append({
'type_id': type_id,
'name': name,
'allowed': type_id in user_profile.allowed_bot_types,
})
return bot_types
def compute_navbar_logo_url(page_params: Dict[str, Any]) -> str:
if page_params["color_scheme"] == 2 and page_params["realm_night_logo_source"] != Realm.LOGO_DEFAULT:
navbar_logo_url = page_params["realm_night_logo_url"]
else:
navbar_logo_url = page_params["realm_logo_url"]
return navbar_logo_url
def home(request: HttpRequest) -> HttpResponse:
if not settings.ROOT_DOMAIN_LANDING_PAGE:
return home_real(request)
# If settings.ROOT_DOMAIN_LANDING_PAGE, sends the user the landing
# page, not the login form, on the root domain
subdomain = get_subdomain(request)
if subdomain != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
return home_real(request)
return hello_view(request)
@zulip_login_required
def home_real(request: HttpRequest) -> HttpResponse:
# Before we do any real work, check if the app is banned.
client_user_agent = request.META.get("HTTP_USER_AGENT", "")
(insecure_desktop_app, banned_desktop_app, auto_update_broken) = is_outdated_desktop_app(
client_user_agent)
if banned_desktop_app:
return render(
request,
'zerver/insecure_desktop_app.html',
context={
"auto_update_broken": auto_update_broken,
},
)
(unsupported_browser, browser_name) = is_unsupported_browser(client_user_agent)
if unsupported_browser:
return render(
request,
'zerver/unsupported_browser.html',
context={
"browser_name": browser_name,
},
)
# We need to modify the session object every two weeks or it will expire.
# This line makes reloading the page a sufficient action to keep the
# session alive.
request.session.modified = True
if request.user.is_authenticated:
user_profile = request.user
else: # nocoverage
# This code path should not be reachable because of zulip_login_required above.
user_profile = None
# If a user hasn't signed the current Terms of Service, send them there
if need_accept_tos(user_profile):
return accounts_accept_terms(request)
narrow, narrow_stream, narrow_topic = detect_narrowed_window(request, user_profile)
client_capabilities = {
'notification_settings_null': True,
'bulk_message_deletion': True,
}
register_ret = do_events_register(user_profile, request.client,
apply_markdown=True, client_gravatar=True,
slim_presence=True,
client_capabilities=client_capabilities,
narrow=narrow)
update_last_reminder(user_profile)
if user_profile is not None:
first_in_realm = realm_user_count(user_profile.realm) == 1
# If you are the only person in the realm and you didn't invite
# anyone, we'll continue to encourage you to do so on the frontend.
prompt_for_invites = (
first_in_realm and
not PreregistrationUser.objects.filter(referred_by=user_profile).count()
)
needs_tutorial = user_profile.tutorial_status == UserProfile.TUTORIAL_WAITING
if user_profile.pointer == -1:
# Put the new user's pointer at the bottom
#
# This improves performance, because we limit backfilling of messages
# before the pointer. It's also likely that someone joining an
# organization is interested in recent messages more than the very
# first messages on the system.
register_ret['pointer'] = register_ret['max_message_id']
else: # nocoverage
first_in_realm = False
prompt_for_invites = False
# The current tutorial doesn't super make sense for logged-out users.
needs_tutorial = False
furthest_read_time = get_furthest_read_time(user_profile)
# We pick a language for the user as follows:
# * First priority is the language in the URL, for debugging.
# * If not in the URL, we use the language from the user's settings.
request_language = translation.get_language_from_path(request.path_info)
if request_language is None:
request_language = register_ret['default_language']
translation.activate(request_language)
# We also save the language to the user's session, so that
# something reasonable will happen in logged-in portico pages.
request.session[translation.LANGUAGE_SESSION_KEY] = translation.get_language()
two_fa_enabled = settings.TWO_FACTOR_AUTHENTICATION_ENABLED and user_profile is not None
# Pass parameters to the client-side JavaScript code.
# These end up in a global JavaScript Object named 'page_params'.
page_params = dict(
# Server settings.
debug_mode = settings.DEBUG,
test_suite = settings.TEST_SUITE,
poll_timeout = settings.POLL_TIMEOUT,
insecure_desktop_app = insecure_desktop_app,
login_page = settings.HOME_NOT_LOGGED_IN,
root_domain_uri = settings.ROOT_DOMAIN_URI,
save_stacktraces = settings.SAVE_FRONTEND_STACKTRACES,
warn_no_email = settings.WARN_NO_EMAIL,
search_pills_enabled = settings.SEARCH_PILLS_ENABLED,
# Misc. extra data.
initial_servertime = time.time(), # Used for calculating relative presence age
default_language_name = get_language_name(register_ret['default_language']),
language_list_dbl_col = get_language_list_for_templates(register_ret['default_language']),
language_list = get_language_list(),
needs_tutorial = needs_tutorial,
first_in_realm = first_in_realm,
prompt_for_invites = prompt_for_invites,
furthest_read_time = furthest_read_time,
has_mobile_devices = user_profile is not None and num_push_devices_for_user(user_profile) > 0,
bot_types = get_bot_types(user_profile),
two_fa_enabled = two_fa_enabled,
# Adding two_fa_enabled as condition saves us 3 queries when
# 2FA is not enabled.
two_fa_enabled_user = two_fa_enabled and bool(default_device(user_profile)),
)
undesired_register_ret_fields = [
'streams',
]
for field_name in set(register_ret.keys()) - set(undesired_register_ret_fields):
page_params[field_name] = register_ret[field_name]
if narrow_stream is not None:
# In narrow_stream context, initial pointer is just latest message
recipient = narrow_stream.recipient
try:
initial_pointer = Message.objects.filter(recipient=recipient).order_by('id').reverse()[0].id
except IndexError:
initial_pointer = -1
page_params["narrow_stream"] = narrow_stream.name
if narrow_topic is not None:
page_params["narrow_topic"] = narrow_topic
page_params["narrow"] = [dict(operator=term[0], operand=term[1]) for term in narrow]
page_params["max_message_id"] = initial_pointer
page_params["pointer"] = initial_pointer
page_params["enable_desktop_notifications"] = False
statsd.incr('views.home')
show_invites, show_add_streams = compute_show_invites_and_add_streams(user_profile)
show_billing = False
show_plans = False
if settings.CORPORATE_ENABLED and user_profile is not None:
from corporate.models import Customer, CustomerPlan
if user_profile.is_billing_admin or user_profile.is_realm_admin:
customer = Customer.objects.filter(realm=user_profile.realm).first()
if customer is not None and CustomerPlan.objects.filter(customer=customer).exists():
show_billing = True
if user_profile.realm.plan_type == Realm.LIMITED:
show_plans = True
request._log_data['extra'] = "[{}]".format(register_ret["queue_id"])
page_params['translation_data'] = {}
if request_language != 'en':
page_params['translation_data'] = get_language_translation_data(request_language)
csp_nonce = generate_random_token(48)
if user_profile is not None:
color_scheme = user_profile.color_scheme
is_guest = user_profile.is_guest
is_realm_owner = user_profile.is_realm_owner
is_realm_admin = user_profile.is_realm_admin
show_webathena = user_profile.realm.webathena_enabled
else: # nocoverage
color_scheme = UserProfile.COLOR_SCHEME_AUTOMATIC
is_guest = False
is_realm_admin = False
is_realm_owner = False
show_webathena = False
navbar_logo_url = compute_navbar_logo_url(page_params)
response = render(request, 'zerver/app/index.html',
context={'user_profile': user_profile,
'page_params': page_params,
'csp_nonce': csp_nonce,
'search_pills_enabled': settings.SEARCH_PILLS_ENABLED,
'show_invites': show_invites,
'show_add_streams': show_add_streams,
'show_billing': show_billing,
'corporate_enabled': settings.CORPORATE_ENABLED,
'show_plans': show_plans,
'is_owner': is_realm_owner,
'is_admin': is_realm_admin,
'is_guest': is_guest,
'color_scheme': color_scheme,
'navbar_logo_url': navbar_logo_url,
'show_webathena': show_webathena,
'embedded': narrow_stream is not None,
'invite_as': PreregistrationUser.INVITE_AS,
'max_file_upload_size_mib': settings.MAX_FILE_UPLOAD_SIZE,
})
patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)
return response
@zulip_login_required
def desktop_home(request: HttpRequest) -> HttpResponse:
return HttpResponseRedirect(reverse('zerver.views.home.home'))
|
the-stack_106_22256 | # coding=utf-8
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoder/decoder for dm_env.specs.Array (and subclasses).
"""
from typing import Any, Dict, List, Optional, Tuple, Union
import dm_env
from dm_env import specs
import numpy as np
_ENVIRONMENT_SPEC_NAMES = [
'observation_spec',
'action_spec',
'reward_spec',
'discount_spec',
]
def encode_environment_specs(
env: Optional[dm_env.Environment]) -> Dict[str, Any]:
"""Encodes all the specs from a given environment."""
if env:
return {
'observation_spec': encode(env.observation_spec()),
'action_spec': encode(env.action_spec()),
'reward_spec': encode(env.reward_spec()),
'discount_spec': encode(env.discount_spec()),
}
return {}
def decode_environment_specs(
encoded_specs: Dict[str, Any]) -> Dict[str, Optional[specs.Array]]:
"""Decodes all the specs of an environment."""
if encoded_specs:
return {spec_name: decode(encoded_specs[spec_name])
for spec_name in _ENVIRONMENT_SPEC_NAMES}
return {spec_name: None for spec_name in _ENVIRONMENT_SPEC_NAMES}
def _array_spec_to_dict(array_spec: specs.Array) -> Dict[str, Any]:
"""Encodes an Array spec as a dictionary."""
dict_spec = {
'shape': np.array(array_spec.shape, dtype=np.int64),
'dtype': str(array_spec.dtype),
'name': array_spec.name,
}
if isinstance(array_spec, specs.BoundedArray):
dict_spec.update({
'minimum': array_spec.minimum,
'maximum': array_spec.maximum,
})
if isinstance(array_spec, specs.DiscreteArray):
dict_spec.update({'num_values': array_spec.num_values})
return dict_spec
def encode(
spec: Union[specs.Array, List[Any], Tuple[Any], Dict[str, Any]]
) -> Union[List[Any], Tuple[Any], Dict[str, Any]]:
"""Encodes `spec` using plain Python objects.
This function supports bare Array specs, lists of Array specs, Tuples of Array
specs, Dicts of string to Array specs and any combination of these things such
as Dict[str, Tuple[List[Array, Array]]].
Args:
spec: The actual spec to encode.
Returns:
The same spec encoded in a way that can be serialized to disk.
Raises:
TypeError: When the argument is not among the supported types.
"""
if isinstance(spec, specs.Array):
return _array_spec_to_dict(spec)
if isinstance(spec, list):
return [encode(x) for x in spec]
if isinstance(spec, tuple):
return tuple((encode(x) for x in spec))
if isinstance(spec, dict):
return {k: encode(v) for k, v in spec.items()}
raise TypeError(
'encode() should be called with an argument of type specs.Array (and '
f'subclasses), list, tuple or dict. Found {type(spec)}: {spec}.')
def decode(
spec: Union[List[Any], Tuple[Any], Dict[str, Any]]
) -> Union[specs.Array, List[Any], Tuple[Any], Dict[str, Any]]:
"""Parses `spec` into the supported dm_env spec formats."""
if isinstance(spec, dict):
if 'shape' in spec and 'dtype' in spec:
shape = spec['shape'] if spec['shape'] is not None else ()
if 'num_values' in spec:
# DiscreteArray case.
return specs.DiscreteArray(
num_values=spec['num_values'],
dtype=spec['dtype'],
name=spec['name'])
elif 'minimum' in spec and 'maximum' in spec:
# BoundedArray case.
return specs.BoundedArray(
shape=shape,
dtype=spec['dtype'],
minimum=spec['minimum'],
maximum=spec['maximum'],
name=spec['name'])
else:
# Base Array spec case.
return specs.Array(shape=shape, dtype=spec['dtype'], name=spec['name'])
# Recursively decode array elements.
return {k: decode(v) for k, v in spec.items()}
elif isinstance(spec, list):
return [decode(x) for x in spec]
elif isinstance(spec, tuple):
return tuple(decode(x) for x in spec)
raise TypeError(
'decode() should be called with an argument of type list, tuple or dict.'
f' Found: {type(spec)}: {spec}.')
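if __name__ == '__main__':
  # Minimal round-trip sketch (the spec values below are illustrative, not part of
  # the library): encode a nested spec structure into plain Python objects and
  # decode it back.
  example_specs = {
      'observation': specs.BoundedArray(
          shape=(3,), dtype=np.float32, minimum=0.0, maximum=1.0, name='observation'),
      'action': specs.DiscreteArray(num_values=4, name='action'),
  }
  roundtripped = decode(encode(example_specs))
  print(roundtripped)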
|
the-stack_106_22258 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 21 21:43:08 2022
@author: Charissa
"""
#backtracking
import numpy as np
import math
import copy
from numpy.linalg import norm
from conversiontxtfile import converttolist
from conversiontxtfile import converttoarray
from checkdata import SVFinder
latlist = converttolist('testdata.txt')
def gramschmidts(latticelist):
'''
    Gram-Schmidt-style orthogonalisation with integer rounding.
    Parameters
    ----------
    latticelist : list
        a list of basis vectors of the NTRU lattice
    Returns
    -------
    newlist : list
        the reduced basis vectors; each projection coefficient is rounded to the
        nearest integer, so the output stays inside the lattice and is only
        approximately orthogonal
'''
newlist = []
newlist.append(latticelist[0])
for i in range(1,len(latticelist)):
nextvector = np.zeros(len(latticelist[i]))
nextvector = latticelist[i]
for j in range(len(newlist)):
nextvector = nextvector - round(np.dot(latticelist[i],newlist[j])/(norm(newlist[j],2))**2) * newlist[j]
newlist.append(nextvector)
return newlist
initial = SVFinder(gramschmidts(latlist))[0]
def checknorm(l):
val = np.zeros(22)
for i in range(len(l)):
val+=l[i]*latlist[i]
return norm(val,2)
# Babai-style rounding: for each coordinate keep whichever neighbouring integer
# (floor or floor + 1) gives the shorter lattice vector.
for i in range(len(initial)):
left = math.floor(initial[i])
leftlist = copy.deepcopy(initial)
leftlist[i] = left
right = round(left+1)
rightlist = copy.deepcopy(initial)
rightlist[i] = right
if checknorm(leftlist)<checknorm(rightlist):
initial = leftlist
elif checknorm(leftlist)>checknorm(rightlist):
initial = rightlist
else:
initial = leftlist
print(initial, checknorm(initial))
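if __name__ == '__main__':
    # Small sanity check on a toy 2-D basis (illustrative values only): because the
    # projection coefficients are rounded, the second vector is size-reduced against
    # the first rather than made exactly orthogonal.
    toy_basis = [np.array([3.0, 1.0]), np.array([2.0, 2.0])]
    print(gramschmidts(toy_basis))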
|
the-stack_106_22260 | #! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
BINARY = 'binary'
CATEGORY = 'category'
INT = 'int'
FLOAT = 'float'
SPACE = 'space'
NUMERICAL = 'numerical'
SET = 'set'
BAG = 'bag'
TEXT = 'text'
SEQUENCE = 'sequence'
TIMESERIES = 'timeseries'
IMAGE = 'image'
AUDIO = 'audio'
DATE = 'date'
H3 = 'h3'
VECTOR = 'vector'
HEIGHT = 'height'
WIDTH = 'width'
CROP_OR_PAD = 'crop_or_pad'
NUM_CHANNELS = 'num_channels'
INTERPOLATE = 'interpolate'
LOSS = 'loss'
EVAL_LOSS = 'eval_loss'
TRAIN_MEAN_LOSS = 'train_mean_loss'
SOFTMAX_CROSS_ENTROPY = 'softmax_cross_entropy'
SIGMOID_CROSS_ENTROPY = 'sigmoid_cross_entropy'
SAMPLED_SOFTMAX_CROSS_ENTROPY = 'sampled_softmax_cross_entropy'
ACCURACY = 'accuracy'
HITS_AT_K = 'hits_at_k'
MEAN_HITS_AT_K = 'mean_hits_at_k'
ERROR = 'error'
ABSOLUTE_ERROR = 'absolute_error'
SQUARED_ERROR = 'squared_error'
MEAN_SQUARED_ERROR = 'mean_squared_error'
MEAN_ABSOLUTE_ERROR = 'mean_absolute_error'
R2 = 'r2'
EDIT_DISTANCE = 'edit_distance'
PERPLEXITY = 'perplexity'
JACCARD = 'jaccard'
PREDICTIONS = 'predictions'
TOP_K_PREDICTIONS = 'top_k_predictions'
PROBABILITY = 'probability'
PROBABILITIES = 'probabilities'
TOKEN_ACCURACY = 'token_accuracy'
LAST_ACCURACY = 'last_accuracy'
SEQUENCE_ACCURACY = 'sequence_accuracy'
LAST_PROBABILTIES = 'last_probabilities'
LAST_PREDICTIONS = 'last_predictions'
LENGTHS = 'lengths'
TIED = 'tied'
COMBINED = 'combined'
PREPROCESSING = 'preprocessing'
FILL_WITH_CONST = 'fill_with_const'
FILL_WITH_MODE = 'fill_with_mode'
FILL_WITH_MEAN = 'fill_with_mean'
BACKFILL = 'backfill'
DROP_ROW = 'drop_row'
METRIC = 'metric'
PREDICTION = 'prediction'
LOGITS = 'logits'
HIDDEN = 'hidden'
LAST_HIDDEN = 'last_hidden'
PROJECTION_INPUT = 'projection_input'
SUM = 'sum'
APPEND = 'append'
SEQ_SUM = 'seq_sum'
AVG_EXP = 'avg_exp'
TRAINING = 'training'
VALIDATION = 'validation'
TEST = 'test'
SPLIT = 'split'
FULL = 'full'
HYPEROPT = 'hyperopt'
STRATEGY = 'strategy'
EXECUTOR = 'executor'
MINIMIZE = 'minimize'
MAXIMIZE = 'maximize'
SAMPLER = 'sampler'
NAME = 'name'
COLUMN = 'column'
TYPE = 'type'
RAY = 'ray'
PROC_COLUMN = 'proc_column'
CHECKSUM = 'checksum'
HDF5 = 'hdf5'
PARQUET = 'parquet'
|
the-stack_106_22261 | import random
class Hat:
def __init__(self, **kwargs):
self.contents = []
for k, v in kwargs.items():
for i in range(v):
self.contents.append(k)
def draw(self, n):
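        # Draw n balls at random without replacement; note this mutates self.contents.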
total = self.contents
if n >= len(total):
return self.contents
out = []
for i in range(n):
k = random.randrange(0, len(total))
out.append(total[k])
total.pop(k)
return out
def experiment(hat, expected_balls, num_balls_drawn, num_experiments):
num_succeeded = 0
save = hat.contents.copy()
for i in range(num_experiments):
hat.contents = save.copy()
drawn = hat.draw(num_balls_drawn)
drawn_balls = {}
for j in drawn:
drawn_balls[j] = drawn_balls.get(j, 0) + 1
b = True
for k, v in expected_balls.items():
if drawn_balls.get(k, 0) < v:
b = False
break
if b:
num_succeeded += 1
return num_succeeded/num_experiments
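if __name__ == '__main__':
    # Usage sketch (the numbers are illustrative): estimate the probability of drawing
    # at least 2 red and 1 green ball when 4 balls are pulled from this hat.
    hat = Hat(red=5, green=2, blue=4)
    probability = experiment(hat=hat,
                             expected_balls={'red': 2, 'green': 1},
                             num_balls_drawn=4,
                             num_experiments=2000)
    print(probability)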
|
the-stack_106_22263 | import requests
from lxml.html import fromstring
from itertools import cycle
import traceback
def get_proxies():
url = 'https://free-proxy-list.net/'
response = requests.get(url)
parser = fromstring(response.text)
proxies = set()
for i in parser.xpath('//tbody/tr')[:10]:
if i.xpath('.//td[7][contains(text(),"yes")]'):
#Grabbing IP and corresponding PORT
proxy = ":".join([i.xpath('.//td[1]/text()')[0], i.xpath('.//td[2]/text()')[0]])
proxies.add(proxy)
return proxies
proxies = get_proxies()
proxy_pool = cycle(proxies)
url = 'https://httpbin.org/ip'
for i in range(1,11):
#Get a proxy from the pool
proxy = next(proxy_pool)
print("Request #%d"%i)
try:
response = requests.get(url,proxies={"http": proxy, "https": proxy})
print(response.json())
except:
        # Most free proxies will often get connection errors. You would have to retry the
        # entire request using another proxy to make it work. We just skip retries here, as
        # that is beyond the scope of this tutorial and we are only downloading a single URL.
print("Skipping. Connnection error") |
the-stack_106_22264 | import re
import requests
import CralwerSet.connect_mysql as connect_mysql
import threading
import CralwerSet.schedule as schedule
import time
import datetime
import json
from requests.packages import urllib3
urllib3.disable_warnings()
class mythread(threading.Thread):
def __init__(self, name, sc):
threading.Thread.__init__(self)
self.name = name
self.obj = sc
def run(self):
daemon(self.name, self.obj)
def get_imgs(url):
while True:
try:
response = requests.get(url, headers=headers, verify=False).text
result = json.loads(response)
img_list = result['data']['item']['images'][:5]
break
except KeyError:
print(url)
print("网页请求失败")
return True, False, True
except:
            print('Retrying...')
time.sleep(500)
imgs = json.dumps(img_list)
popularity = int(result['data']['item']['favcount'])
evaluates = result['data']['seller']['evaluates']
grade = {}
for each in evaluates:
        # Check whether each rating meets the minimum requirement
if float(each['score']) < 4.6:
return False, False, False
if each['title'] == '宝贝描述':
title = 'bb'
elif each['title'] == '卖家服务':
title = 'mj'
else:
title = 'wl'
grade[title] = each['score']
return imgs, popularity, json.dumps(grade)
def daemon(name, sc):
conn_t = connect_mysql.w_shark_erp()
cur_t = conn_t.cursor()
while True:
try:
info = sc.pop()
except IndexError as e:
print(e)
return
url = f"https://acs.m.taobao.com/h5/mtop.taobao.detail.getdetail/6.0/?data=%7B%22itemNumId%22%3A%22{info[0]}%22%7D"
imgs, popularity, grade = get_imgs(url)
if not imgs:
            print(info[0], 'does not meet the criteria')
            sql = f"""update cm_commodity set NEED=2 where URL_ID={info[0]} limit 1;"""
            conn_t.ping(True)
            cur_t.execute(sql)
            conn_t.commit()
            continue
        elif imgs and not popularity and grade:
            # Sentinel (True, False, True) from get_imgs means the request failed; skip this item.
            continue
        else:
            print('meets the criteria')
# sql = f"""update cm_commodity set IMG_URL='{imgs}',POPULARITY={popularity},GRADE='{grade}', NEED=1 where URL_ID={info[0]} limit 1;"""
sql = f"""update cm_commodity set IMG_URL='{imgs}',POPULARITY={popularity},GRADE='{grade}' where URL_ID={info[0]} limit 1;"""
conn_t.ping(True)
cur_t.execute(sql)
conn_t.commit()
def main():
while True:
try:
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3724.8 Safari/537.36',
}
sql = "select URL_ID from cm_commodity where NEED<>2 AND CREATE_DATE >= '2019-11-28' AND IMG_URL is null order by CREATE_DATE DESC;"
Schedule = schedule.schedule(sql, )
thread_list = []
for i in range(10):
thread_list.append(mythread(str(i + 1), Schedule, ))
for thread in thread_list:
thread.start()
except:
pass
time.sleep(600)
if __name__ == '__main__':
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3724.8 Safari/537.36',
}
sql = "select URL_ID from cm_commodity where CREATE_DATE >= date_sub(now(),interval 2 day) AND IMG_URL is null order by CREATE_DATE DESC limit 10;"
while True:
try:
Schedule = schedule.schedule(sql, connect_mysql.w_shark_erp())
thread_list = []
for i in range(1):
thread_list.append(mythread(str(i + 1), Schedule, ))
for thread in thread_list:
thread.start()
time.sleep(1)
while True:
if not len(Schedule.classes):
print("新一轮数据更新")
break
else:
time.sleep(6)
continue
except:
print("程序报错,重新开始")
pass
|
the-stack_106_22268 | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2019, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
'''Python interface to Linux process control mechanism.
Exports prctl system call.
See also prctl(2).
'''
import ctypes
from ctypes import c_int, c_ulong, c_char, c_char_p, POINTER
import functools
import os
__all__ = ['prctl']
__author__ = 'Brandon Carpenter <[email protected]>'
__version__ = '2.0'
def _prctl(option, *argtypes, **kwargs):
use_result = kwargs.pop('use_result', False)
assert not kwargs
global _libc
try:
_libc
except NameError:
_libc = ctypes.CDLL(None)
typeargs = [c_ulong]
paramflags = [(1, 'option')]
for i, argtype in enumerate(argtypes, 2):
assert 0 < len(argtype) <= 4
typeargs.append(argtype[0])
if len(argtype) == 1:
paramflags.append((1, 'arg%d' % i))
elif len(argtype) == 2:
paramflags.append((argtype[1], 'arg%d' % i))
else:
paramflags.append(argtype[1:])
def errcheck(result, func, args):
if result == -1:
errnum = ctypes.get_errno()
raise OSError(errnum, os.strerror(errnum))
if use_result:
return result
elif use_result is None:
return result, args
result = tuple(value.value for i, value in enumerate(args)
if paramflags[i][0] & 2)
if len(result) == 1:
return result[0]
return result or None
func = ctypes.CFUNCTYPE(c_int, *typeargs, use_errno=True)(
('prctl', _libc), tuple(paramflags))
func.errcheck = errcheck
return functools.partial(func, option)
def prctl(option, *args):
'''Perform control operations on a process using prctl(2).
Perform control operations on a process by passing in one of the
PR_GET_* or PR_SET_* options and any additional arguments as
specified by the prctl documentation. The result varies based on
the option. An OSError exception is raised on error.
See also prctl(2).
'''
return _prototypes[option](*args)
_prototypes = {}
def _prototype(name, number, *argtypes, **kwargs):
globs = globals()
option = 'PR_%s' % (name.upper(),)
func = _prctl(number, *argtypes, **kwargs)
func.__name__ = name
globs[option] = number
_prototypes[number] = globs[name] = func
__all__.extend([name, option])
# ---- Comments and values below were taken from <sys/prctl.h> ----
# Values to pass as first argument to prctl()
_prototype('set_pdeathsig', 1, (c_ulong,))
_prototype('get_pdeathsig', 2, (POINTER(c_int), 2))
# Get/set current->mm->dumpable
_prototype('get_dumpable', 3, use_result=True)
_prototype('set_dumpable', 4, (c_ulong,))
# Get/set unaligned access control bits (if meaningful)
_prototype('get_unalign', 5, (POINTER(c_int), 2))
_prototype('set_unalign', 6, (c_ulong,))
PR_UNALIGN_NOPRINT = 1 # silently fix up unaligned user accesses
PR_UNALIGN_SIGBUS = 2 # generate SIGBUS on unaligned user access
# Get/set whether or not to drop capabilities on setuid() away from
# uid 0 (as per security/commoncap.c)
_prototype('get_keepcaps', 7, use_result=True)
_prototype('set_keepcaps', 8, (c_ulong,))
# Get/set floating-point emulation control bits (if meaningful)
_prototype('get_fpemu', 9, (POINTER(c_int), 2))
_prototype('set_fpemu', 10, (c_ulong,))
PR_FPEMU_NOPRINT = 1 # silently emulate fp operations accesses
PR_FPEMU_SIGFPE = 2 # don't emulate fp operations, send SIGFPE instead
# Get/set floating-point exception mode (if meaningful)
_prototype('get_fpexc', 11, (POINTER(c_int), 2))
_prototype('set_fpexc', 12, (c_ulong,))
PR_FP_EXC_SW_ENABLE = 0x80 # Use FPEXC for FP exception enables
PR_FP_EXC_DIV = 0x010000 # floating point divide by zero
PR_FP_EXC_OVF = 0x020000 # floating point overflow
PR_FP_EXC_UND = 0x040000 # floating point underflow
PR_FP_EXC_RES = 0x080000 # floating point inexact result
PR_FP_EXC_INV = 0x100000 # floating point invalid operation
PR_FP_EXC_DISABLED = 0 # FP exceptions disabled
PR_FP_EXC_NONRECOV = 1 # async non-recoverable exc. mode
PR_FP_EXC_ASYNC = 2 # async recoverable exception mode
PR_FP_EXC_PRECISE = 3 # precise exception mode
# Get/set whether we use statistical process timing or accurate timestamp
# based process timing
_prototype('get_timing', 13, use_result=True)
_prototype('set_timing', 14, (c_ulong,))
PR_TIMING_STATISTICAL = 0 # Normal, traditional, statistical process timing
PR_TIMING_TIMESTAMP = 1 # Accurate timestamp based process timing
# Get/set process name
_prototype('set_name', 15, (c_char_p,))
_prototype('get_name', 16, (POINTER(c_char*16), 2))
# Get/set process endian
_prototype('get_endian', 19, (POINTER(c_int), 2))
_prototype('set_endian', 20, (c_ulong,))
PR_ENDIAN_BIG = 0
PR_ENDIAN_LITTLE = 1 # True little endian mode
PR_ENDIAN_PPC_LITTLE = 2 # "PowerPC" pseudo little endian
# Get/set process seccomp mode
_prototype('get_seccomp', 21, use_result=True)
_prototype('set_seccomp', 22, (c_ulong,))
# Get/set the capability bounding set (as per security/commoncap.c)
_prototype('capbset_read', 23, (c_ulong,), use_result=True)
_prototype('capbset_drop', 24, (c_ulong,))
# Get/set the process' ability to use the timestamp counter instruction
_prototype('get_tsc', 25, (POINTER(c_int), 2))
_prototype('set_tsc', 26, (c_ulong,))
PR_TSC_ENABLE = 1 # allow the use of the timestamp counter
PR_TSC_SIGSEGV = 2 # throw a SIGSEGV instead of reading the TSC
# Get/set securebits (as per security/commoncap.c)
_prototype('get_securebits', 27, use_result=True)
_prototype('set_securebits', 28, (c_ulong,))
# Get/set the timerslack as used by poll/select/nanosleep
# A value of 0 means "use default"
_prototype('set_timerslack', 29, (c_ulong,))
_prototype('get_timerslack', 30, use_result=True)
_prototype('task_perf_events_disable', 31)
_prototype('task_perf_events_enable', 32)
# Set early/late kill mode for hwpoison memory corruption.
# This influences when the process gets killed on a memory corruption.
_prototype('mce_kill', 33, (c_ulong,), (c_ulong,))
PR_MCE_KILL_CLEAR = 0
PR_MCE_KILL_SET = 1
PR_MCE_KILL_LATE = 0
PR_MCE_KILL_EARLY = 1
PR_MCE_KILL_DEFAULT = 2
_prototype('mce_kill_get', 34, use_result=True)
# Tune up process memory map specifics.
_prototype('set_mm', 35, (c_ulong,), (c_ulong,), (c_ulong, 1, 'arg4', 0))
PR_SET_MM_START_CODE = 1
PR_SET_MM_END_CODE = 2
PR_SET_MM_START_DATA = 3
PR_SET_MM_END_DATA = 4
PR_SET_MM_START_STACK = 5
PR_SET_MM_START_BRK = 6
PR_SET_MM_BRK = 7
PR_SET_MM_ARG_START = 8
PR_SET_MM_ARG_END = 9
PR_SET_MM_ENV_START = 10
PR_SET_MM_ENV_END = 11
PR_SET_MM_AUXV = 12
PR_SET_MM_EXE_FILE = 13
PR_SET_MM_MAP = 14
PR_SET_MM_MAP_SIZE = 15
# Set specific pid that is allowed to ptrace the current task.
# A value of 0 mean "no process".
_prototype('set_ptracer', 0x59616d61, (c_ulong,))
PR_SET_PTRACER_ANY = -1
_prototype('set_child_subreaper', 36, (c_ulong,))
_prototype('get_child_subreaper', 37, (POINTER(c_int), 2))
# If no_new_privs is set, then operations that grant new privileges (i.e.
# execve) will either fail or not grant them. This affects suid/sgid,
# file capabilities, and LSMs.
#
# Operations that merely manipulate or drop existing privileges (setresuid,
# capset, etc.) will still work. Drop those privileges if you want them gone.
#
# Changing LSM security domain is considered a new privilege. So, for example,
# asking selinux for a specific new context (e.g. with runcon) will result
# in execve returning -EPERM.
#
# See Documentation/prctl/no_new_privs.txt for more details.
_prototype('set_no_new_privs', 38, (c_ulong,))
_prototype('get_no_new_privs', 39, use_result=True)
_prototype('get_tid_address', 40, (POINTER(c_ulong), 2))
_prototype('set_thp_disable', 41, (c_ulong,))
_prototype('get_thp_disable', 42, use_result=True)
# Tell the kernel to start/stop helping userspace manage bounds tables.
_prototype('mpx_enable_management', 43)
_prototype('mpx_disable_management', 44)
_prototype('set_fp_mode', 45, (c_ulong,))
_prototype('get_fp_mode', 46, use_result=True)
PR_FP_MODE_FR = (1 << 0) # 64b FP registers
PR_FP_MODE_FRE = (1 << 1) # 32b compatibility
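if __name__ == '__main__':
    # Minimal usage sketch (Linux only; the names and values are illustrative).
    # The generated wrappers and the generic prctl() entry point are interchangeable.
    set_name(b'prctl-demo')
    print(get_name())              # e.g. b'prctl-demo'
    print(prctl(PR_GET_DUMPABLE))  # current "dumpable" flag as an int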
|
the-stack_106_22269 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^tests_list', views.tests_list),
url(r'^(?P<test_running_id>\d+)/update/', views.update),
url(r'^(?P<test_running_id>\d+)/rtot/', views.online_test_rtot),
url(r'^(?P<test_running_id>\d+)/success_rate/',
views.online_test_success_rate),
url(r'^(?P<test_running_id>\d+)/rps/', views.online_test_rps),
url(r'^(?P<test_running_id>\d+)/response_codes/',
views.online_test_response_codes),
url(r'^(?P<test_running_id>\d+)/aggregate/', views.online_test_aggregate),
url(r'^(?P<test_id>\d+)/online_page/', views.OnlinePage.as_view())
]
|
the-stack_106_22272 | # Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Identity v2 Project action implementations"""
import logging
from keystoneauth1 import exceptions as ks_exc
from osc_lib.cli import parseractions
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
import six
from openstackclient.i18n import _
LOG = logging.getLogger(__name__)
class CreateProject(command.ShowOne):
_description = _("Create new project")
def get_parser(self, prog_name):
parser = super(CreateProject, self).get_parser(prog_name)
parser.add_argument(
'name',
metavar='<project-name>',
help=_('New project name'),
)
parser.add_argument(
'--description',
metavar='<description>',
help=_('Project description'),
)
enable_group = parser.add_mutually_exclusive_group()
enable_group.add_argument(
'--enable',
action='store_true',
help=_('Enable project (default)'),
)
enable_group.add_argument(
'--disable',
action='store_true',
help=_('Disable project'),
)
parser.add_argument(
'--property',
metavar='<key=value>',
action=parseractions.KeyValueAction,
help=_('Add a property to <name> '
'(repeat option to set multiple properties)'),
)
parser.add_argument(
'--or-show',
action='store_true',
help=_('Return existing project'),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
enabled = True
if parsed_args.disable:
enabled = False
kwargs = {}
if parsed_args.property:
kwargs = parsed_args.property.copy()
try:
project = identity_client.tenants.create(
parsed_args.name,
description=parsed_args.description,
enabled=enabled,
**kwargs
)
except ks_exc.Conflict:
if parsed_args.or_show:
project = utils.find_resource(
identity_client.tenants,
parsed_args.name,
)
LOG.info(_('Returning existing project %s'), project.name)
else:
raise
# TODO(stevemar): Remove the line below when we support multitenancy
project._info.pop('parent_id', None)
return zip(*sorted(six.iteritems(project._info)))
class DeleteProject(command.Command):
_description = _("Delete project(s)")
def get_parser(self, prog_name):
parser = super(DeleteProject, self).get_parser(prog_name)
parser.add_argument(
'projects',
metavar='<project>',
nargs="+",
help=_('Project(s) to delete (name or ID)'),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
errors = 0
for project in parsed_args.projects:
try:
project_obj = utils.find_resource(
identity_client.tenants,
project,
)
identity_client.tenants.delete(project_obj.id)
except Exception as e:
errors += 1
LOG.error(_("Failed to delete project with "
"name or ID '%(project)s': %(e)s"),
{'project': project, 'e': e})
if errors > 0:
total = len(parsed_args.projects)
msg = (_("%(errors)s of %(total)s projects failed "
"to delete.") % {'errors': errors, 'total': total})
raise exceptions.CommandError(msg)
class ListProject(command.Lister):
_description = _("List projects")
def get_parser(self, prog_name):
parser = super(ListProject, self).get_parser(prog_name)
parser.add_argument(
'--long',
action='store_true',
default=False,
help=_('List additional fields in output'),
)
parser.add_argument(
'--sort',
metavar='<key>[:<direction>]',
help=_('Sort output by selected keys and directions (asc or desc) '
'(default: asc), repeat this option to specify multiple '
'keys and directions.'),
)
return parser
def take_action(self, parsed_args):
if parsed_args.long:
columns = ('ID', 'Name', 'Description', 'Enabled')
else:
columns = ('ID', 'Name')
data = self.app.client_manager.identity.tenants.list()
if parsed_args.sort:
data = utils.sort_items(data, parsed_args.sort)
return (columns,
(utils.get_item_properties(
s, columns,
formatters={},
) for s in data))
class SetProject(command.Command):
_description = _("Set project properties")
def get_parser(self, prog_name):
parser = super(SetProject, self).get_parser(prog_name)
parser.add_argument(
'project',
metavar='<project>',
help=_('Project to modify (name or ID)'),
)
parser.add_argument(
'--name',
metavar='<name>',
help=_('Set project name'),
)
parser.add_argument(
'--description',
metavar='<description>',
help=_('Set project description'),
)
enable_group = parser.add_mutually_exclusive_group()
enable_group.add_argument(
'--enable',
action='store_true',
help=_('Enable project'),
)
enable_group.add_argument(
'--disable',
action='store_true',
help=_('Disable project'),
)
parser.add_argument(
'--property',
metavar='<key=value>',
action=parseractions.KeyValueAction,
help=_('Set a project property '
'(repeat option to set multiple properties)'),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
project = utils.find_resource(
identity_client.tenants,
parsed_args.project,
)
kwargs = project._info
if parsed_args.name:
kwargs['name'] = parsed_args.name
if parsed_args.description:
kwargs['description'] = parsed_args.description
if parsed_args.enable:
kwargs['enabled'] = True
if parsed_args.disable:
kwargs['enabled'] = False
if parsed_args.property:
kwargs.update(parsed_args.property)
if 'id' in kwargs:
del kwargs['id']
if 'name' in kwargs:
# Hack around broken Identity API arg names
kwargs['tenant_name'] = kwargs['name']
del kwargs['name']
identity_client.tenants.update(project.id, **kwargs)
class ShowProject(command.ShowOne):
_description = _("Display project details")
def get_parser(self, prog_name):
parser = super(ShowProject, self).get_parser(prog_name)
parser.add_argument(
'project',
metavar='<project>',
help=_('Project to display (name or ID)'),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
info = {}
try:
project = utils.find_resource(
identity_client.tenants,
parsed_args.project,
)
info.update(project._info)
except ks_exc.Forbidden:
auth_ref = self.app.client_manager.auth_ref
if (
parsed_args.project == auth_ref.project_id or
parsed_args.project == auth_ref.project_name
):
# Ask for currently auth'ed project so return it
info = {
'id': auth_ref.project_id,
'name': auth_ref.project_name,
# True because we don't get this far if it is disabled
'enabled': True,
}
else:
raise
# TODO(stevemar): Remove the line below when we support multitenancy
info.pop('parent_id', None)
# NOTE(stevemar): Property handling isn't really supported in Keystone
# and needs a lot of extra handling. Let's reserve the properties that
# the API has and handle the extra top level properties.
reserved = ('name', 'id', 'enabled', 'description')
properties = {}
for k, v in info.items():
if k not in reserved:
# If a key is not in `reserved` it's a property, pop it
info.pop(k)
# If a property has been "unset" it's `None`, so don't show it
if v is not None:
properties[k] = v
info['properties'] = utils.format_dict(properties)
return zip(*sorted(six.iteritems(info)))
class UnsetProject(command.Command):
_description = _("Unset project properties")
def get_parser(self, prog_name):
parser = super(UnsetProject, self).get_parser(prog_name)
parser.add_argument(
'project',
metavar='<project>',
help=_('Project to modify (name or ID)'),
)
parser.add_argument(
'--property',
metavar='<key>',
action='append',
default=[],
help=_('Unset a project property '
'(repeat option to unset multiple properties)'),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
project = utils.find_resource(
identity_client.tenants,
parsed_args.project,
)
kwargs = project._info
for key in parsed_args.property:
if key in kwargs:
kwargs[key] = None
identity_client.tenants.update(project.id, **kwargs)
|
the-stack_106_22273 | import math
import numpy as np
import torch
from mmcv.runner import auto_fp16
from torch import nn
from ..builder import MIDDLE_ENCODERS
from mmdet3d.ops import flat2window, window2flat
import random
import pickle as pkl
import os
@MIDDLE_ENCODERS.register_module()
class SSTInputLayer(nn.Module):
"""
    This is one of the core classes of SST, converting the output of voxel_encoder to SST input.
    There are 3 things to be done in this class:
    1. Regional grouping: assign window indices to each voxel.
    2. Voxel drop and region batching: see our paper for details.
    3. Pre-computing the transformation information for converting flat features ([N x C]) to region features ([R, T, C]). R is the number of regions containing at most T tokens (voxels). See functions flat2window and window2flat for details.
    Main args:
        drop_info (dict): drop configuration for region batching.
        window_shape (tuple[int]): (num_x, num_y). Each window is divided into num_x * num_y pillars (including empty pillars).
        shift_list (list[tuple]): [(shift_x, shift_y), ]. shift_x = 5 means all windows will be shifted by 5 voxels along the positive direction of the x-axis.
        debug: apply strong assertions during development.
"""
def __init__(self,
drop_info,
shifts_list,
window_shape,
point_cloud_range,
voxel_size,
shuffle_voxels=True,
debug=True,
):
super().__init__()
self.fp16_enabled = False
self.meta_drop_info = drop_info
self.shifts_list = shifts_list
self.point_cloud_range = point_cloud_range
self.voxel_size = voxel_size
self.shuffle_voxels = shuffle_voxels
self.debug = debug
self.window_shape = window_shape
@auto_fp16(apply_to=('voxel_feat', ))
def forward(self, voxel_feat, coors):
'''
Args:
voxel_feat: shape=[N, C], N is the voxel num in the batch.
coors: shape=[N, 4], [b, z, y, x]
Returns:
feat_3d_dict: contains region features (feat_3d) of each region batching level. Shape of feat_3d is [num_windows, num_max_tokens, C].
flat2win_inds_list: two dict containing transformation information for non-shifted grouping and shifted grouping, respectively. The two dicts are used in function flat2window and window2flat.
voxel_info: dict containing extra information of each voxel for usage in the backbone.
'''
self.set_drop_info()
voxel_info = {}
coors = coors.long()
if self.shuffle_voxels:
# shuffle the voxels to make the drop process uniform.
num_voxel = len(voxel_feat)
shuffle_inds = torch.randperm(num_voxel)
voxel_feat = voxel_feat[shuffle_inds]
coors = coors[shuffle_inds]
for k, tensor in voxel_info.items():
if isinstance(tensor, torch.Tensor) and len(tensor) == num_voxel:
voxel_info[k] = tensor[shuffle_inds]
voxel_info = self.window_partition(coors, voxel_info)
voxel_info = self.get_voxel_keep_inds(voxel_info, len(self.shifts_list)) # voxel_info is updated in this function
voxel_keep_inds = voxel_info['voxel_keep_inds']
voxel_num_before_drop = len(voxel_feat)
voxel_feat = voxel_feat[voxel_keep_inds]
coors = coors[voxel_keep_inds]
voxel_info['coors'] = coors
# Some other variables need to be dropped.
for k, v in voxel_info.items():
if isinstance(v, torch.Tensor) and len(v) == voxel_num_before_drop:
voxel_info[k] = v[voxel_keep_inds]
flat2win_inds_list = [
self.get_flat2win_inds(voxel_info[f'batch_win_inds_shift{i}'], voxel_info[f'voxel_drop_level_shift{i}'])
for i in range(len(self.shifts_list))
]
if self.debug:
coors_3d_dict_shift0 = flat2window(coors, voxel_info['voxel_drop_level_shift0'], flat2win_inds_list[0], self.drop_info)
coors_2d = window2flat(coors_3d_dict_shift0, flat2win_inds_list[0])
assert (coors_2d == coors).all()
return voxel_feat, flat2win_inds_list, voxel_info
@torch.no_grad()
def get_flat2win_inds(self, batch_win_inds, voxel_drop_lvl):
'''
Args:
            batch_win_inds: shape=[N, ]. Indicates which window a voxel belongs to. Window inds are unique in the whole batch.
voxel_drop_lvl: shape=[N, ]. Indicates batching_level of the window the voxel belongs to.
Returns:
flat2window_inds_dict: contains flat2window_inds of each voxel, shape=[N,]
Determine the voxel position in range [0, num_windows * max_tokens) of each voxel.
'''
device = batch_win_inds.device
flat2window_inds_dict = {}
drop_info = self.drop_info
for dl in drop_info:
dl_mask = voxel_drop_lvl == dl
if not dl_mask.any():
continue
conti_win_inds = self.make_continuous_inds(batch_win_inds[dl_mask])
num_windows = len(torch.unique(conti_win_inds))
max_tokens = drop_info[dl]['max_tokens']
inner_win_inds = self.get_inner_win_inds(conti_win_inds)
flat2window_inds = conti_win_inds * max_tokens + inner_win_inds
flat2window_inds_dict[dl] = (flat2window_inds, torch.where(dl_mask))
if self.debug:
assert inner_win_inds.max() < max_tokens, f'Max inner inds({inner_win_inds.max()}) larger(equal) than {max_tokens}'
assert (flat2window_inds >= 0).all()
max_ind = flat2window_inds.max().item()
assert max_ind < num_windows * max_tokens, f'max_ind({max_ind}) larger than upper bound({num_windows * max_tokens})'
assert max_ind >= (num_windows-1) * max_tokens, f'max_ind({max_ind}) less than lower bound({(num_windows-1) * max_tokens})'
return flat2window_inds_dict
@torch.no_grad()
def get_inner_win_inds(self, win_inds):
'''
        Fast version of get_inner_win_inds_slow
        Args:
            win_inds indicates which window a voxel belongs to. Voxels sharing a window have the same inds.
shape = [N,]
Return:
inner_inds: shape=[N,]. Indicates voxel's id in a window. if M voxels share a window, their inner_inds would be torch.arange(M, dtype=torch.long)
Note that this function might output different results from get_inner_win_inds_slow due to the unstable pytorch sort.
'''
sort_inds, order = win_inds.sort() #sort_inds is like [0,0,0, 1, 2,2] -> [0,1, 2, 0, 0, 1]
roll_inds_left = torch.roll(sort_inds, -1) # [0,0, 1, 2,2,0]
diff = sort_inds - roll_inds_left #[0, 0, -1, -1, 0, 2]
end_pos_mask = diff != 0
bincount = torch.bincount(win_inds)
# assert bincount.max() <= max_tokens
unique_sort_inds, _ = torch.sort(torch.unique(win_inds))
num_tokens_each_win = bincount[unique_sort_inds] #[3, 1, 2]
template = torch.ones_like(win_inds) #[1,1,1, 1, 1,1]
template[end_pos_mask] = (num_tokens_each_win-1) * -1 #[1,1,-2, 0, 1,-1]
inner_inds = torch.cumsum(template, 0) #[1,2,0, 0, 1,0]
inner_inds[end_pos_mask] = num_tokens_each_win #[1,2,3, 1, 1,2]
inner_inds -= 1 #[0,1,2, 0, 0,1]
#recover the order
inner_inds_reorder = -torch.ones_like(win_inds)
inner_inds_reorder[order] = inner_inds
##sanity check
if self.debug:
assert (inner_inds >= 0).all()
assert (inner_inds == 0).sum() == len(unique_sort_inds)
assert (num_tokens_each_win > 0).all()
random_win = unique_sort_inds[random.randint(0, len(unique_sort_inds)-1)]
random_mask = win_inds == random_win
num_voxel_this_win = bincount[random_win].item()
random_inner_inds = inner_inds_reorder[random_mask]
assert len(torch.unique(random_inner_inds)) == num_voxel_this_win
assert random_inner_inds.max() == num_voxel_this_win - 1
assert random_inner_inds.min() == 0
return inner_inds_reorder
def get_inner_win_inds_slow(self, win_inds):
unique_win_inds = torch.unique(win_inds)
inner_inds = -torch.ones_like(win_inds)
for ind in unique_win_inds:
mask = win_inds == ind
num = mask.sum().item()
inner_inds[mask] = torch.arange(num, dtype=win_inds.dtype, device=win_inds.device)
assert (inner_inds >= 0).all()
return inner_inds
def drop_single_shift(self, batch_win_inds):
drop_info = self.drop_info
drop_lvl_per_voxel = -torch.ones_like(batch_win_inds)
inner_win_inds = self.get_inner_win_inds(batch_win_inds)
bincount = torch.bincount(batch_win_inds)
num_per_voxel_before_drop = bincount[batch_win_inds] #
target_num_per_voxel = torch.zeros_like(batch_win_inds)
for dl in drop_info:
max_tokens = drop_info[dl]['max_tokens']
lower, upper = drop_info[dl]['drop_range']
range_mask = (num_per_voxel_before_drop >= lower) & (num_per_voxel_before_drop < upper)
target_num_per_voxel[range_mask] = max_tokens
drop_lvl_per_voxel[range_mask] = dl
if self.debug:
assert (target_num_per_voxel > 0).all()
assert (drop_lvl_per_voxel >= 0).all()
keep_mask = inner_win_inds < target_num_per_voxel
return keep_mask, drop_lvl_per_voxel
@torch.no_grad()
def get_voxel_keep_inds(self, voxel_info, num_shifts):
'''
To make it clear and easy to follow, we do not use loop to process two shifts.
'''
batch_win_inds_s0 = voxel_info['batch_win_inds_shift0']
num_all_voxel = batch_win_inds_s0.shape[0]
voxel_keep_inds = torch.arange(num_all_voxel, device=batch_win_inds_s0.device, dtype=torch.long)
keep_mask_s0, drop_lvl_s0 = self.drop_single_shift(batch_win_inds_s0)
if self.debug:
assert (drop_lvl_s0 >= 0).all()
drop_lvl_s0 = drop_lvl_s0[keep_mask_s0]
voxel_keep_inds = voxel_keep_inds[keep_mask_s0]
batch_win_inds_s0 = batch_win_inds_s0[keep_mask_s0]
if num_shifts == 1:
voxel_info['voxel_keep_inds'] = voxel_keep_inds
voxel_info['voxel_drop_level_shift0'] = drop_lvl_s0
voxel_info['batch_win_inds_shift0'] = batch_win_inds_s0
return voxel_info
batch_win_inds_s1 = voxel_info['batch_win_inds_shift1']
batch_win_inds_s1 = batch_win_inds_s1[keep_mask_s0]
keep_mask_s1, drop_lvl_s1 = self.drop_single_shift(batch_win_inds_s1)
if self.debug:
assert (drop_lvl_s1 >= 0).all()
# drop data in first shift again
drop_lvl_s0 = drop_lvl_s0[keep_mask_s1]
voxel_keep_inds = voxel_keep_inds[keep_mask_s1]
batch_win_inds_s0 = batch_win_inds_s0[keep_mask_s1]
drop_lvl_s1 = drop_lvl_s1[keep_mask_s1]
batch_win_inds_s1 = batch_win_inds_s1[keep_mask_s1]
voxel_info['voxel_keep_inds'] = voxel_keep_inds
voxel_info['voxel_drop_level_shift0'] = drop_lvl_s0
voxel_info['batch_win_inds_shift0'] = batch_win_inds_s0
voxel_info['voxel_drop_level_shift1'] = drop_lvl_s1
voxel_info['batch_win_inds_shift1'] = batch_win_inds_s1
### sanity check
if self.debug:
for dl in self.drop_info:
max_tokens = self.drop_info[dl]['max_tokens']
mask_s0 = drop_lvl_s0 == dl
if not mask_s0.any():
print(f'No voxel belongs to drop_level:{dl} in shift 0')
continue
real_max = torch.bincount(batch_win_inds_s0[mask_s0]).max()
assert real_max <= max_tokens, f'real_max({real_max}) > {max_tokens} in shift0'
mask_s1 = drop_lvl_s1 == dl
if not mask_s1.any():
print(f'No voxel belongs to drop_level:{dl} in shift 1')
continue
real_max = torch.bincount(batch_win_inds_s1[mask_s1]).max()
assert real_max <= max_tokens, f'real_max({real_max}) > {max_tokens} in shift1'
###
return voxel_info
@torch.no_grad()
def window_partition(self, coors, voxel_info):
shifts_list = self.shifts_list
win_shape_x, win_shape_y = self.window_shape
pc_range = self.point_cloud_range
voxel_size = self.voxel_size # using the min voxel size
assert isinstance(voxel_size, tuple)
bev_shape_x = int(np.ceil((pc_range[3] - pc_range[0])/voxel_size[0]))
bev_shape_y = int(np.ceil((pc_range[4] - pc_range[1])/voxel_size[1]))
max_num_win_x = int(np.ceil((bev_shape_x / win_shape_x)) + 1) # plus one here to meet the needs of shift.
max_num_win_y = int(np.ceil((bev_shape_y / win_shape_y)) + 1) # plus one here to meet the needs of shift.
max_num_win_per_sample = max_num_win_x * max_num_win_y
for i in range(len(shifts_list)):
shift_x, shift_y = shifts_list[i]
assert shift_x == 0 or shift_x == win_shape_x // 2, 'Usually ...'
shifted_coors_x = coors[:, 3] + (win_shape_x - shift_x if shift_x > 0 else 0)
shifted_coors_y = coors[:, 2] + (win_shape_y - shift_y if shift_y > 0 else 0)
win_coors_x = shifted_coors_x // win_shape_x
win_coors_y = shifted_coors_y // win_shape_y
batch_win_inds = coors[:, 0] * max_num_win_per_sample + win_coors_x * max_num_win_y + win_coors_y
voxel_info[f'batch_win_inds_shift{i}'] = batch_win_inds
coors_in_win_x = shifted_coors_x % win_shape_x
coors_in_win_y = shifted_coors_y % win_shape_y
voxel_info[f'coors_in_win_shift{i}'] = torch.stack([coors_in_win_x, coors_in_win_y], dim=-1)
return voxel_info
@torch.no_grad()
def make_continuous_inds(self, inds):
'''
Make batch_win_inds continuous, e.g., [1, 3, 4, 6, 10] -> [0, 1, 2, 3, 4].
'''
dtype = inds.dtype
device = inds.device
unique_inds, _ = torch.sort(torch.unique(inds))
num_valid_inds = len(unique_inds)
max_origin_inds = unique_inds.max().item()
canvas = -torch.ones((max_origin_inds+1,), dtype=dtype, device=device)
canvas[unique_inds] = torch.arange(num_valid_inds, dtype=dtype, device=device)
conti_inds = canvas[inds]
if self.debug:
assert conti_inds.max() == len(torch.unique(conti_inds)) - 1, 'Continuity check failed.'
assert conti_inds.min() == 0, '-1 in canvas should not be indexed.'
return conti_inds
def set_drop_info(self):
if hasattr(self, 'drop_info'):
return
meta = self.meta_drop_info
if isinstance(meta, tuple):
if self.training:
self.drop_info = meta[0]
else:
self.drop_info = meta[1]
else:
self.drop_info = meta
            print(f'drop_info is set to {self.drop_info}, in input_layer')
|
the-stack_106_22275 | import core
try:
from core.settings.local import DEBUG
except ImportError:
DEBUG = False
try:
from core.settings.local import ENABLE_DEBUG_TOOLBAR
except ImportError:
ENABLE_DEBUG_TOOLBAR = False
ADMINS = (
('Sergey Podolsky', '[email protected]'),
)
MANAGERS = ADMINS
LANGUAGE_CODE = 'en-us'
LANGUAGE_NAME = 'English'
LANGUAGE_NAME_LOCAL = 'English'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Site ID
SITE_ID = 1
VERSIONS = {
'core': core.__versionstr__,
}
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# List of callables that know how to import templates from various sources.
MIDDLEWARE = (
'core.ddosprotection.DDOSMiddleware',
'django.middleware.cache.UpdateCacheMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware', # for AJAX POST protection with csrf
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
'core.oauth.CustomSocialAuthException.CustomSocialAuthExceptionMiddleware',
)
ROOT_URLCONF = 'core.urls'
# Auth
AUTHENTICATION_BACKENDS = (
'core.oauth.Cernauth2.CernAuthOIDC',
'social_core.backends.google.GoogleOAuth2',
'social_core.backends.github.GithubOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
AUTH_USER_MODEL = 'oauth.BPUser'
SOCIAL_AUTH_URL_NAMESPACE = 'social'
SOCIAL_AUTH_STRATEGY = 'social_django.strategy.DjangoStrategy'
SOCIAL_AUTH_STORAGE = 'social_django.models.DjangoStorage'
REDIRECT_STATE = False
LOGIN_URL = 'login'
#SOCIAL_AUTH_EXTRA_DATA = True
SOCIAL_AUTH_LOGIN_ERROR_URL = '/loginerror/'
LOGIN_REDIRECT_URL = '/'
# Google OAuth2 (google-oauth2)
SOCIAL_AUTH_GOOGLE_OAUTH2_IGNORE_DEFAULT_SCOPE = True
SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = [
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/userinfo.profile'
]
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.social_user',
'social_core.pipeline.user.get_username',
'social_core.pipeline.user.create_user',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.social_auth.load_extra_data',
'social_core.pipeline.user.user_details',
)
# installed apps
INSTALLED_APPS_DJANGO_FRAMEWORK = (
# Django framework
'social_django',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
)
INSTALLED_APPS_DJANGO_PLUGINS = (
# Django plugins
'rest_framework',
'django_datatables_view',
'djangojs',
'django_extensions',
)
COMMON_INSTALLED_APPS = \
INSTALLED_APPS_DJANGO_FRAMEWORK + \
INSTALLED_APPS_DJANGO_PLUGINS
INSTALLED_APPS_BIGPANDAMON_CORE = (
# BigPanDAmon core
'core.oauth',
'core.common',
'core.pandajob',
'core.schedresource',
'core.dashboards',
'core.status_summary',
)
INSTALLED_APPS_EXTRA = [
# "core.admin",
"core.art",
"core.buildmonitor",
"core.compare",
"core.errorsscattering",
"core.filebrowser",
# "core.globalpage",
"core.globalshares",
"core.grafana",
"core.harvester",
"core.iDDS",
"core.mlflowdynamic",
"core.monitor",
"core.oi",
"core.pbm",
"core.reports",
"core.runningprod"
]
if len(INSTALLED_APPS_EXTRA) > 0:
INSTALLED_APPS_BIGPANDAMON_CORE += tuple([str(app_name) for app_name in INSTALLED_APPS_EXTRA])
# Django.js config
JS_I18N_APPS = ()
JS_I18N_APPS_EXCLUDE = INSTALLED_APPS_BIGPANDAMON_CORE
INSTALLED_APPS = COMMON_INSTALLED_APPS + INSTALLED_APPS_BIGPANDAMON_CORE
if DEBUG and ENABLE_DEBUG_TOOLBAR:
MIDDLEWARE += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
INSTALLED_APPS += (
'debug_toolbar',
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
INTERNAL_IPS = ('127.0.0.1', '192.168.0.1', '188.184.69.142')
DEBUG_TOOLBAR_CONFIG = {'INTERCEPT_REDIRECTS': False, }
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.versions.VersionsPanel',
# Throwing AttributeError: 'module' object has no attribute 'getrusage'
'debug_toolbar.panels.timer.TimerPanel',
# 'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.signals.SignalsPanel',
# 'debug_toolbar.panels.logger.LoggingPanel',
)
SESSION_SERIALIZER = "core.libs.CustomJSONSerializer.CustomJSONSerializer"
SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
# email
EMAIL_SUBJECT_PREFIX = '[BigPanDAmon]'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
DKB_CAMPAIGN_URL = 'http://aiatlas172.cern.ch:5080/campaign/stat'
ML_FLOW_UPSTREAM = 'https://bigpanda-mlflow.web.cern.ch/'
DATA_CAROUSEL_MAIL_DELAY_DAYS = 10
DATA_CAROUSEL_MAIL_REPEAT = 1
DATA_CARUSEL_MAIL_RECIPIENTS = ['[email protected]', '[email protected]', '[email protected]']
|
the-stack_106_22276 | #!/usr/bin/env python3
"""Sample Sheet generation for BCL2FASTQ pipeline
"""
# --- standard library imports
#
import sys
import os
import logging
import argparse
from collections import namedtuple
import xml.etree.ElementTree as ET
#--- third-party imports
#
import requests
import yaml
#--- project specific imports
#
# add lib dir for this pipeline installation to PYTHONPATH
LIB_PATH = os.path.abspath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "lib"))
if LIB_PATH not in sys.path:
sys.path.insert(0, LIB_PATH)
from config import rest_services
from config import bcl2fastq_conf
from pipelines import get_machine_run_flowcell_id
from pipelines import email_for_user
from pipelines import send_mail
from pipelines import is_devel_version
from pipelines import user_mail_mapper
# WARNING changes here, must be reflected in bcl2fastq.py as well
MuxUnit = namedtuple('MuxUnit', ['run_id', 'flowcell_id', 'mux_id', 'lane_ids',
'mux_dir', 'barcode_mismatches', 'requestor_email',
'samplesheet', 'bcl2fastq_custom_args', 'tool'])
__author__ = "Lavanya Veeravalli"
__email__ = "[email protected]"
__copyright__ = "2016 Genome Institute of Singapore"
__license__ = "The MIT License (MIT)"
# global logger
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
'[{asctime}] {levelname:8s} {filename} {message}', style='{'))
logger.addHandler(handler)
SAMPLESHEET_CSV = "*samplesheet.csv"
MUXINFO_CFG = "muxinfo.yaml"
STATUS_CFG = "status.txt"
DEFAULT_BARCODE_MISMATCHES = None
TOOL = "bcl2fastq"
SAMPLESHEET_HEADER = '[Data]'+'\n'+ 'Lane,Sample_ID,Sample_Name,Sample_Plate,' \
'Sample_Well,I7_Index_ID,index,I5_Index_ID,index2,Sample_Project,Description'
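# For orientation, a per-library samplesheet written by generate_samplesheet()
# below looks roughly like this (all values are illustrative placeholders):
#
#   [Data]
#   Lane,Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,index,I5_Index_ID,index2,Sample_Project,Description
#   1,Sample_LIB0001,LIB0001-ACGTACGT,,,,ACGTACGT,,,Project_MUX0001,NGS LibPrep
#   [Reads]
#   151
#   151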
def getdirs(args):
"""gets directories from args and checks existance
"""
rundir = args.rundir
    if not os.path.exists(rundir):
        logger.fatal("rundir '%s' does not exist.\n", rundir)
        sys.exit(1)
    runinfo = os.path.join(rundir, 'RunInfo.xml')
if not os.path.exists(runinfo):
logger.fatal("RunInfo '%s' does not exist under Run directory.\n", runinfo)
sys.exit(1)
outdir = args.outdir
if not os.path.exists(outdir):
logger.fatal("output directory '%s' does not exist.\n", outdir)
sys.exit(1)
return(rundir, outdir, runinfo)
def email_non_bcl(libraryId, runId):
"""send email for non-bcl libraries
"""
if is_devel_version():
toaddr = email_for_user()
else:
toaddr = "[email protected]"
subject = "bcl2fastq conversion not required for {} from run {}.".format(
libraryId, runId)
body = subject + "\n" + "Kindly start custom analysis manually. Thanks."
send_mail(subject, body, toaddr=toaddr, pass_exception=False)
def get_ub_str_index(ub, create_index, bc, non_mux_tech, libraryId):
"""use_bases for index reads
"""
if create_index:
if bc > 0:
ub += 'I'+str(bc)+'n*,'
elif bc < 0:
if non_mux_tech or "MUX" not in libraryId:
ub += 'I*' + ','
else:
ub += 'n*' + ','
else:
if bc > 0:
ub += 'I'+str(bc)+'n*,'
else:
ub += 'n*'+','
return ub
def generate_usebases(barcode_lens, runinfo, create_index, non_mux_tech, libraryId):
"""generate use_bases param
"""
tree = ET.parse(runinfo)
root = tree.getroot()
ub_list = dict()
readLength_list = []
# for each lane and its barcode lengths
for k, v in sorted(barcode_lens.items()):
# v is list of barcode_len tuples
assert len(set(v)) == 1, ("Different barcode length in lane {}".format(k))
bc1, bc2 = v[0]# since all v's are the same
ub = ""
for read in root.iter('Read'):
numcyc = int(read.attrib['NumCycles'])
if read.attrib['IsIndexedRead'] == 'N':
ub += 'Y*,'
readLength_list.append(numcyc)
elif read.attrib['IsIndexedRead'] == 'Y':
if read.attrib['Number'] == '2': ### BC1
ub = get_ub_str_index(ub, create_index, bc1, non_mux_tech, libraryId)
elif read.attrib['Number'] == '3': ### BC2
ub = get_ub_str_index(ub, create_index, bc2, non_mux_tech, libraryId)
ub = ub[:-1]
ub_list[k] = ub
return ub_list, readLength_list
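# The per-lane use_bases strings built above follow the bcl2fastq
# --use-bases-mask convention, one token per <Read> element in RunInfo.xml.
# As a worked example (not taken from a real run): a paired-end run with a
# single 8 bp index and create_index=False yields "Y*,I8n*,n*,Y*", while a
# non-indexed lane yields "Y*,n*,n*,Y*".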
def get_rest_data(run_num, test_server=None):
""" Get rest info from ELM
"""
if test_server:
rest_url = rest_services['run_details']['testing'].replace("run_num", run_num)
logger.info("development server")
else:
rest_url = rest_services['run_details']['production'].replace("run_num", run_num)
logger.info("production server")
response = requests.get(rest_url)
if response.status_code != requests.codes.ok:
response.raise_for_status()
sys.exit(1)
rest_data = response.json()
logger.debug("rest_data from %s: %s", rest_url, rest_data)
return rest_data
def generate_samplesheet(rest_data, flowcellid, outdir, runinfo):
"""Generates sample sheet, mux_info and bcl2fastq custom params
"""
barcode_lens = {}
mux_units = dict()
lib_list = dict()
run_id = rest_data['runId']
muxinfo_cfg = os.path.join(outdir, MUXINFO_CFG)
non_mux_tech = False
for rows in rest_data['lanes']:
BCL_Mismatch = []
tool = []
if 'requestor' in rows:
requestor = rows['requestor']
requestor_email = user_mail_mapper(requestor)
else:
requestor_email = None
pass_bcl2_fastq = False
#MUX library
if "MUX" in rows['libraryId']:
for child in rows['Children']:
if 'BCL_Mismatch' in child:
BCL_Mismatch.append(child['BCL_Mismatch'])
#No analysis required
if any(libtech in child['libtech'] for libtech in bcl2fastq_conf['non_bcl_tech']):
logger.info("send_mail: bcl not required for %s", rows['libraryId'])
email_non_bcl(rows['libraryId'], rest_data['runId'])
pass_bcl2_fastq = True
break
#Non-mux libraries like Sarah's
if any(libtech in child['libtech'] for libtech in bcl2fastq_conf['non_mux_tech']):
sample = rows['laneId']+',Sample_'+rows['libraryId']+','+rows['libraryId']+ \
'-NoIndex'+',,,,,,,'+'Project_'+rows['libraryId']+','+child['libtech']
lib_list.setdefault(rows['libraryId'], []).append(sample)
index_lens = (-1, -1)
non_mux_tech = True
barcode_lens.setdefault(rows['laneId'], []).append(index_lens)
break
#tool info
tool = [v for k, v in bcl2fastq_conf['tool'].items() if k in child['libtech']]
if "-" in child['barcode']:
# dual index
index = child['barcode'].split('-')
sample = rows['laneId']+',Sample_'+child['libraryId']+','+ \
child['libraryId']+'-'+child['barcode']+',,,,'+ index[0] +',,'+ \
index[1] + ',' +'Project_'+rows['libraryId']+','+child['libtech']
index_lens = (len((index[0])), len((index[1])))
else:
sample = rows['laneId']+',Sample_'+child['libraryId']+','+ \
child['libraryId']+'-'+child['barcode']+',,,,'+child['barcode']+',,,'\
+'Project_'+rows['libraryId']+','+child['libtech']
index_lens = (len(child['barcode']), -1)
barcode_lens.setdefault(rows['laneId'], []).append(index_lens)
lib_list.setdefault(rows['libraryId'], []).append(sample)
else:
#No analysis required
if rows['libtech'] in bcl2fastq_conf['non_bcl_tech']:
logger.info("send_mail: bcl not required for %s", rows['libraryId'])
email_non_bcl(rows['libraryId'], rest_data['runId'])
pass_bcl2_fastq = True
continue
sample = rows['laneId']+',Sample_'+rows['libraryId']+','+rows['libraryId']+ \
'-NoIndex'+',,,,,,,'+'Project_'+rows['libraryId']+','+rows['libtech']
lib_list.setdefault(rows['libraryId'], []).append(sample)
index_lens = (-1, -1)
barcode_lens.setdefault(rows['laneId'], []).append(index_lens)
tool = [v for k, v in bcl2fastq_conf['tool'].items() if k in rows['libtech']]
if pass_bcl2_fastq:
continue
#Barcode mismatch has to be the same for all the libraries in one MUX.
#Otherwise default mismatch value to be used
if len(set(BCL_Mismatch)) == 1:
barcode_mismatches = BCL_Mismatch[0]
else:
barcode_mismatches = DEFAULT_BARCODE_MISMATCHES
if len(set(tool)) == 1:
tool_name = tool[0]
else:
tool_name = TOOL
        #Check adapter trimming
if 'trimadapt' in rows and rows['trimadapt']:
adapt_seq = rows.get('adapterseq').split(',')
lib_list.setdefault(rows['libraryId'], []).append('[Settings]')
for seq in adapt_seq:
reads = seq.split(':')
if reads[0].strip() == "Read 1":
adapter = "Adapter," + reads[1].lstrip()
lib_list.setdefault(rows['libraryId'], []).append(adapter)
elif reads[0].strip() == "Read 2":
adapter = "AdapterRead2," + reads[1].lstrip()
lib_list.setdefault(rows['libraryId'], []).append(adapter)
samplesheet = os.path.abspath(os.path.join(outdir, rows['libraryId'] + "_samplesheet.csv"))
create_index = False
if 'indexreads' in rows and rows['indexreads']:
create_index = True
usebases, readLength_list = generate_usebases(barcode_lens, runinfo, create_index, \
non_mux_tech, rows['libraryId'])
use_bases_mask = " --use-bases-mask " + rows['laneId'] + ":" + usebases[rows['laneId']]
bcl2fastq_custom_args = use_bases_mask
if 'indexreads' in rows and rows['indexreads']:
bcl2fastq_custom_args = " ".join([bcl2fastq_custom_args, \
bcl2fastq_conf['bcl2fastq_custom_args']['indexreads']])
readLength_list.sort()
#if barcode_lens:
del barcode_lens[rows['laneId']]
#bcl2fastq_custom_args to be added if any of the R1 or R2 less than minReadLength
if readLength_list[0] < bcl2fastq_conf['minReadLength']:
minReadLength_params = bcl2fastq_conf['bcl2fastq_custom_args']['minReadLength']
param_a = " " + minReadLength_params[0] + " " + str(readLength_list[0])
param_b = " " + minReadLength_params[1] + " 0"
bcl2fastq_custom_args += param_a
bcl2fastq_custom_args += param_b
mu = MuxUnit._make([run_id, flowcellid, rows['libraryId'], [rows['laneId']], \
'Project_'+ rows['libraryId'], barcode_mismatches, requestor_email, samplesheet, \
[bcl2fastq_custom_args], tool_name])
# merge lane into existing mux if needed
if mu.mux_id in mux_units:
mu_orig = mux_units[mu.mux_id]
assert mu.barcode_mismatches == mu_orig.barcode_mismatches
assert len(mu.lane_ids) == 1# is a list by design but just one element.
#otherwise below fails
lane_ids = mu_orig.lane_ids.extend(mu.lane_ids)
bcl2fastq_custom_args = mu_orig.bcl2fastq_custom_args.append(use_bases_mask)
mu_orig = mu_orig._replace(lane_ids=lane_ids, bcl2fastq_custom_args= \
bcl2fastq_custom_args)
else:
mux_units[mu.mux_id] = mu
#Write muxinfo_cfg and Samplesheet
if mux_units:
with open(muxinfo_cfg, 'w') as fh:
fh.write(yaml.dump([dict(mu._asdict()) for mu in mux_units.values()], \
default_flow_style=True))
for lib, value in lib_list.items():
csv = mux_units[lib].samplesheet
with open(csv, 'w') as fh_out:
fh_out.write(SAMPLESHEET_HEADER + '\n')
for each in value:
fh_out.write(str(each)+ '\n')
fh_out.write('[Reads]' + '\n')
for reads in readLength_list:
fh_out.write(str(reads) + '\n')
return True
else:
return False
def main():
"""
The main function
"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--force-overwrite",
action="store_true",
help="Force overwriting of output files")
parser.add_argument("-r", "--rundir",
dest="rundir",
required=True,
help="rundir, e.g. /mnt/seq/userrig/HS004/HS004-PE-R00139_BC6A7HANXX")
parser.add_argument('-t', "--test-server", action='store_true')
parser.add_argument("-o", "--outdir",
required=True,
dest="outdir",
help="Output directory")
parser.add_argument('-v', '--verbose', action='count', default=0,
help="Increase verbosity")
parser.add_argument('-q', '--quiet', action='count', default=0,
help="Decrease verbosity")
args = parser.parse_args()
    # Repeatable -v and -q for setting logging level.
# See https://www.reddit.com/r/Python/comments/3nctlm/what_python_tools_should_i_be_using_on_every/
# and https://gist.github.com/andreas-wilm/b6031a84a33e652680d4
# script -vv -> DEBUG
# script -v -> INFO
# script -> WARNING
# script -q -> ERROR
# script -qq -> CRITICAL
# script -qqq -> no logging at all
logger.setLevel(logging.WARN + 10*args.quiet - 10*args.verbose)
(rundir, outdir, runinfo) = getdirs(args)
samplesheet_csv = os.path.join(outdir, SAMPLESHEET_CSV)
muxinfo_cfg = os.path.join(outdir, MUXINFO_CFG)
for f in [samplesheet_csv, muxinfo_cfg]:
if not args.force_overwrite and os.path.exists(f):
logger.fatal("Refusing to overwrite existing file %s", f)
sys.exit(1)
_, run_num, flowcellid = get_machine_run_flowcell_id(rundir)
logger.info("Querying ELM for %s", run_num)
rest_data = get_rest_data(run_num, args.test_server)
status_cfg = os.path.join(outdir, STATUS_CFG)
assert rest_data['runId'], ("Rest data from ELM does not have runId {}".format(run_num))
if rest_data['runPass'] != 'Pass':
logger.warning("Skipping non-passed run")
with open(status_cfg, 'w') as fh_out:
fh_out.write("SEQRUNFAILED")
sys.exit(0)
status = generate_samplesheet(rest_data, flowcellid, outdir, runinfo)
if not status:
with open(status_cfg, 'w') as fh_out:
fh_out.write("NON-BCL")
if __name__ == "__main__":
main()
logger.info("Successful program exit")
|
the-stack_106_22277 | from Bio import SeqIO
import sys
# Put error and out into the log file
sys.stderr = sys.stdout = open(snakemake.log[0], "w")
# Get all the proteins of the database inside a dict-like structure
all_index_fasta = SeqIO.index(snakemake.input.protein_fasta, 'fasta')
# List of possible truncatenated ids
list_cutname = []
# Read line by line the protein table hit without loading it in memory
with open(snakemake.input.list_all_prot, 'rt') as r_file :
with open(snakemake.output.fasta, 'wt') as w_file :
for line in r_file :
if not line.startswith('protein_id'):
tmp_line = line.split()
proteins_of_interest = tmp_line[0]
if proteins_of_interest in all_index_fasta:
SeqIO.write(all_index_fasta[proteins_of_interest], w_file, 'fasta')
else :
list_cutname.append(proteins_of_interest)
        # Now parse the file again to write sequences whose ids only start
        # with a truncated protein id (kept inside the output handle so
        # w_file is still open)
        parser = SeqIO.parse(snakemake.input.protein_fasta, 'fasta')
        for prot in parser:
            for cutname in list_cutname:
                if prot.id.startswith(cutname):
                    SeqIO.write(prot, w_file, 'fasta')
|
the-stack_106_22279 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UsageDetailsOperations(object):
"""UsageDetailsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.consumption.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
scope, # type: str
expand=None, # type: Optional[str]
filter=None, # type: Optional[str]
skiptoken=None, # type: Optional[str]
top=None, # type: Optional[int]
metric=None, # type: Optional[Union[str, "_models.Metrictype"]]
start_date=None, # type: Optional[str]
end_date=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.UsageDetailsListResult"]
"""Lists the usage details for the defined scope. Usage details are available via this API only
for May 1, 2014 or later.
:param scope: The scope associated with usage details operations. This includes
'/subscriptions/{subscriptionId}/' for subscription scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope,
'/providers/Microsoft.Billing/departments/{departmentId}' for Department scope,
'/providers/Microsoft.Billing/enrollmentAccounts/{enrollmentAccountId}' for EnrollmentAccount
scope and '/providers/Microsoft.Management/managementGroups/{managementGroupId}' for Management
Group scope. For subscription, billing account, department, enrollment account and management
group, you can also add billing period to the scope using
'/providers/Microsoft.Billing/billingPeriods/{billingPeriodName}'. For e.g. to specify billing
period at department scope use
'/providers/Microsoft.Billing/departments/{departmentId}/providers/Microsoft.Billing/billingPeriods/{billingPeriodName}'.
Also, Modern Commerce Account scopes are
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for billingAccount scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}'
for billingProfile scope,
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}/invoiceSections/{invoiceSectionId}'
for invoiceSection scope, and
'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/customers/{customerId}'
specific for partners.
:type scope: str
:param expand: May be used to expand the properties/additionalInfo or properties/meterDetails
within a list of usage details. By default, these fields are not included when listing usage
details.
:type expand: str
:param filter: May be used to filter usageDetails by properties/resourceGroup,
properties/resourceName, properties/resourceId, properties/chargeType,
properties/reservationId, properties/publisherType or tags. The filter supports 'eq', 'lt',
'gt', 'le', 'ge', and 'and'. It does not currently support 'ne', 'or', or 'not'. Tag filter is
a key value pair string where key and value is separated by a colon (:). PublisherType Filter
accepts two values azure and marketplace and it is currently supported for Web Direct Offer
Type.
:type filter: str
:param skiptoken: Skiptoken is only used if a previous operation returned a partial result. If
a previous response contains a nextLink element, the value of the nextLink element will include
a skiptoken parameter that specifies a starting point to use for subsequent calls.
:type skiptoken: str
:param top: May be used to limit the number of results to the most recent N usageDetails.
:type top: int
:param metric: Allows to select different type of cost/usage records.
:type metric: str or ~azure.mgmt.consumption.models.Metrictype
:param start_date: May be used to specify the beginning of the interval you want usageDetails from.
If start_date and end_date are not included, then the call will return the usageDetails for
the current billing month. Must be in the format YYYY-MM-DD.
:type start_date: str
:param end_date: May be used to specify the end of the interval you want usageDetails from.
If start_date and end_date are not included, then the call will return the usageDetails for
the current billing month. Must be in the format YYYY-MM-DD.
:type end_date: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UsageDetailsListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.consumption.models.UsageDetailsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UsageDetailsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if skiptoken is not None:
query_parameters['$skiptoken'] = self._serialize.query("skiptoken", skiptoken, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int', maximum=1000, minimum=1)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if metric is not None:
query_parameters['metric'] = self._serialize.query("metric", metric, 'str')
if start_date is not None:
query_parameters['startDate'] = self._serialize.query("startDate", start_date, 'str')
if end_date is not None:
query_parameters['endDate'] = self._serialize.query("endDate", end_date, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('UsageDetailsListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/{scope}/providers/Microsoft.Consumption/usageDetails'} # type: ignore
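    # A minimal usage sketch (the client wiring below is assumed and only
    # illustrative; it is not defined in this module):
    #
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.consumption import ConsumptionManagementClient
    #
    #   client = ConsumptionManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #   scope = "/subscriptions/<subscription-id>/"
    #   for usage in client.usage_details.list(scope=scope, top=100):
    #       print(usage.name)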
|
the-stack_106_22282 | from django.shortcuts import render,redirect
from django.http import HttpResponse,Http404
from .models import Image,Category,Location
import pyperclip
# Create your views here.
def welcome(request):
'''
    a function to display all images and a welcome message.
'''
images = Image.get_all_images()
return render(request, 'welcome.html', {"images":images})
def search_image(request):
'''
a function to search image based on their categories.
'''
categories = Category.objects.all()
if 'image' in request.GET and request.GET['image']:
category_item = request.GET.get('image')
searched_image = Image.search_by_category(category_item)
message = f"{category_item}"
return render(request, 'search.html', {"images":searched_image,"message":message, "categories":categories})
else:
message = "You have not search for any item"
return render(request, 'search.html', {"message": message})
def image_location(request,location_id):
'''
a function to filter image by location.
'''
location_of_image = Image.filter_by_location(location_id)
return render(request,'location.html', {"location_of_image":location_of_image})
def image(request,image_id):
'''
    a function to display a single image.
'''
try:
image = Image.objects.get(id = image_id)
    except Image.DoesNotExist:
raise Http404()
return render(request, 'images.html', {"image":image})
def copy_image_url(request, image_id):
'''
a function to copy image link.
'''
images = Image.get_all_images()
loc = Image.objects.get( id = image_id)
pyperclip.copy('http://127.0.0.1:8000/' + loc.pic_image.url)
pyperclip.paste()
return render(request, 'welcome.html', {"images":images})
|
the-stack_106_22283 | """
Handle signal names.
Author: Vishakha
Created: 2020-08-07
"""
import covidcast
def add_prefix(signal_names, wip_signal, prefix="wip_"):
"""Adds prefix to signal if there is a WIP signal
Parameters
----------
signal_names: List[str]
Names of signals to be exported
prefix : 'wip_'
prefix for new/non public signals
wip_signal : List[str] or bool
a list of wip signals: [], OR
all signals in the registry: True OR
only signals that have never been published: False
Returns
-------
List of signal names
wip/non wip signals for further computation
"""
if wip_signal is True:
return [prefix + signal for signal in signal_names]
if isinstance(wip_signal, list):
make_wip = set(wip_signal)
return[
prefix + signal if signal in make_wip else signal
for signal in signal_names
]
if wip_signal in {False, ""}:
return [
signal if public_signal(signal)
else prefix + signal
for signal in signal_names
]
raise ValueError("Supply True | False or '' or [] | list()")
def public_signal(signal_):
"""Checks if the signal name is already public using COVIDcast
Parameters
----------
signal_ : str
Name of the signal
Returns
-------
bool
True if the signal is present
False if the signal is not present
"""
epidata_df = covidcast.metadata()
for index in range(len(epidata_df)):
if epidata_df['signal'][index] == signal_:
return True
return False
|
the-stack_106_22285 | #!/usr/local/bin/python3
"""
_im_utils.py - separate utility functions for tif images
date: 20160429
date: 20170810 - combine all util functions
date: 20180218 - add sift functions
date: 20180315 - tidy up and add Bobby's fitting algorithm
"""
import os
import tifffile
import skimage
import skimage.exposure
import scipy, scipy.signal, scipy.optimize
import numpy as np
import matplotlib.pyplot as plt
__author__ = 'Sung-Cheol Kim'
__version__ = '1.2.2'
############################################ file processing
def readTif(filename, method='full'):
""" old method to read tif using tifffile """
print('... read file: %s' % filename)
ffilename = filename.replace('BIOSUB', 'sBIOSUB')
if method != 'full' and os.path.exists(ffilename):
with tifffile.TiffFile(ffilename) as imfile:
images = imfile.asarray()
imgMean = skimage.img_as_float(images)
elif os.path.exists(filename):
with tifffile.TiffFile(filename) as imfile:
images = imfile.asarray()
images = skimage.img_as_float(images)
if images.shape[0] > 2:
imgMean = images.mean(axis=0)
else:
imgMean = images
        tifffile.imsave(ffilename, imgMean.astype(np.float32))  # scipy.misc.imsave was removed in SciPy >= 1.2
print('... save mean image: %s' % ffilename)
else:
print('... no file')
return 0
if method == 'full':
return images
return imgMean
def _plotAxis(image, axis=0, background=0, backstd=0, window=None):
'''
plot line profile collectively from 2d image
Inputs:
image = 2D numpy array image
Options:
axis = x or y axis
background = background value
backstd = background standard deviation
        window = (start, end) index range to average over (default: full axis)
    Return:
        None (displays a matplotlib figure)
'''
if window is None:
i0 = 0
i1 = image.shape[axis]
else:
i0 = window[0]
i1 = window[1]
if axis in [0, 1]:
if axis == 0:
off_axis = 1
plt.xlabel('x (pixels)')
line = image[i0:i1, :].mean(axis=axis)
else:
off_axis = 0
plt.ylabel('y (pixels)')
line = image[:, i0:i1].mean(axis=axis)
for i in range(i0, i1):
if axis == 0:
plt.plot(_smooth(image[i, :]), alpha=0.5)
elif axis == 1:
plt.plot(_smooth(image[:, i]), alpha=0.5)
# pattern recognition 1
localmaxs = scipy.signal.argrelmax(_smooth(line), order=10)
for lm in localmaxs[0]:
print("... local maximums: %i " % lm)
# pattern recognition 2
dline = _smooth(line[1:] - line[:-1])
localmaxds = scipy.signal.argrelmax(np.abs(dline), order=15)
for ldm in localmaxds[0]:
print("... local maximum derivatives: %i " % ldm)
# pattern recognition 3
der_max = np.argmax(dline)
der_min = np.argmin(dline)
if (der_max+4 > i1):
dI_max = line[der_max-3] - line[i1-1]
elif (der_max-3 < 0):
dI_max = line[0] - line[der_max+3]
else:
dI_max = line[der_max-3] - line[der_max+3]
dI_min = line[der_min-3] - line[der_min+3]
print("... maximum derivatives: %i dI %.2f " % (der_max, dI_max))
print("... minimum derivatives: %i dI %.2f " % (der_min, dI_min))
plt.plot(line, color='w')
plt.plot(dline, color='gray')
plt.xlim([0, image.shape[off_axis]])
plt.vlines(localmaxs, 0.0, line[localmaxs], color='y', linestyles='dashed')
plt.vlines(localmaxds, 0.0, line[localmaxds], color='b', linestyles='dashed')
plt.hlines(background, 0, image.shape[off_axis], linestyles='dashed')
plt.hlines(backstd+background, 0, image.shape[off_axis], linestyles='dashed')
plt.show()
def showTwo(image1, image2):
from mpl_toolkits.axes_grid1 import make_axes_locatable
fig = plt.figure(figsize=(12, 6))
ax1 = fig.add_subplot(121)
im = plt.imshow(image1)
divider1 = make_axes_locatable(ax1)
cax1 = divider1.append_axes("right", size="3%", pad=0.1)
plt.colorbar(im, cax=cax1)
ax2 = fig.add_subplot(122)
divider2 = make_axes_locatable(ax2)
im = plt.imshow(image2)
cax2 = divider2.append_axes("right", size="3%", pad=0.1)
plt.colorbar(im, cax=cax2)
############################################ image processing
def gauss_kern(size, sizey=None):
""" Returns a normalized 2D gauss kernel array for convolutions """
size = int(size)
if not sizey:
sizey = size
else:
sizey = int(sizey)
x, y = np.mgrid[-size:size+1, -sizey:sizey+1]
g = np.exp(-(x**2/float(size)+y**2/float(sizey)))
return g / g.sum()
def blur_image(im, n, ny=None):
""" blurs the image by convolving with a gaussian kernel of typical
size n. The optional keyword argument ny allows for a different
size in the y direction.
"""
g = gauss_kern(n, sizey=ny)
improc = scipy.signal.convolve(im, g, mode='valid')
return(improc)
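# Minimal usage sketch for gauss_kern/blur_image on synthetic data (the image
# size and kernel radius are illustrative only).
def _demo_blur_image():
    """Blur a random 64x64 image with a 7x7 Gaussian kernel.

    Because scipy.signal.convolve is called with mode='valid', the output is
    smaller than the input by 2*n pixels per axis.
    """
    im = np.random.rand(64, 64)
    blurred = blur_image(im, 3)            # kernel size 2*3+1 = 7
    print(im.shape, '->', blurred.shape)   # (64, 64) -> (58, 58)
    return blurred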
def denoise(im, U_init, tolerance=0.1, tau=0.125, tv_weight=100):
""" An implementation of the Rudin-Osher-Fatemi (ROF) denoising model
using the numerical procedure presented in eq (11) A. Chambolle (2005).
Input: noisy input image (grayscale), initial guess for U, weight of
the TV-regularizing term, steplength, tolerance for stop criterion.
Output: denoised and detextured image, texture residual. """
m, n = im.shape # size of noisy image
# initialize
U = U_init
Px = im # x-component to the dual field
Py = im # y-component of the dual field
error = 1
while( error > tolerance):
Uold = U
# gradient of primal variable
GradUx = np.roll(U, -1, axis=1) - U # x-component of U's gradient
GradUy = np.roll(U, -1, axis=0) - U # y-component of U's gradient
# update the dual variable
PxNew = Px + (tau/tv_weight)*GradUx
PyNew = Py + (tau/tv_weight)*GradUy
NormNew = np.maximum(1, np.sqrt((PxNew**2 + PyNew**2)))
Px = PxNew/NormNew # update of x-component (dual)
Py = PyNew/NormNew # update of y-component (dual)
# update the primal variable
RxPx = np.roll(Px, 1, axis=1) # right x-translation of x-component
RyPy = np.roll(Py, 1, axis=0) # right y-translation of y-component
DivP = (Px-RxPx)+(Py-RyPy) # divergence of the dual field
U = im + tv_weight*DivP # update of the primal variable
# update of error
error = np.linalg.norm(U-Uold)/np.sqrt(n*m);
return U, im-U # denoised image and texture residual
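# Minimal usage sketch for the ROF denoiser on a synthetic noisy step image
# (all values are illustrative; the tolerance is loosened so the demo
# converges quickly).
def _demo_denoise():
    """Denoise a noisy 64x64 step image with the ROF model above."""
    im = np.zeros((64, 64))
    im[16:48, 16:48] = 1.0
    noisy = im + 0.2 * np.random.standard_normal(im.shape)
    U, T = denoise(noisy, noisy, tolerance=0.05)
    return U, T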
############################################ line profile and fitting
def _smooth(x, window_len=11, window='flat'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the begining and end part of the output signal.
input:
x: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
t=linspace(-2,2,0.1)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
see also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len < 3:
return x
if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError('Window is on of', 'flat', 'hanning', 'hamming', 'bartlett', 'blackman')
s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
# print(len(s))
if window == 'flat': # moving average
w = np.ones(window_len, 'd')
else:
w = eval('np.' + window + '(window_len)')
y = np.convolve(w / w.sum(), s, mode='valid')
    return y[(int(window_len/2)-1):-int(window_len/2)]  # np.int was removed in NumPy >= 1.24
def _find_channel(image, window=11, compareRatio=0.2, minimumIntensity=0.1):
"""
find_channel
Parameters:
image - image source
window - smoothing window
compareRatio - criteria for intensity variation of two walls
minimumIntensity - boundary delta value of intensity
Return:
"""
# normalize image value
image = skimage.exposure.rescale_intensity(image, in_range='image', out_range=(0.0, 1.0))
# find channel wall
height, width = image.shape
x_mean_line = image.mean(axis=0)
# check wall is too close to the frame edge
edge_pixel = 15
x_mean_line = x_mean_line[edge_pixel:-edge_pixel]
dx_mean_line = _smooth(x_mean_line[1:] - x_mean_line[:-1], window_len=window)
dx_max = np.argmax(dx_mean_line)
dx_min = np.argmin(dx_mean_line)
    dx = int((window-1)/2)
# check wall and frame boundary
print(image.shape)
if (dx_max+dx >= width-2*edge_pixel):
dI_max = x_mean_line[width-2*edge_pixel-1] - x_mean_line[dx_max-dx]
elif (dx_max-dx < 0):
dI_max = x_mean_line[dx_max+dx] - x_mean_line[0]
else:
dI_max = x_mean_line[dx_max+dx] - x_mean_line[dx_max-dx]
if (dx_min+dx >= width-2*edge_pixel):
dI_min = x_mean_line[dx_min-dx] - x_mean_line[width-2*edge_pixel-1]
elif (dx_min-dx < 0):
dI_min = x_mean_line[0] - x_mean_line[dx_min+dx]
else:
dI_min = x_mean_line[dx_min-dx] - x_mean_line[dx_min+dx]
# compare threshold for wall
if np.abs(dI_max - dI_min) < compareRatio:
if dx_max < dx_min:
x0, x1 = dx_max+edge_pixel, dx_min+edge_pixel
else:
x0, x1 = dx_min+edge_pixel, dx_max+edge_pixel
width = x1 - x0
print("... find wall x0, x1 and width: %i, %i, %i" % (x0, x1, width))
else:
print("... fail to find channel wall")
print("... dx_max: %i dI_max: %.3f" % (dx_max, dI_max))
print("... dx_min: %i dI_min: %.3f" % (dx_min, dI_min))
return 0
# find different channel domain
y_mean_line = image[:, x0:x1].mean(axis=1)
dy_mean_line = _smooth(y_mean_line[1:] - y_mean_line[:-1], window_len=window)
dy_max = np.argmax(dy_mean_line)
dy_min = np.argmin(dy_mean_line)
    dy = int((window-1)/2)
if (dy_max+dy >= height) or (dy_max-dy < 0):
dy = 0
dI_max = y_mean_line[dy_max+dy] - y_mean_line[dy_max-dy]
    dy = int((window-1)/2)
if (dy_min+dy >= height) or (dy_min-dy < 0):
dy = 0
dI_min = y_mean_line[dy_min-dy] - y_mean_line[dy_min+dy]
if np.abs(dI_max) > minimumIntensity:
if np.abs(dI_min) > minimumIntensity:
if dy_max > dy_min:
y0, y1 = dy_min, dy_max
else:
y0, y1 = dy_max, dy_min
print("... three channel area: %i, %i" % (y0, y1))
return (x0, x1, y0, y1)
else:
y0 = dy_max
print("... two channel area: %i" % y0)
print("... dy_min: %i, dI_min: %.3f" % (dy_min, dI_min))
return (x0, x1, y0)
elif np.abs(dI_min) > minimumIntensity:
y0 = dy_min
print("... two channel area: %i" % y0)
print("... dy_max: %i, dI_max: %.3f" % (dy_max, dI_max))
return (x0, x1, y0)
else:
print("... only channel")
print("... dy_max: %i, dI_max: %.3f" % (dy_max, dI_max))
print("... dy_min: %i, dI_min: %.3f" % (dy_min, dI_min))
return (x0, x1)
def func(x, a, b, c):
return 1.0/(c+np.abs((x-b)/a))
def gaussian(k, x):
""" gaussian function
k - coefficient array, x - values """
return (k[0]/(np.sqrt(2*np.pi)*k[2])) * np.exp(-(x-k[1])**2 /(2*k[2]**2)) + k[3]
def line(k, x):
""" line function """
return k[0]*x + k[1]
def loss(k, x, y, f, nu):
""" optimization function
k - coefficients
x, y - values
f - function
nu - normalization factor """
res = y - f(k, x)
return np.sum(np.log(1 + res**2/nu))
def robust_gaussian_fit(x, y, nu=1.0, initial=[1.0, 0.0, 1.0, 0.0], verb=False):
""" robust fit using log loss function """
return scipy.optimize.fmin(loss, initial, args=(x, y, gaussian, nu), disp=verb)
def robust_line_fit(x, y, nu=1.0, initial=[1.0, 0.0], verb=False):
""" robust fit using log loss function """
return scipy.optimize.fmin(loss, initial, args=(x, y, line, nu), disp=verb)
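# Illustrative sketch with assumed data (not from the original code): the log loss makes
# these fits resistant to outliers compared with plain least squares, e.g.
#     xs = np.linspace(0, 10, 100)
#     ys = 2.0*xs + 1.0 + np.random.randn(100)*0.1
#     ys[50] = 100.0                                        # a single gross outlier
#     k = robust_line_fit(xs, ys, nu=1.0, initial=[1.0, 0.0])   # k stays close to [2.0, 1.0]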
def find_after(a, a0):
    "Element in nd array `a` at the first index where a > a0 (the closest value above `a0` when `a` is sorted)"
(idx, ) = np.where(a - a0 > 0)
return a[idx.min()]
def find_before(a, a0):
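    "Element in nd array `a` at the last index where a < a0 (the closest value below `a0` when `a` is sorted)"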
(idx, ) = np.where(a - a0 < 0)
return a[idx.max()]
def zprofile(imgMean, location):
width = 2
    temp = imgMean[location-width:location+width+1, :]  # 2*width+1 rows centered on location
return np.sum(temp, axis=0)/(2.0*width+1.0)
def _showThresh(image, percentage=0.1, smoothing_pixel=30):
Y = np.arange(image.shape[0])
loc = np.arange(image.shape[0])
for i in range(image.shape[0]):
xline = image[i, :]
s_xline = _smooth(xline, window_len=smoothing_pixel)
threshold = s_xline.min() + percentage*(s_xline.max()-s_xline.min())
loc[i] = np.abs(s_xline - threshold).argmin()
plt.plot(loc, Y, color='white')
# print(loc)
plt.imshow(image)
plt.show()
############################################ pattern matching
def process_image(img, resultname, params="--edge-thresh 10 --peak-thresh 5"):
""" Process an image and save the results in a file. """
try:
import cv2
except ImportError:
print('... opencv is not installed')
return
#print(img.shape, img.min(), img.max())
img = np.uint8(255.0*(img - img.min())/(img.max() - img.min()))
# sift
#sift = cv2.xfeatures2d.SIFT_create()
#kp, desc = sift.detectAndCompute(img, None)
# star + brief
#star = cv2.xfeatures2d.StarDetector_create()
#brief = cv2.xfeatures2d.BriefDescriptorExtractor_create()
#kp = star.detect(img, None)
#kp, desc = brief.compute(img, kp)
# ORB
orb = cv2.ORB_create()
kp, desc = orb.detectAndCompute(img, None)
locs = []
for k in kp:
locs.append([k.pt[0], k.pt[1], k.size, k.angle])
locs = np.array(locs)
locs = locs.reshape((len(kp), 4))
#print(locs.shape, desc.shape)
write_features_to_file(resultname, locs, desc)
def read_features_from_file(filename):
""" Read feature properties and return in matrix form. """
f = np.loadtxt(filename)
return f[:, :4], f[:, 4:]
def write_features_to_file(filename, locs, desc):
""" Save feature location and descriptor to file. """
np.savetxt(filename, np.hstack((locs, desc)))
def plot_features(im, locs, circle=False):
""" Show image with features. input: im (image as array),
locs (row, col, scale, orientation of each feature). """
def draw_circle(c, r):
t = np.arange(0, 1.01, .01)*2*np.pi
x = r*np.cos(t) + c[0]
y = r*np.sin(t) + c[1]
plt.plot(x, y, 'b', linewidth=2)
plt.imshow(im)
if circle:
for p in locs:
draw_circle(p[:2], p[2])
else:
plt.plot(locs[:, 0], locs[:, 1], 'ob')
plt.axis('off')
def match(desc1, desc2):
""" For each descriptor in the first image,
select its match in the second image.
input: desc1 (descriptors for the first image),
desc2 (same for second image). """
desc1 = np.array([d/np.linalg.norm(d) for d in desc1])
desc2 = np.array([d/np.linalg.norm(d) for d in desc2])
dist_ratio = 0.6
desc1_size = desc1.shape
matchscores = np.zeros((desc1_size[0],1), 'int')
desc2t = desc2.T # precompute matrix transpose
for i in range(desc1_size[0]):
dotprods = np.dot(desc1[i,:], desc2t) # vector of dot products
dotprods = 0.9999*dotprods
# inverse cosine and sort, return index for features in second image
indx = np.argsort(np.arccos(dotprods))
# check if nearest neighbor has angle less than dist_ratio times 2nd
if np.arccos(dotprods)[indx[0]] < dist_ratio*np.arccos(dotprods)[indx[1]]:
            matchscores[i] = int(indx[0])
return matchscores
def match_twosided(desc1, desc2):
""" Two-sided symmetric version of match(). """
matches_12 = match(desc1, desc2)
matches_21 = match(desc2, desc1)
ndx_12 = matches_12.nonzero()[0]
# remove matches that are not symmetric
for n in ndx_12:
if matches_21[int(matches_12[n])] != n:
matches_12[n] = 0
return matches_12
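# Hedged end-to-end sketch chaining the helpers above (file names are hypothetical):
#     process_image(img1, 'im1.orb')
#     process_image(img2, 'im2.orb')
#     locs1, desc1 = read_features_from_file('im1.orb')
#     locs2, desc2 = read_features_from_file('im2.orb')
#     scores = match_twosided(desc1, desc2)
#     plot_matches(img1, img2, locs1, locs2, scores)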
def compute_harris_response(im, sigma=3):
""" Compute the Harris corner detector response function
for each pixel in a graylevel image. """
from scipy.ndimage import filters
# derivatives
imx = np.zeros(im.shape)
filters.gaussian_filter(im, (sigma, sigma), (0, 1), imx)
imy = np.zeros(im.shape)
filters.gaussian_filter(im, (sigma, sigma), (1, 0), imy)
# compute components of the Harris matrix
Wxx = filters.gaussian_filter(imx*imx, sigma)
Wxy = filters.gaussian_filter(imx*imy, sigma)
Wyy = filters.gaussian_filter(imy*imy, sigma)
# determinant and trace
Wdet = Wxx*Wyy - Wxy**2
Wtr = Wxx + Wyy
return Wdet / Wtr
def get_harris_points(harrisim, min_dist=10, threshold=0.1):
    """ Return corners from a Harris response image
min_dist is the minimum number of pixels separating
corners and image boundary. """
# find top corner candidates above a threshold
corner_threshold = harrisim.max() * threshold
harrisim_t = (harrisim > corner_threshold) * 1
# get coordinates of candidates
coords = np.array(harrisim_t.nonzero()).T
# ... and their values
candidate_values = [harrisim[c[0], c[1]] for c in coords]
# sort candidates
index = np.argsort(candidate_values)
# store allowed point locations in array
allowed_locations = np.zeros(harrisim.shape)
allowed_locations[min_dist:-min_dist, min_dist:-min_dist] = 1
# select the best points taking min_distance into account
filtered_coords = []
for i in index:
if allowed_locations[coords[i, 0], coords[i, 1]] == 1:
filtered_coords.append(coords[i])
allowed_locations[(coords[i, 0]-min_dist):(coords[i, 0]+min_dist),
(coords[i, 1]-min_dist):(coords[i, 1]+min_dist)] = 0
return filtered_coords
def plot_harris_points(image, filtered_coords):
""" Plots corners found in image. """
plt.figure()
plt.gray()
plt.imshow(image)
plt.plot([p[1] for p in filtered_coords], [p[0] for p in filtered_coords], '*')
plt.axis('off')
plt.show()
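# Minimal Harris pipeline sketch (assumes `im` is a 2-D grayscale array):
#     harrisim = compute_harris_response(im, sigma=3)
#     coords = get_harris_points(harrisim, min_dist=10, threshold=0.1)
#     plot_harris_points(im, coords)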
def get_descriptors(image, filtered_coords, wid=5):
""" For each point return, pixel values around the point
using a neighbourhood of width 2*wid+1. (Assume points are
extracted with min_distance > wid). """
desc = []
for coords in filtered_coords:
patch = image[coords[0]-wid:coords[0]+wid+1,
coords[1]-wid:coords[1]+wid+1].flatten()
desc.append(patch)
return desc
def match_h(desc1, desc2, threshold=0.5):
""" For each corner point descriptor in the first image,
select its match to second image using normalized cross-correlation. """
n = len(desc1[0])
# pair-wise distances
d = -np.ones((len(desc1), len(desc2)))
for i in range(len(desc1)):
for j in range(len(desc2)):
d1 = (desc1[i] - np.mean(desc1[i])) / np.std(desc1[i])
d2 = (desc2[j] - np.mean(desc2[j])) / np.std(desc2[j])
ncc_value = sum(d1 * d2) / (n-1)
if ncc_value > threshold:
d[i, j] = ncc_value
ndx = np.argsort(-d)
matchscores = ndx[:, 0]
return matchscores
def match_twosided_h(desc1, desc2, threshold=0.5):
""" Two-sided symmetric version of match(). """
matches_12 = match_h(desc1, desc2, threshold)
matches_21 = match_h(desc2, desc1, threshold)
ndx_12 = np.where(matches_12 >= 0)[0]
# remove matches that are not symmetric
for n in ndx_12:
if matches_21[matches_12[n]] != n:
matches_12[n] = -1
return matches_12
def appendimages(im1, im2):
""" Return a new image that appends the two images side-by-side. """
# select the image with the fewest rows and fill in enough empty rows
rows1 = im1.shape[0]
rows2 = im2.shape[0]
if rows1 < rows2:
im1 = np.concatenate((im1, np.zeros((rows2-rows1, im1.shape[1]))), axis=0)
elif rows1 > rows2:
im2 = np.concatenate((im2, np.zeros((rows1-rows2, im2.shape[1]))), axis=0)
# if none of these cases they are equal, no filling needed.
return np.concatenate((im1, im2), axis=1)
def plot_matches(im1, im2, locs1, locs2, matchscores, show_below=True):
""" Show a figure with lines joining the accepted matches
input: im1, im2 (images as arrays), locs1, locs2 (feature locations),
matchscores (as output from 'match()'),
show_below (if images should be shown below matches). """
im3 = appendimages(im1, im2)
if show_below:
im3 = np.vstack((im3, im3))
plt.imshow(im3)
cols1 = im1.shape[1]
for i, m in enumerate(matchscores):
if m > 0:
plt.plot([locs1[i][1], locs2[m][1]+cols1], [locs1[i][0], locs2[m][0]], 'c')
plt.axis('off')
plt.show()
def ROF_values(f, x, y, clambda):
""" Compute the ROF cost functional """
a = np.linalg.norm((f-x).flatten())**2/2
b = np.sum(np.sqrt(np.sum(y**2, axis=2)).flatten())
return a + clambda*b
def prox_project(clambda, z):
""" Projection to the clambda-ball """
nrm = np.sqrt(z[:, :, 0]**2 + z[:, :, 1]**2)
fact = np.minimum(clambda, nrm)
fact = np.divide(fact, nrm, out=np.zeros_like(fact), where=nrm!=0)
y = np.zeros(z.shape)
y[:, :, 0] = np.multiply(z[:, :, 0], fact)
y[:, :, 1] = np.multiply(z[:, :, 1], fact)
return y
def projectedGD_ROf(image, clambda, iters=100):
""" 2D Dual ROF solver using Projected Gradient Descent Method """
start_time = timeit.default_timer()
op = operators.make_finite_differences_operator(image.shape, 'fn', 1)
y = op.val(image)
x = image
vallog = np.zeros(iters)
alpha = 0.1
for i in range(iters):
y -= alpha * op.val(op.conj(y) - image)
y = operators.prox_project(clambda, y)
x = image - op.conj(y)
        vallog[i] = ROF_values(image, x, op.val(x), clambda)
print("...Finished in %d iterations and %f sec" % (iters, timeit.default_timer() - start_time))
return (x, vallog)
# vim:foldmethod=indent:foldlevel=0
|
the-stack_106_22286 | import ipaddress
import subprocess
from typing import Union
# def ping_ip(ip_address: ipaddress.IPv4Address) -> bool:
def ping_ip(ip_address: Union[str, ipaddress.IPv4Address]) -> bool:
reply = subprocess.run(
["ping", "-c", "3", "-n", str(ip_address)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
)
    return reply.returncode == 0
ip1 = ipaddress.ip_address("10.1.1.1")
print(ping_ip(ip1))
print(ping_ip("8.8.8.8"))
print(ping_ip("a"))
|
the-stack_106_22288 | from models.connection import get_cnx, tables
student_table = tables["student"]
class Student:
@staticmethod
def add_student(tournament_id, team_num, name):
with get_cnx() as db:
cursor = db.cursor()
cursor.execute(
f"INSERT INTO {student_table} (tournament_id, team_num, student_name) VALUES (%s, %s, %s)",
(tournament_id, team_num, name),
)
db.commit()
return cursor.lastrowid
@staticmethod
def get_student(student_id):
with get_cnx() as db:
cursor = db.cursor()
cursor.execute(
f"SELECT student_name FROM {student_table} WHERE id = %s", (student_id,)
)
return {"name": cursor.fetchone()[0]}
|
the-stack_106_22289 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
from typing import Any, List, Tuple, Union
import torch
from mydl.layers import interpolate
class Keypoints:
"""
Stores keypoint annotation data. GT Instances have a `gt_keypoints` property
containing the x,y location and visibility flag of each keypoint. This tensor has shape
(N, K, 3) where N is the number of instances and K is the number of keypoints per instance.
The visibility flag follows the COCO format and must be one of three integers:
* v=0: not labeled (in which case x=y=0)
* v=1: labeled but not visible
* v=2: labeled and visible
"""
def __init__(self, keypoints: Union[torch.Tensor, np.ndarray, List[List[float]]]):
"""
Arguments:
keypoints: A Tensor, numpy array, or list of the x, y, and visibility of each keypoint.
The shape should be (N, K, 3) where N is the number of
instances, and K is the number of keypoints per instance.
"""
device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device("cpu")
keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device)
assert keypoints.dim() == 3 and keypoints.shape[2] == 3, keypoints.shape
self.tensor = keypoints
def __len__(self) -> int:
return self.tensor.size(0)
def to(self, *args: Any, **kwargs: Any) -> "Keypoints":
return type(self)(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
def to_heatmap(self, boxes: torch.Tensor, heatmap_size: int) -> torch.Tensor:
"""
Arguments:
boxes: Nx4 tensor, the boxes to draw the keypoints to
Returns:
heatmaps:
A tensor of shape (N, K) containing an integer spatial label
in the range [0, heatmap_size**2 - 1] for each keypoint in the input.
valid:
A tensor of shape (N, K) containing whether each keypoint is in the roi or not.
"""
return _keypoints_to_heatmap(self.tensor, boxes, heatmap_size)
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Keypoints":
"""
Create a new `Keypoints` by indexing on this `Keypoints`.
The following usage are allowed:
1. `new_kpts = kpts[3]`: return a `Keypoints` which contains only one instance.
2. `new_kpts = kpts[2:10]`: return a slice of key points.
3. `new_kpts = kpts[vector]`, where vector is a torch.ByteTensor
with `length = len(kpts)`. Nonzero elements in the vector will be selected.
Note that the returned Keypoints might share storage with this Keypoints,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return Keypoints([self.tensor[item]])
return Keypoints(self.tensor[item])
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
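# Hedged usage sketch (not part of the original file): a Keypoints object wraps an
# (N, K, 3) tensor of (x, y, visibility) triples; `boxes` below is an assumed Nx4 xyxy tensor.
#     kpts = Keypoints(torch.tensor([[[10.0, 20.0, 2.0], [0.0, 0.0, 0.0]]]))
#     heatmaps, valid = kpts.to_heatmap(boxes, heatmap_size=56)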
# TODO make this nicer, this is a direct translation from C2 (but removing the inner loop)
def _keypoints_to_heatmap(
keypoints: torch.Tensor, rois: torch.Tensor, heatmap_size: int
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Encode keypoint locations into a target heatmap for use in SoftmaxWithLoss across space.
Maps keypoints from the half-open interval [x1, x2) on continuous image coordinates to the
closed interval [0, heatmap_size - 1] on discrete image coordinates. We use the
continuous-discrete conversion from Heckbert 1990 ("What is the coordinate of a pixel?"):
d = floor(c) and c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate.
Arguments:
keypoints: tensor of keypoint locations in of shape (N, K, 3).
rois: Nx4 tensor of rois in xyxy format
heatmap_size: integer side length of square heatmap.
Returns:
heatmaps: A tensor of shape (N, K) containing an integer spatial label
in the range [0, heatmap_size**2 - 1] for each keypoint in the input.
valid: A tensor of shape (N, K) containing whether each keypoint is in
the roi or not.
"""
if rois.numel() == 0:
return rois.new().long(), rois.new().long()
offset_x = rois[:, 0]
offset_y = rois[:, 1]
scale_x = heatmap_size / (rois[:, 2] - rois[:, 0])
scale_y = heatmap_size / (rois[:, 3] - rois[:, 1])
offset_x = offset_x[:, None]
offset_y = offset_y[:, None]
scale_x = scale_x[:, None]
scale_y = scale_y[:, None]
x = keypoints[..., 0]
y = keypoints[..., 1]
x_boundary_inds = x == rois[:, 2][:, None]
y_boundary_inds = y == rois[:, 3][:, None]
x = (x - offset_x) * scale_x
x = x.floor().long()
y = (y - offset_y) * scale_y
y = y.floor().long()
x[x_boundary_inds] = heatmap_size - 1
y[y_boundary_inds] = heatmap_size - 1
valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size)
vis = keypoints[..., 2] > 0
valid = (valid_loc & vis).long()
lin_ind = y * heatmap_size + x
heatmaps = lin_ind * valid
return heatmaps, valid
@torch.no_grad()
def heatmaps_to_keypoints(maps: torch.Tensor, rois: torch.Tensor) -> torch.Tensor:
"""
Extract predicted keypoint locations from heatmaps.
Args:
maps (Tensor): (#ROIs, #keypoints, POOL_H, POOL_W). The predicted heatmap of logits for
each ROI and each keypoint.
rois (Tensor): (#ROIs, 4). The box of each ROI.
Returns:
Tensor of shape (#ROIs, #keypoints, 4) with the last dimension corresponding to
(x, y, logit, score) for each keypoint.
When converting discrete pixel indices in an NxN image to a continuous keypoint coordinate,
we maintain consistency with :meth:`Keypoints.to_heatmap` by using the conversion from
Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate.
"""
offset_x = rois[:, 0]
offset_y = rois[:, 1]
widths = (rois[:, 2] - rois[:, 0]).clamp(min=1)
heights = (rois[:, 3] - rois[:, 1]).clamp(min=1)
widths_ceil = widths.ceil()
heights_ceil = heights.ceil()
num_rois, num_keypoints = maps.shape[:2]
xy_preds = maps.new_zeros(rois.shape[0], num_keypoints, 4)
width_corrections = widths / widths_ceil
height_corrections = heights / heights_ceil
keypoints_idx = torch.arange(num_keypoints, device=maps.device)
for i in range(num_rois):
outsize = (int(heights_ceil[i]), int(widths_ceil[i]))
roi_map = interpolate(maps[[i]], size=outsize, mode="bicubic", align_corners=False).squeeze(
0
) # #keypoints x H x W
# softmax over the spatial region
max_score, _ = roi_map.view(num_keypoints, -1).max(1)
max_score = max_score.view(num_keypoints, 1, 1)
tmp_full_resolution = (roi_map - max_score).exp_()
tmp_pool_resolution = (maps[i] - max_score).exp_()
# Produce scores over the region H x W, but normalize with POOL_H x POOL_W,
# so that the scores of objects of different absolute sizes will be more comparable
roi_map_scores = tmp_full_resolution / tmp_pool_resolution.sum((1, 2), keepdim=True)
w = roi_map.shape[2]
pos = roi_map.view(num_keypoints, -1).argmax(1)
x_int = pos % w
y_int = (pos - x_int) // w
assert (
roi_map_scores[keypoints_idx, y_int, x_int]
== roi_map_scores.view(num_keypoints, -1).max(1)[0]
).all()
x = (x_int.float() + 0.5) * width_corrections[i]
y = (y_int.float() + 0.5) * height_corrections[i]
xy_preds[i, :, 0] = x + offset_x[i]
xy_preds[i, :, 1] = y + offset_y[i]
xy_preds[i, :, 2] = roi_map[keypoints_idx, y_int, x_int]
xy_preds[i, :, 3] = roi_map_scores[keypoints_idx, y_int, x_int]
return xy_preds
|
the-stack_106_22290 | from shared.args import arg_help, base_parser
def arguments(name):
parser = base_parser(name)
parser.add_argument(
'-u', '--unfollow', default=0, type=int, dest='num_unfollow',
help=arg_help('how many accounts to unfollow at once (0 = unlimited)')
)
parser.add_argument(
'-f', '--follow', default=9, type=int, dest='num_tofollow',
help=arg_help('how many accounts to follow at once (0 = unlimited)')
)
parser.add_argument(
'-n', '--name', default=None, type=str, dest='account_name',
help=arg_help('follow up to --follow accounts from @name followers')
)
return parser.parse_args()
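# Hedged usage sketch (the program name passed to base_parser is hypothetical):
#     args = arguments('follow-bot')
#     print(args.num_tofollow, args.num_unfollow, args.account_name)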
|
the-stack_106_22291 | from pathlib import Path
from typer import Option
from deckz.cli import app
from deckz.running import run_all as running_run_all
@app.command()
def check_all(
handout: bool = Option(False, help="Produce PDFs without animations"),
presentation: bool = Option(True, help="Produce PDFs with animations"),
print: bool = Option(False, help="Produce printable PDFs"),
workdir: Path = Option(
Path("."), help="Path to move into before running the command"
),
) -> None:
"""Compile all shared slides (presentation only by default)."""
running_run_all(
directory=workdir,
build_handout=handout,
build_presentation=presentation,
build_print=print,
)
|
the-stack_106_22292 | # -*- coding: utf-8 -*-
# Copyright (c) 2017-2020 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import pathlib
from . import test, configuration
def tests(base_dir, cfg):
base_dir = pathlib.Path(base_dir)
for fn in (base_dir / 'tests').rglob("*.json"):
try:
yield test.Test(fn, base_dir, cfg)
except Exception as ex:
print(" WARN: Can't parse test '{}' - {}".format(fn.name, ex))
def config(base_dir):
fn = base_dir / 'ted.json'
cfg = json.loads(fn.read_text())
return configuration.Configuration(cfg, base_dir)
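# Hedged usage sketch (the suite directory name is hypothetical; it must contain
# ted.json plus a tests/ tree of *.json files):
#     cfg = config(pathlib.Path('suite'))
#     for t in tests('suite', cfg):
#         print(t)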
|
the-stack_106_22293 | from django.conf.urls import url
from . import views
urlpatterns = [
# e.g.: /
#url(r'^$', views.index, name='index'),
url(
regex=r'^$',
view=views.GroupList.as_view(),
name='list'
),
# e.g.: /groups/3/
url(
regex=r'^(?P<pk>[0-9]+)/$',
view=views.GroupDetail.as_view(),
name='detail'
),
    # e.g.: /groups/create
url(
regex=r'^create$',
view=views.GroupCreate.as_view(),
name='create'
),
# e.g.: /groups/update/3/
url(
regex=r'^update/(?P<pk>[0-9]+)/$',
view=views.GroupUpdate.as_view(),
name='update'
),
# e.g.: /groups/delete/3/
url(
regex=r'^delete/(?P<pk>[0-9]+)/$',
view=views.GroupDelete.as_view(),
name='delete'
),
]
|
the-stack_106_22294 | # This class create the instance to manage the serverIP and the relative broadcast management
import network as net
import socket as sock
import machine
import os
import time
import ubinascii
import urandom
import math
def get_ip_address():
ip_address = ''
sta_if = net.WLAN(net.STA_IF)
temp = sta_if.ifconfig()
ip_address = temp[0]
return ip_address
class BroadcastManager:
def __init__(self,
dirdata = "data",
filedata = "serverip.data",
boardKind = "I",
portBroadCast = 51082):
self.dirdata = dirdata
self.filedata = filedata
self.portBroadCast = portBroadCast
self.mac = ubinascii.hexlify(net.WLAN().config('mac'),':').decode()
self.boardKind = boardKind
self.build_bc_msg()
self.checkFileAndCreate(self.dirdata,self.filedata)
self.IPfile = self.dirdata + '/' + self.filedata
self.ServerIP = []
self.FileGetServerIP()
self.sec2millis = 1000
self.min2sec = 60
self.min2millis = self.min2sec*self.sec2millis
self.millis_old = int(round(time.time()*self.sec2millis))
self.waitTime = 1
self.sockBroadCast = []
def init_bc_sock(self):
self.sockBroadCast=sock.socket(sock.AF_INET,sock.SOCK_DGRAM)
self.sockBroadCast.settimeout(0.1)
self.sockBroadCast.bind(('',self.portBroadCast))
def build_bc_msg(self):
self.ip = get_ip_address()
self.message = 'D,' + self.mac+','+self.ip+','+self.boardKind
def stripInMessage(self,inline):
inline = str(inline)
inline = inline.replace('b\'','')
outline = inline.replace('\'','')
return outline
def check_bc_msg(self):
millis_new = int(round(time.time()*self.sec2millis))
millis_pass = millis_new-self.millis_old
if (millis_pass/self.min2millis>=self.waitTime):
self.log('inside if')
self.millis_old = millis_new
self.waitTime = 1
self.log('Build message')
self.build_bc_msg()
self.send_bc_msg()
try:
m=self.sockBroadCast.recvfrom(1024)
line = self.stripInMessage(m[0])
self.log('Recived message from broadcast: ' + line)
elements = list(line.split(','))
if elements[0].strip()=='S':
self.waitTime = self.get_random_time()
self.log('New waiting time: ' + str(self.waitTime))
if (self.ServerIP[0] != elements[1]):
self.log('build SERVERIP')
self.ServerIP[0] = elements[1]
self.ServerIP[1] = int(elements[2])
self.FileSetServerIP()
else:
                        self.log('nothing changed, server ip is: ' + str(self.ServerIP[0]))
                        self.log('nothing changed, server port is: ' + str(self.ServerIP[1]))
except Exception as ex:
if str(ex)=='[Errno 110] ETIMEDOUT':
pass
elif str(ex)=='[Errno 11] EAGAIN':
pass
elif str(ex)=='[Errno 22] EINVAL':
pass
else:
self.log(str(ex))
def get_random_time(self):
num = self.translate(urandom.getrandbits(16), 0, 65535, 0, 10000)
        integer = math.floor(num)/1000  # translate returns a float in [0, 10000]; floor it and scale to a 0-10 minute wait
return integer
def translate(self,value, leftMin, leftMax, rightMin, rightMax):
# Figure out how 'wide' each range is
leftSpan = leftMax - leftMin
rightSpan = rightMax - rightMin
# Convert the left range into a 0-1 range (float)
valueScaled = float(value - leftMin) / float(leftSpan)
# Convert the 0-1 range into a value in the right range.
return rightMin + (valueScaled * rightSpan)
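    # Worked example (illustrative): translate(32768, 0, 65535, 0, 10000) is about 5000,
    # so get_random_time() floors it and divides by 1000, i.e. a wait of roughly 5 minutes
    # (the full range is 0-10 minutes).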
def checkFileAndCreate(self,dir,filename):
dirfilename = dir+'/'+filename
self.log('check file ' + dirfilename)
listfile = os.listdir(dir)
if not (filename in listfile):
            self.log('file ' + dirfilename + ' not found, it will be created')
file = open(dirfilename,'w')
file.close()
def log(self,message):
print(message)
def FileGetServerIP(self):
self.log('getting SERVER IP from file: ' + self.IPfile)
file = open(self.IPfile,'r')
strServerIP = file.readline()
file.close()
if strServerIP!='':
self.log('SERVER IP found: ' + strServerIP)
elements = str(strServerIP).split(',')
            self.ServerIP = [elements[0], int(elements[1])]  # rebuild the list; index assignment would fail on the initial empty list
self.log('And set')
else:
self.ServerIP = ['192.168.1.11',51083]
self.log('SERVER IP NOT found: ' + str(self.ServerIP))
def FileSetServerIP(self):
self.log('putting SERVER IP to file: ' + self.IPfile)
try:
file = open(self.IPfile,'w')
file.write(self.ServerIP[0]+','+str(self.ServerIP[1]))
file.close()
self.log('done')
except Exception as ex:
self.log(str(ex))
def send_bc_msg(self):
self.log('inside sendSocketBroadcast')
try:
self.log('Try to send message to the server: '+ self.message + ' at : ' + str((self.ServerIP[0],self.portBroadCast)))
self.sockBroadCast.sendto(self.message,(self.ServerIP[0],self.portBroadCast))
self.log('Sent message to the server: '+ self.message)
except Exception as ex:
self.log(str(ex))
def closesock(self):
self.sockBroadCast.close() |
the-stack_106_22295 | """OpenCL target independent of PyOpenCL."""
from __future__ import division, absolute_import
__copyright__ = "Copyright (C) 2015 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
from loopy.target.c import CFamilyTarget, CFamilyASTBuilder
from loopy.target.c.codegen.expression import ExpressionToCExpressionMapper
from pytools import memoize_method
from loopy.diagnostic import LoopyError
from loopy.types import NumpyType
from loopy.target.c import DTypeRegistryWrapper, c_math_mangler
from loopy.kernel.data import AddressSpace, CallMangleInfo
from pymbolic import var
from functools import partial
# {{{ dtype registry wrappers
class DTypeRegistryWrapperWithAtomics(DTypeRegistryWrapper):
def get_or_register_dtype(self, names, dtype=None):
if dtype is not None:
from loopy.types import AtomicNumpyType, NumpyType
if isinstance(dtype, AtomicNumpyType):
                return super(DTypeRegistryWrapperWithAtomics, self).get_or_register_dtype(
                        names, NumpyType(dtype.dtype))
return super(DTypeRegistryWrapperWithAtomics, self).get_or_register_dtype(
names, dtype)
class DTypeRegistryWrapperWithCL1Atomics(DTypeRegistryWrapperWithAtomics):
def dtype_to_ctype(self, dtype):
from loopy.types import AtomicNumpyType
if isinstance(dtype, AtomicNumpyType):
return "volatile " + self.wrapped_registry.dtype_to_ctype(dtype)
else:
return super(DTypeRegistryWrapperWithCL1Atomics, self).dtype_to_ctype(
dtype)
# }}}
# {{{ vector types
class vec: # noqa
pass
def _create_vector_types():
field_names = ["x", "y", "z", "w"]
vec.types = {}
vec.names_and_dtypes = []
vec.type_to_scalar_and_count = {}
counts = [2, 3, 4, 8, 16]
for base_name, base_type in [
('char', np.int8),
('uchar', np.uint8),
('short', np.int16),
('ushort', np.uint16),
('int', np.int32),
('uint', np.uint32),
('long', np.int64),
('ulong', np.uint64),
('float', np.float32),
('double', np.float64),
]:
for count in counts:
name = "%s%d" % (base_name, count)
titles = field_names[:count]
padded_count = count
if count == 3:
padded_count = 4
names = ["s%d" % i for i in range(count)]
while len(names) < padded_count:
names.append("padding%d" % (len(names)-count))
if len(titles) < len(names):
titles.extend((len(names)-len(titles))*[None])
try:
dtype = np.dtype(dict(
names=names,
formats=[base_type]*padded_count,
titles=titles))
except NotImplementedError:
try:
dtype = np.dtype([((n, title), base_type)
for (n, title) in zip(names, titles)])
except TypeError:
dtype = np.dtype([(n, base_type) for (n, title)
in zip(names, titles)])
setattr(vec, name, dtype)
vec.names_and_dtypes.append((name, dtype))
vec.types[np.dtype(base_type), count] = dtype
vec.type_to_scalar_and_count[dtype] = np.dtype(base_type), count
_create_vector_types()
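# Illustrative note (not in the original source): after _create_vector_types() runs,
# vec.types maps (base numpy dtype, count) to the padded struct dtype, e.g.
#     vec.types[np.dtype(np.float32), 4]          # the OpenCL float4 dtype
#     vec.type_to_scalar_and_count[vec.float4]    # -> (dtype('float32'), 4)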
def _register_vector_types(dtype_registry):
for name, dtype in vec.names_and_dtypes:
dtype_registry.get_or_register_dtype(name, dtype)
# }}}
# {{{ function mangler
_CL_SIMPLE_MULTI_ARG_FUNCTIONS = {
"rsqrt": 1,
"clamp": 3,
"atan2": 2,
}
VECTOR_LITERAL_FUNCS = dict(
("make_%s%d" % (name, count), (name, dtype, count))
for name, dtype in [
('char', np.int8),
('uchar', np.uint8),
('short', np.int16),
('ushort', np.uint16),
('int', np.int32),
('uint', np.uint32),
('long', np.int64),
('ulong', np.uint64),
('float', np.float32),
('double', np.float64),
]
for count in [2, 3, 4, 8, 16]
)
def opencl_function_mangler(kernel, name, arg_dtypes):
if not isinstance(name, str):
return None
# OpenCL has min(), max() for integer types
if name in ["max", "min"] and len(arg_dtypes) == 2:
dtype = np.find_common_type(
[], [dtype.numpy_dtype for dtype in arg_dtypes])
if dtype.kind == "i":
result_dtype = NumpyType(dtype)
return CallMangleInfo(
target_name=name,
result_dtypes=(result_dtype,),
arg_dtypes=2*(result_dtype,))
if name == "dot":
scalar_dtype, offset, field_name = arg_dtypes[0].numpy_dtype.fields["s0"]
return CallMangleInfo(
target_name=name,
result_dtypes=(NumpyType(scalar_dtype),),
arg_dtypes=(arg_dtypes[0],)*2)
if name in _CL_SIMPLE_MULTI_ARG_FUNCTIONS:
num_args = _CL_SIMPLE_MULTI_ARG_FUNCTIONS[name]
if len(arg_dtypes) != num_args:
raise LoopyError("%s takes %d arguments (%d received)"
% (name, num_args, len(arg_dtypes)))
dtype = np.find_common_type(
[], [dtype.numpy_dtype for dtype in arg_dtypes])
if dtype.kind == "c":
raise LoopyError("%s does not support complex numbers"
% name)
result_dtype = NumpyType(dtype)
return CallMangleInfo(
target_name=name,
result_dtypes=(result_dtype,),
arg_dtypes=(result_dtype,)*num_args)
if name in VECTOR_LITERAL_FUNCS:
base_tp_name, dtype, count = VECTOR_LITERAL_FUNCS[name]
if count != len(arg_dtypes):
return None
return CallMangleInfo(
target_name="(%s%d) " % (base_tp_name, count),
result_dtypes=(kernel.target.vector_dtype(
NumpyType(dtype), count),),
arg_dtypes=(NumpyType(dtype),)*count)
return None
# }}}
# {{{ symbol mangler
def opencl_symbol_mangler(kernel, name):
# FIXME: should be more picky about exact names
if name.startswith("FLT_"):
return NumpyType(np.dtype(np.float32)), name
elif name.startswith("DBL_"):
return NumpyType(np.dtype(np.float64)), name
elif name.startswith("M_"):
if name.endswith("_F"):
return NumpyType(np.dtype(np.float32)), name
else:
return NumpyType(np.dtype(np.float64)), name
elif name == "INFINITY":
return NumpyType(np.dtype(np.float32)), name
elif name.startswith("INT_"):
return NumpyType(np.dtype(np.int32)), name
elif name.startswith("LONG_"):
return NumpyType(np.dtype(np.int64)), name
else:
return None
# }}}
# {{{ preamble generator
def opencl_preamble_generator(preamble_info):
has_double = False
for dtype in preamble_info.seen_dtypes:
if (isinstance(dtype, NumpyType)
and dtype.numpy_dtype in [np.float64, np.complex128]):
has_double = True
if has_double:
yield ("00_enable_double", """
#if __OPENCL_C_VERSION__ < 120
#pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
""")
from loopy.types import AtomicNumpyType
seen_64_bit_atomics = any(
isinstance(dtype, AtomicNumpyType) and dtype.numpy_dtype.itemsize == 8
for dtype in preamble_info.seen_atomic_dtypes)
if seen_64_bit_atomics:
# FIXME: Should gate on "CL1" atomics style
yield ("00_enable_64bit_atomics", """
#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
""")
from loopy.tools import remove_common_indentation
kernel = preamble_info.kernel
yield ("00_declare_gid_lid",
remove_common_indentation("""
#define lid(N) ((%(idx_ctype)s) get_local_id(N))
#define gid(N) ((%(idx_ctype)s) get_group_id(N))
""" % dict(idx_ctype=kernel.target.dtype_to_typename(
kernel.index_dtype))))
# }}}
# {{{ expression mapper
class ExpressionToOpenCLCExpressionMapper(ExpressionToCExpressionMapper):
def map_group_hw_index(self, expr, type_context):
return var("gid")(expr.axis)
def map_local_hw_index(self, expr, type_context):
return var("lid")(expr.axis)
# }}}
# {{{ target
class OpenCLTarget(CFamilyTarget):
"""A target for the OpenCL C heterogeneous compute programming language.
"""
def __init__(self, atomics_flavor=None):
"""
        :arg atomics_flavor: one of ``"cl2"`` (C11-style atomics from OpenCL 2.0),
``"cl1"`` (OpenCL 1.1 atomics, using bit-for-bit compare-and-swap
for floating point), ``"cl1-exch"`` (OpenCL 1.1 atomics, using
double-exchange for floating point--not yet supported).
"""
super(OpenCLTarget, self).__init__()
if atomics_flavor is None:
atomics_flavor = "cl1"
if atomics_flavor not in ["cl1", "cl2"]:
raise ValueError("unsupported atomics flavor: %s" % atomics_flavor)
self.atomics_flavor = atomics_flavor
def split_kernel_at_global_barriers(self):
return True
def get_device_ast_builder(self):
return OpenCLCASTBuilder(self)
@memoize_method
def get_dtype_registry(self):
from loopy.target.c.compyte.dtypes import (DTypeRegistry,
fill_registry_with_opencl_c_types)
result = DTypeRegistry()
fill_registry_with_opencl_c_types(result)
# no complex number support--needs PyOpenCLTarget
_register_vector_types(result)
if self.atomics_flavor == "cl1":
return DTypeRegistryWrapperWithCL1Atomics(result)
else:
raise NotImplementedError("atomics flavor: %s" % self.atomics_flavor)
def is_vector_dtype(self, dtype):
return (isinstance(dtype, NumpyType)
and dtype.numpy_dtype in list(vec.types.values()))
def vector_dtype(self, base, count):
return NumpyType(
vec.types[base.numpy_dtype, count],
target=self)
# }}}
# {{{ ast builder
class OpenCLCASTBuilder(CFamilyASTBuilder):
# {{{ library
def function_manglers(self):
return (
[
opencl_function_mangler,
partial(c_math_mangler, modify_name=False)
] +
super(OpenCLCASTBuilder, self).function_manglers())
def symbol_manglers(self):
return (
super(OpenCLCASTBuilder, self).symbol_manglers() + [
opencl_symbol_mangler
])
def preamble_generators(self):
from loopy.library.reduction import reduction_preamble_generator
return (
super(OpenCLCASTBuilder, self).preamble_generators() + [
opencl_preamble_generator,
reduction_preamble_generator,
])
# }}}
# {{{ top-level codegen
def get_function_declaration(self, codegen_state, codegen_result,
schedule_index):
fdecl = super(OpenCLCASTBuilder, self).get_function_declaration(
codegen_state, codegen_result, schedule_index)
from loopy.target.c import FunctionDeclarationWrapper
assert isinstance(fdecl, FunctionDeclarationWrapper)
fdecl = fdecl.subdecl
from cgen.opencl import CLKernel, CLRequiredWorkGroupSize
fdecl = CLKernel(fdecl)
from loopy.schedule import get_insn_ids_for_block_at
_, local_sizes = codegen_state.kernel.get_grid_sizes_for_insn_ids_as_exprs(
get_insn_ids_for_block_at(
codegen_state.kernel.schedule, schedule_index))
from loopy.symbolic import get_dependencies
if not get_dependencies(local_sizes):
# sizes can't have parameter dependencies if they are
# to be used in static WG size.
fdecl = CLRequiredWorkGroupSize(local_sizes, fdecl)
return FunctionDeclarationWrapper(fdecl)
def generate_top_of_body(self, codegen_state):
from loopy.kernel.data import ImageArg
if any(isinstance(arg, ImageArg) for arg in codegen_state.kernel.args):
from cgen import Value, Const, Initializer
return [
Initializer(Const(Value("sampler_t", "loopy_sampler")),
"CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP "
"| CLK_FILTER_NEAREST")
]
return []
# }}}
# {{{ code generation guts
def get_expression_to_c_expression_mapper(self, codegen_state):
return ExpressionToOpenCLCExpressionMapper(codegen_state)
def add_vector_access(self, access_expr, index):
# The 'int' avoids an 'L' suffix for long ints.
return access_expr.attr("s%s" % hex(int(index))[2:])
def emit_barrier(self, synchronization_kind, mem_kind, comment):
"""
:arg kind: ``"local"`` or ``"global"``
:return: a :class:`loopy.codegen.GeneratedInstruction`.
"""
if synchronization_kind == "local":
if comment:
comment = " /* %s */" % comment
mem_kind = mem_kind.upper()
from cgen import Statement
return Statement("barrier(CLK_%s_MEM_FENCE)%s" % (mem_kind, comment))
elif synchronization_kind == "global":
raise LoopyError("OpenCL does not have global barriers")
else:
raise LoopyError("unknown barrier kind")
def wrap_temporary_decl(self, decl, scope):
if scope == AddressSpace.LOCAL:
from cgen.opencl import CLLocal
return CLLocal(decl)
elif scope == AddressSpace.PRIVATE:
return decl
else:
raise ValueError("unexpected temporary variable scope: %s"
% scope)
def wrap_global_constant(self, decl):
from cgen.opencl import CLConstant
return CLConstant(decl)
def get_array_arg_decl(self, name, mem_address_space, shape, dtype, is_written):
from cgen.opencl import CLGlobal, CLLocal
from loopy.kernel.data import AddressSpace
if mem_address_space == AddressSpace.LOCAL:
return CLLocal(super(OpenCLCASTBuilder, self).get_array_arg_decl(
name, mem_address_space, shape, dtype, is_written))
elif mem_address_space == AddressSpace.PRIVATE:
return super(OpenCLCASTBuilder, self).get_array_arg_decl(
name, mem_address_space, shape, dtype, is_written)
elif mem_address_space == AddressSpace.GLOBAL:
return CLGlobal(super(OpenCLCASTBuilder, self).get_array_arg_decl(
name, mem_address_space, shape, dtype, is_written))
else:
raise ValueError("unexpected array argument scope: %s"
% mem_address_space)
def get_global_arg_decl(self, name, shape, dtype, is_written):
from loopy.kernel.data import AddressSpace
from warnings import warn
warn("get_global_arg_decl is deprecated use get_array_arg_decl "
"instead.", DeprecationWarning, stacklevel=2)
return self.get_array_arg_decl(name, AddressSpace.GLOBAL, shape,
dtype, is_written)
def get_image_arg_decl(self, name, shape, num_target_axes, dtype, is_written):
if is_written:
mode = "w"
else:
mode = "r"
from cgen.opencl import CLImage
return CLImage(num_target_axes, mode, name)
def get_constant_arg_decl(self, name, shape, dtype, is_written):
from loopy.target.c import POD # uses the correct complex type
from cgen import RestrictPointer, Const
from cgen.opencl import CLConstant
arg_decl = RestrictPointer(POD(self, dtype, name))
if not is_written:
arg_decl = Const(arg_decl)
return CLConstant(arg_decl)
# {{{
def emit_atomic_init(self, codegen_state, lhs_atomicity, lhs_var,
lhs_expr, rhs_expr, lhs_dtype, rhs_type_context):
# for the CL1 flavor, this is as simple as a regular update with whatever
# the RHS value is...
return self.emit_atomic_update(codegen_state, lhs_atomicity, lhs_var,
lhs_expr, rhs_expr, lhs_dtype, rhs_type_context)
# }}}
# {{{ code generation for atomic update
def emit_atomic_update(self, codegen_state, lhs_atomicity, lhs_var,
lhs_expr, rhs_expr, lhs_dtype, rhs_type_context):
from pymbolic.mapper.stringifier import PREC_NONE
# FIXME: Could detect operations, generate atomic_{add,...} when
# appropriate.
if isinstance(lhs_dtype, NumpyType) and lhs_dtype.numpy_dtype in [
np.int32, np.int64, np.float32, np.float64]:
from cgen import Block, DoWhile, Assign
from loopy.target.c import POD
old_val_var = codegen_state.var_name_generator("loopy_old_val")
new_val_var = codegen_state.var_name_generator("loopy_new_val")
from loopy.kernel.data import TemporaryVariable, AddressSpace
ecm = codegen_state.expression_to_code_mapper.with_assignments(
{
old_val_var: TemporaryVariable(old_val_var, lhs_dtype),
new_val_var: TemporaryVariable(new_val_var, lhs_dtype),
})
lhs_expr_code = ecm(lhs_expr, prec=PREC_NONE, type_context=None)
from pymbolic.mapper.substitutor import make_subst_func
from pymbolic import var
from loopy.symbolic import SubstitutionMapper
subst = SubstitutionMapper(
make_subst_func({lhs_expr: var(old_val_var)}))
rhs_expr_code = ecm(subst(rhs_expr), prec=PREC_NONE,
type_context=rhs_type_context,
needed_dtype=lhs_dtype)
if lhs_dtype.numpy_dtype.itemsize == 4:
func_name = "atomic_cmpxchg"
elif lhs_dtype.numpy_dtype.itemsize == 8:
func_name = "atom_cmpxchg"
else:
raise LoopyError("unexpected atomic size")
cast_str = ""
old_val = old_val_var
new_val = new_val_var
if lhs_dtype.numpy_dtype.kind == "f":
if lhs_dtype.numpy_dtype == np.float32:
ctype = "int"
elif lhs_dtype.numpy_dtype == np.float64:
ctype = "long"
else:
assert False
from loopy.kernel.data import (TemporaryVariable, ArrayArg)
if (
isinstance(lhs_var, ArrayArg)
and
lhs_var.address_space == AddressSpace.GLOBAL):
var_kind = "__global"
elif (
isinstance(lhs_var, ArrayArg)
and
lhs_var.address_space == AddressSpace.LOCAL):
var_kind = "__local"
elif (
isinstance(lhs_var, TemporaryVariable)
and lhs_var.address_space == AddressSpace.LOCAL):
var_kind = "__local"
elif (
isinstance(lhs_var, TemporaryVariable)
and lhs_var.address_space == AddressSpace.GLOBAL):
var_kind = "__global"
else:
raise LoopyError("unexpected kind of variable '%s' in "
"atomic operation: '%s'"
% (lhs_var.name, type(lhs_var).__name__))
old_val = "*(%s *) &" % ctype + old_val
new_val = "*(%s *) &" % ctype + new_val
cast_str = "(%s %s *) " % (var_kind, ctype)
return Block([
POD(self, NumpyType(lhs_dtype.dtype, target=self.target),
old_val_var),
POD(self, NumpyType(lhs_dtype.dtype, target=self.target),
new_val_var),
DoWhile(
"%(func_name)s("
"%(cast_str)s&(%(lhs_expr)s), "
"%(old_val)s, "
"%(new_val)s"
") != %(old_val)s"
% {
"func_name": func_name,
"cast_str": cast_str,
"lhs_expr": lhs_expr_code,
"old_val": old_val,
"new_val": new_val,
},
Block([
Assign(old_val_var, lhs_expr_code),
Assign(new_val_var, rhs_expr_code),
])
)
])
else:
raise NotImplementedError("atomic update for '%s'" % lhs_dtype)
# }}}
# }}}
# }}}
# {{{ volatile mem acccess target
class VolatileMemExpressionToOpenCLCExpressionMapper(
ExpressionToOpenCLCExpressionMapper):
def make_subscript(self, array, base_expr, subscript):
registry = self.codegen_state.ast_builder.target.get_dtype_registry()
from loopy.kernel.data import AddressSpace
if array.address_space == AddressSpace.GLOBAL:
aspace = "__global "
elif array.address_space == AddressSpace.LOCAL:
aspace = "__local "
elif array.address_space == AddressSpace.PRIVATE:
aspace = ""
else:
raise ValueError("unexpected value of address space")
from pymbolic import var
return var(
"(%s volatile %s *) "
% (
registry.dtype_to_ctype(array.dtype),
aspace,
)
)(base_expr)[subscript]
class VolatileMemOpenCLCASTBuilder(OpenCLCASTBuilder):
def get_expression_to_c_expression_mapper(self, codegen_state):
return VolatileMemExpressionToOpenCLCExpressionMapper(codegen_state)
class VolatileMemOpenCLTarget(OpenCLTarget):
def get_device_ast_builder(self):
return VolatileMemOpenCLCASTBuilder(self)
# }}}
# vim: foldmethod=marker
|
the-stack_106_22297 | #CMPUT404W22 dchu Assignment 1
import socketserver, os
# Copyright 2013 Abram Hindle, Eddie Antonio Santos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Furthermore it is derived from the Python documentation examples thus
# some of the code is Copyright © 2001-2013 Python Software
# Foundation; All Rights Reserved
#
# http://docs.python.org/2/library/socketserver.html
#
# run: python freetests.py
# try: curl -v -X GET http://127.0.0.1:8080/
#GlobalDefines
G_DIRECTORY_ROOT = "www"#content directory
class MyWebServer(socketserver.BaseRequestHandler):
def handle(self):
self.data = self.request.recv(1024).strip()
dataLines = self.data.decode("utf-8").split("\n")#get each line of request
if len(dataLines) == 0 or dataLines[0] == '':
return#ignore odd behavoir with empty requests
#deal with first line of header
requestTypeName = dataLines[0].split(' ')[0]
requestDirectory = dataLines[0].split(' ')[1]
if requestTypeName == "GET":
if os.path.isfile(G_DIRECTORY_ROOT + requestDirectory):#test if file exists
self.parseAndHandleFile(requestDirectory)
elif os.path.isfile(G_DIRECTORY_ROOT + requestDirectory + "index.html"):#test if index is found
self.parseAndHandleFile(requestDirectory+ "index.html")
elif os.path.isdir(G_DIRECTORY_ROOT + requestDirectory):#test for path correction
                self.request.sendall(bytearray("HTTP/1.1 301 Moved Permanently\r\nLocation: {}\r\n\r\n".format(requestDirectory + '/'),'utf-8'))
else:#Directory doesn't exist
                self.request.sendall(bytearray("HTTP/1.1 404 Not Found\r\n\r\n",'utf-8'))
#print("Got a request for file that doesn't exist")
else:#Request isn't get
            self.request.sendall(bytearray("HTTP/1.1 405 Method Not Allowed\r\n\r\n",'utf-8'))
#print("Got an unsupported request")
def returnContent(self, ContentType, path):#return a valid file
f = open(path)
msg = 'HTTP/1.1 200 OK\r\nContent-Type: {}\r\n\r\n{}'.format(ContentType, f.read())
f.close()
self.request.sendall(bytearray(msg, "utf-8"))
def parseAndHandleFile(self, requestDirectory):
if requestDirectory[-5:] == ".html":
self.returnContent("text/html", G_DIRECTORY_ROOT + requestDirectory)
#print("HTML page requested!")
elif requestDirectory[-4:] == ".css":
self.returnContent("text/css", G_DIRECTORY_ROOT + requestDirectory)
#print("CSS page requested!")
else:
            self.request.sendall(bytearray("HTTP/1.1 404 Not Found\r\n\r\n",'utf-8'))
#print("Got a request for an unsupported file")
if __name__ == "__main__":
HOST, PORT = "localhost", 8080
#print("Starting server....")
socketserver.TCPServer.allow_reuse_address = True
# Create the server, binding to localhost on port 8080
server = socketserver.TCPServer((HOST, PORT), MyWebServer)
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
server.serve_forever()
|
the-stack_106_22298 | # coding: utf-8
# 2021/5/20 @ tongshiwei
import pytest
from EduNLP.SIF.segment import seg
from EduNLP.utils import image2base64
def test_segment(figure0, figure1, figure0_base64, figure1_base64):
seg(
r"如图所示,则$\FormFigureID{0}$的面积是$\SIFBlank$。$\FigureID{1}$",
figures={
"0": figure0,
"1": figure1
}
)
s = seg(
r"如图所示,则$\FormFigureBase64{%s}$的面积是$\SIFBlank$。$\FigureBase64{%s}$" % (figure0_base64, figure1_base64),
figures=True
)
with pytest.raises(TypeError):
s.append("123")
seg_test_text = seg(
r"如图所示,有三组$\textf{机器人,bu}$在踢$\textf{足球,b}$",
figures=True
)
assert seg_test_text.text_segments == ['如图所示,有三组机器人在踢足球']
|
the-stack_106_22300 | """Collection of tests for unified neural network activation functions."""
# global
import pytest
import numpy as np
from hypothesis import given, strategies as st
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
import ivy.functional.backends.numpy as ivy_np
# relu
@given(
x=st.lists(st.floats()),
dtype=st.sampled_from(ivy.float_dtypes),
as_variable=st.booleans(),
with_out=st.booleans(),
native_array=st.booleans(),
)
def test_relu(x, dtype, as_variable, with_out, native_array, fw):
if dtype in ivy.invalid_dtypes:
return # invalid dtype
if dtype == "float16" and fw == "torch":
return # torch does not support float16 for relu
x = ivy.array(x, dtype=dtype)
if as_variable:
if not ivy.is_float_dtype(dtype):
return # only floating point variables are supported
if with_out:
return # variables do not support out argument
x = ivy.variable(x)
if native_array:
x = x.data
ret = ivy.relu(x)
out = ret
if with_out:
if as_variable:
out = ivy.variable(out)
if native_array:
out = out.data
ret = ivy.relu(x, out=out)
if not native_array:
assert ret is out
if fw in ["tensorflow", "jax"]:
# these backends do not support native inplace updates
return
assert ret.data is (out if native_array else out.data)
# value test
if dtype == "bfloat16":
return # bfloat16 is not supported by numpy
assert np.allclose(
np.nan_to_num(ivy.to_numpy(ret)), np.nan_to_num(ivy_np.relu(ivy.to_numpy(x)))
)
# leaky_relu
@pytest.mark.parametrize("x", [[[-1.0, 1.0, 2.0]]])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_leaky_relu(x, dtype, tensor_fn, device, call):
# smoke test
x = tensor_fn(x, dtype, device)
ret = ivy.leaky_relu(x)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.leaky_relu, x), ivy_np.leaky_relu(ivy.to_numpy(x)))
# gelu
@pytest.mark.parametrize("x", [[[-1.0, 1.0, 2.0]]])
@pytest.mark.parametrize("approx", [True, False])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_gelu(x, approx, dtype, tensor_fn, device, call):
# smoke test
x = tensor_fn(x, dtype, device)
ret = ivy.gelu(x, approx)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.gelu, x, approx), ivy_np.gelu(ivy.to_numpy(x), approx))
# tanh
@pytest.mark.parametrize("x", [[[-1.0, 1.0, 2.0]]])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_tanh(x, dtype, tensor_fn, device, call):
# smoke test
x = tensor_fn(x, dtype, device)
ret = ivy.tanh(x)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.tanh, x), ivy_np.tanh(ivy.to_numpy(x)))
# sigmoid
@pytest.mark.parametrize("x", [[[-1.0, 1.0, 2.0]]])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_sigmoid(x, dtype, tensor_fn, device, call):
# smoke test
x = tensor_fn(x, dtype, device)
ret = ivy.sigmoid(x)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.sigmoid, x), ivy_np.sigmoid(ivy.to_numpy(x)))
# softmax
@pytest.mark.parametrize("x", [[[-1.0, 1.0, 2.0]]])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_softmax(x, dtype, tensor_fn, device, call):
# smoke test
x = tensor_fn(x, dtype, device)
ret = ivy.softmax(x)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.softmax, x), ivy_np.softmax(ivy.to_numpy(x)))
# softplus
@pytest.mark.parametrize("x", [[[-1.0, 1.0, 2.0]]])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_softplus(x, dtype, tensor_fn, device, call):
# smoke test
x = tensor_fn(x, dtype, device)
ret = ivy.softplus(x)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.softplus, x), ivy_np.softplus(ivy.to_numpy(x)))
|
the-stack_106_22301 | from paypalrestsdk import Payout, ResourceNotFound
import random
import string
sender_batch_id = ''.join(
random.choice(string.ascii_uppercase) for i in range(12))
payout = Payout({
"sender_batch_header": {
"sender_batch_id": sender_batch_id,
"email_subject": "You have a payment"
},
"items": [
{
"recipient_type": "EMAIL",
"amount": {
"value": 0.99,
"currency": "USD"
},
"receiver": "[email protected]",
"note": "Thank you.",
"sender_item_id": "item_1"
}
]
})
if payout.create(sync_mode=True):
print("payout[%s] created successfully" %
(payout.batch_header.payout_batch_id))
else:
print(payout.error)
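# Hedged follow-up (sketch, not in the original sample): with sync_mode=True the request
# blocks until the batch is processed; the batch can usually be re-fetched by id later,
# assuming the SDK's find helper is available in the installed version:
#     fetched = Payout.find(payout.batch_header.payout_batch_id)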
|
the-stack_106_22302 |
import os
import numpy as np
import numpy.ctypeslib as clib
c_int = clib.ctypes.c_int
c_int8 = clib.ctypes.c_int8
c_int16 = clib.ctypes.c_int16
c_int32 = clib.ctypes.c_int32
c_int64 = clib.ctypes.c_int64
c_dbl = clib.ctypes.c_double
c_dPt = clib.ndpointer(dtype=np.float64, flags="C_CONTIGUOUS")
c_i8Pt = clib.ndpointer(dtype=np.int8, flags="C_CONTIGUOUS")
c_i16Pt = clib.ndpointer(dtype=np.int16, flags="C_CONTIGUOUS")
c_i32Pt = clib.ndpointer(dtype=np.int32, flags="C_CONTIGUOUS")
c_i64Pt = clib.ndpointer(dtype=np.int64, flags="C_CONTIGUOUS")
c_iPt = clib.ndpointer(dtype=np.int32, flags="C_CONTIGUOUS")
if os.name == 'nt':
_seisloclib = clib.load_library(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../lib/SeisLoc.dll'), '.')
else: # posix
_seisloclib = clib.load_library(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../lib/SeisLoc.so'), '.')
_seisloclib.onset.argtypes = [c_dPt, c_int, c_int, c_int, c_int, c_dPt]
_seisloclib.onset_mp.argtypes = [c_dPt, c_int, c_int, c_int, c_int, c_int, c_dPt]
def onset(env, stw, ltw, gap):
ntr = env[..., 0].size
nsamp = env.shape[-1]
out = np.zeros(env.shape, dtype=np.float64)
env = np.ascontiguousarray(env, np.float64)
if ntr > 1:
_seisloclib.onset_mp(env, ntr, nsamp, int(stw), int(ltw), int(gap), out)
return out
else:
_seisloclib.onset(env, nsamp, int(stw), int(ltw), int(gap), out)
return out
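# Hedged usage sketch (shapes follow the wrapper above): env holds one envelope per trace
# and stw/ltw/gap are window lengths in samples for the STA/LTA-style onset function, e.g.
#     env = np.abs(data) # data shaped (ntr, nsamp), float64
#     out = onset(env, stw=50, ltw=500, gap=10)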
_seisloclib.levinson.argtypes = [c_dPt, c_int, c_dPt, c_dPt, c_dPt, c_dPt]
_seisloclib.levinson_mp.argtypes = [c_dPt, c_int, c_int, c_int, c_dPt, c_dPt, c_dPt, c_dPt]
_seisloclib.nlevinson.argtypes = [c_dPt, c_int, c_dPt, c_dPt]
_seisloclib.nlevinson_mp.argtypes = [c_dPt, c_int, c_int, c_dPt, c_dPt]
def levinson(acc, order, return_error=False):
acc = np.array(acc, dtype=np.double)
if acc.ndim > 1:
nsamp = acc.shape[-1]
nchan = acc[..., 0].size
chan = acc.shape[:-1]
a = np.zeros(chan + (order+1,), dtype=np.double)
e = np.zeros(chan, dtype=np.double)
k = np.zeros(chan + (order,), dtype=np.double)
tmp = np.zeros(chan + (order,), dtype=np.double)
_seisloclib.levinson_mp(acc, nchan, nsamp, order, a,
e, k, tmp)
else:
nsamp = acc.shape[-1]
order = min(order, nsamp-1)
a = np.zeros(order+1, dtype=np.double)
e = np.zeros(1, dtype=np.double)
k = np.zeros(order, dtype=np.double)
tmp = np.zeros(order, dtype=np.double)
_seisloclib.levinson(acc, order, a,
e, k, tmp)
if return_error:
return a, k, e
else:
return a, k
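# Hedged usage sketch: given a 1-D autocorrelation sequence, levinson returns the AR
# prediction coefficients and reflection coefficients (and optionally the error), e.g.
#     acc = np.correlate(x, x, mode='full')[len(x)-1:] # x is a 1-D signal
#     a, k = levinson(acc, order=4)
#     a, k, e = levinson(acc, order=4, return_error=True)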
def nlevinson(acc):
acc = np.array(acc, dtype=np.double)
if acc.ndim > 1:
nsamp = acc.shape[-1]
nchan = acc[..., 0].size
a = np.zeros(acc.shape, dtype=np.double)
tmp = np.zeros(acc.shape, dtype=np.double)
_seisloclib.nlevinson_mp(acc, nchan, nsamp,
a, tmp)
else:
nsamp = acc.shape[-1]
a = np.zeros(nsamp, dtype=np.double)
tmp = np.zeros(nsamp, dtype=np.double)
_seisloclib.nlevinson(acc, nsamp,
a, tmp)
return a
_seisloclib.scan4d.argtypes = [c_dPt, c_i32Pt, c_dPt, c_int32, c_int32, c_int32, c_int32, c_int64, c_int64]
_seisloclib.detect4d.argtypes = [c_dPt, c_dPt, c_i64Pt,c_int32, c_int32, c_int32, c_int64, c_int64]
# _seisloclib.detect4d_t.argtypes = [c_dPt, c_dPt, c_i64Pt,c_int32, c_int32, c_int32, c_int64, c_int64]
def scan(sig, tt, fsmp, lsmp, nsamp, map4d, threads):
nstn, ssmp = sig.shape
if not tt.shape[-1] == nstn:
raise ValueError('Mismatch between number of stations for data and LUT, {} - {}.'.format(
nstn, tt.shape[-1]))
ncell = tt.shape[:-1]
tcell = np.prod(ncell)
if map4d.size < nsamp*tcell:
raise ValueError('4D-Array is too small.')
if sig.size < nsamp + fsmp:
raise ValueError('Data array smaller than Coalescence array')
    _seisloclib.scan4d(sig, tt, map4d, c_int32(fsmp), c_int32(lsmp), c_int32(nsamp), c_int32(nstn), c_int64(tcell), c_int64(threads))
def detect(mmap, dsnr, dind, fsmp, lsmp, threads):
    nsamp = mmap.shape[-1]
    ncell = np.prod(mmap.shape[:-1])
    if dsnr.size < nsamp or dind.size < nsamp:
        raise ValueError('Output array size too small, sample count = {}.'.format(nsamp))
    _seisloclib.detect4d(mmap, dsnr, dind, c_int32(fsmp), c_int32(lsmp), c_int32(nsamp), c_int64(ncell), c_int64(threads))
# def detect_t(mmap, dsnr, dind, fsmp, lsmp, threads):
# nsamp = mmap.shape[0]
# ncell = np.prod(mmap.shape[1:])
# if dsnr.size < nsamp or dind.size < nsamp:
# raise ValueError('Ouput array size too small, sample count = {}.'.format(nsamp))
# _seisloclib.detect4d_t(mmap, dsnr, dind, c_int32(fsmp), c_int32(lsmp),
# c_int32(nsamp), c_int64(ncell), c_int64(threads))
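# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes the
# compiled SeisLoc shared library loaded above is actually present; the array
# shapes and window lengths below are invented purely for illustration.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    # onset(): 3 envelope traces of 1000 samples -> characteristic function
    env = np.abs(rng.randn(3, 1000))
    cf = onset(env, stw=10, ltw=100, gap=5)
    print('onset output shape:', cf.shape)
    # levinson(): fit a low-order AR model to the autocorrelation of one trace
    acc = np.correlate(env[0], env[0], mode='full')[env.shape[-1] - 1:]
    a, k = levinson(acc, order=4)
    print('AR coefficients:', a)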
|
the-stack_106_22305 | import config
import requests
import time
import random
import json
from utils import Stack, Queue
from room import Room
# token = config.TOKEN
# headers = {
# 'Authorization': f"Token {token}",
# 'Content-Type': 'application/json'
# }
# init_response = requests.get('https://lambda-treasure-hunt.herokuapp.com/api/adv/init/', headers=headers)
# data = init_response.json()
# print(data)
# time.sleep(data['cooldown'])
# room_exits = data['exits']
# roomID = data['room_id']
# roomInfo = f'room_id: {data["room_id"]}, title: {data["title"]}, coords: {data["coordinates"]}'
# print(roomInfo)
# traversalPath = []
# reversePath = []
# visitedRoom = {}
# MapRoom = []
# # Start with current room 0 and get it's exits
# # visitedRoom[roomID] = room_exits
# # Get stats
# res = requests.post("https://lambda-treasure-hunt.herokuapp.com/api/adv/status/", headers=headers)
# stats = res.json()
# print(stats)
# inventory = stats['inventory']
# time.sleep(data['cooldown'])
# # Traverse entire graph while the rooms visited is less than 500
# while len(visitedRoom) < 500:
# unvisited = []
# room_exits = data['exits']
# items = data['items']
# # take the items in room
# if len(items) > 0:
# for item in items:
# res = requests.post("https://lambda-treasure-hunt.herokuapp.com/api/adv/take/", json={'name': f'{item}'}, headers=headers)
# print(res)
# time.sleep(data['cooldown'])
# # check stats
# res = requests.post("https://lambda-treasure-hunt.herokuapp.com/api/adv/status/", headers=headers)
# stats = res.json()
# print(stats)
# time.sleep(data['cooldown'])
# visitedRoom[data["room_id"]] = data["coordinates"]
# print("visited room", visitedRoom)
# # Sell items at Shop
# if (data['title'] == 'Shop'):
# while len(inventory) > 0:
# res = requests.post("https://lambda-treasure-hunt.herokuapp.com/api/adv/sell/", json={'name':'treasure'}, headers=headers)
# time.sleep(data['cooldown'])
# print("Do you want to sell your treasure?")
# # Confirm to sell
# confirm_data = {
# "name":"tiny treasure",
# "confirm": input("Confirm 'yes' to sell: ")
# }
# res = requests.post("https://lambda-treasure-hunt.herokuapp.com/api/adv/sell/", json=confirm_data, headers=headers)
# print(res)
# time.sleep(data['cooldown'])
# # enter next direction
# post_data = {
# "direction": input("Enter your direction: ")
# }
# res = requests.post("https://lambda-treasure-hunt.herokuapp.com/api/adv//move", json=post_data, headers=headers)
# data = res.json()
# roomID = data['room_id']
# print(data)
# time.sleep(data['cooldown'])
# # Collect all rooms stored in the DB
# def get_room_dict():
# room_dict = {}
# room_list = requests.get('https://team2-bw.herokuapp.com/api/rooms/').json()
# for room in room_list:
# room_dict[room['id']] = room
# return room_dict
# # Variable for get_room_dict()
# rooms_we_have = get_room_dict()
# # Compares the current room to the list of visited rooms in the DB. If the ID doesn't exist, post current ID data.
# if data['room_id'] not in rooms_we_have:
# db_send = {
# "id": data["room_id"],
# "coordinates": data["coordinates"],
# "name": data["title"],
# "description": data["description"],
# }
# requests.post("https://team2-bw.herokuapp.com/api/rooms/", json=db_send).json()
token = config.TOKEN
headers = {
'Authorization': f"Token {token}",
'Content-Type': 'application/json'
}
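# The latest room returned by the game API is cached on disk (currentRoom.json)
# so the script can be stopped and restarted between server cooldowns without
# losing track of where the player currently is.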
def writeCurrentRoom(data):
with open('currentRoom.json', 'w') as currentRoom:
currentRoom.write(json.dumps(data))
def readCurrentRoom():
with open('currentRoom.json', 'r') as currentRoom:
data=currentRoom.read()
return json.loads(data)
writeCurrentRoom(requests.get('https://lambda-treasure-hunt.herokuapp.com/api/adv/init/', headers=headers).json())
time.sleep(readCurrentRoom()['cooldown'])
traversalGraph = {}
def movePlayerAndWait(direction):
current_room = readCurrentRoom()
print(f"moving in direction: {direction}")
post_data = {
"direction": direction
}
res = requests.post("https://lambda-treasure-hunt.herokuapp.com/api/adv//move", json=post_data, headers=headers)
writeCurrentRoom(res.json())
current_room = res.json()
print(f"waiting {current_room['cooldown']} secs")
time.sleep(current_room['cooldown'])
addCurrentRoomToGraph()
def addCurrentRoomToGraph():
current_room = readCurrentRoom()
tmp = {}
if current_room['room_id'] not in traversalGraph:
for i in current_room['exits']:
tmp[i] = "?"
traversalGraph[current_room['room_id']] = tmp
# send current room to backend
db_send = {
"id": current_room["room_id"],
"coordinates": current_room["coordinates"],
"name": current_room["title"],
"description": current_room["description"],
}
requests.post("https://team2-bw.herokuapp.com/api/rooms/", json=db_send).json()
print("posted to db")
def findUnexploredRoom():
current_room = readCurrentRoom()
q = Queue()
q.enqueue([current_room['room_id']])
while q.size():
path = q.dequeue()
room = path[-1]
for i in traversalGraph[room]:
if traversalGraph[room][i] == "?":
return path
else:
path_copy = path[:]
path_copy.append(traversalGraph[room][i])
q.enqueue(path_copy)
return None
def movePlayerToDeadEnd():
current_room = readCurrentRoom()
addCurrentRoomToGraph()
startRoom = current_room["room_id"]
startExits = current_room['exits']
nonExplored = []
for i in startExits:
if traversalGraph[startRoom][i] == "?":
nonExplored.append(i)
if len(nonExplored) < 1:
return
else:
        # pick one of the unexplored exits uniformly at random
        direction = random.choice(nonExplored)
movePlayerAndWait(direction)
addCurrentRoomToGraph()
current_room = readCurrentRoom()
traversalGraph[startRoom][direction] = current_room["room_id"]
if direction == "n":
traversalGraph[current_room["room_id"]]["s"] = startRoom
elif direction == "e":
traversalGraph[current_room["room_id"]]["w"] = startRoom
elif direction == "s":
traversalGraph[current_room["room_id"]]["n"] = startRoom
elif direction == "w":
traversalGraph[current_room["room_id"]]["e"] = startRoom
movePlayerToDeadEnd()
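# Overall strategy: random-walk through unexplored exits until a dead end is
# reached (movePlayerToDeadEnd), then BFS over the rooms mapped so far to find
# the nearest room that still has a '?' exit (findUnexploredRoom), walk back to
# it along that path, and repeat until no unexplored exits remain.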
def traverseThisMap():
current_room = readCurrentRoom()
movePlayerToDeadEnd()
unexploredPath = findUnexploredRoom()
    if unexploredPath is None:
return
else:
for i in range(len(unexploredPath) - 1):
if current_room['room_id'] == unexploredPath[-1]:
break
for j in traversalGraph[current_room['room_id']]:
if traversalGraph[current_room['room_id']][j] == unexploredPath[i + 1]:
movePlayerAndWait(j)
current_room = readCurrentRoom()
traverseThisMap()
addCurrentRoomToGraph()
traverseThisMap() |
the-stack_106_22306 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.utils.data as Data
import torchvision
import matplotlib.pyplot as plt
from Dataloader import ImgDataset
import numpy as np
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
# self.nn = nn
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=64,
kernel_size=(5, 5), stride=1, padding=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2, 2)))
self.conv2 = nn.Sequential(
nn.Conv2d(64, 128, (3, 3), 1, 2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2, 2)))
# self.conv3 = nn.Sequential(
# nn.Conv2d(256, 64, (3, 3), 1, 2),
# nn.ReLU(),
# nn.MaxPool2d(kernel_size=(2, 2)))
self.ln1 = nn.Linear(88704, 256)
self.out = nn.Linear(256, 3)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
# x = self.conv3(x)
x = x.view(x.size(0), -1)
x = self.ln1(x)
x = self.out(x)
# output = F.
output = x
return output
# def forward(self, x):
# x = self.conv1(x)
# x = self.conv2(x)
# x = x.view(x.size(0), -1)
# self.out = nn.Linear(x.size(1), 10).to(device_0)
# output = self.out(x)
# return output
# if isPoltSave is True:
# test_output = cnn(test_datas[:10])
# pred_y = torch.max(test_output, 1)[1].data.squeeze()
# print(pred_y, 'prediction number')
# print(test_labels[:10], 'real number')
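# Minimal sketches added here because get_device() and train_model() are called
# in __main__ below but are neither defined nor imported in this file. They
# assume the ImgDataset loader yields (image_tensor, label) batches.
def get_device():
    # prefer a CUDA device when available, otherwise fall back to the CPU
    return torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def train_model(epochs, train_loader, test_datasets):
    # plain supervised training loop; cnn, optimizer, loss_func and device_0
    # are the module-level globals set up in __main__ (test_datasets is kept
    # only to match the original call signature and is unused in this sketch)
    for epoch in range(epochs):
        running_loss = 0.0
        num_batches = 0
        for batch_x, batch_y in train_loader:
            batch_x, batch_y = batch_x.to(device_0), batch_y.to(device_0)
            output = cnn(batch_x)
            loss = loss_func(output, batch_y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            num_batches += 1
        print('epoch %d | mean loss %.4f' % (epoch, running_loss / max(1, num_batches)))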
if __name__ == "__main__":
device_0 = get_device()
train_path = "Data/train"
train_datasets = ImgDataset(train_path, isTrain=True)
test_path = "Data/test"
test_datasets = ImgDataset(test_path)
train_loader = Data.DataLoader(
dataset=train_datasets,
batch_size=6,
shuffle=True,
num_workers=2
)
cnn = CNN().to(device_0)
# print(cnn)
optimizer = torch.optim.Adam(cnn.parameters(), lr=1e-3)
loss_func = nn.CrossEntropyLoss()
EPOCH = 30
train_model(EPOCH, train_loader, test_datasets)
torch.save(cnn.state_dict(), 'out/cnn-model.pkl')
|
the-stack_106_22309 | """
This module defines tensors with abstract index notation.
The abstract index notation was first formalized by Penrose.
Tensor indices are formal objects with a tensor type; there is no
notion of index range: it is only possible to assign a dimension,
used to trace the Kronecker delta; the dimension can be a Symbol.
The Einstein summation convention is used.
The covariant indices are indicated with a minus sign in front of the index.
For instance the tensor ``t = p(a)*A(b,c)*q(-c)`` has the index ``c``
contracted.
A tensor expression ``t`` can be called; called with its
indices in sorted order it is equal to itself:
in the above example ``t(a, b) == t``;
one can call ``t`` with different indices; ``t(c, d) == p(c)*A(d,a)*q(-a)``.
The contracted indices are dummy indices, internally they have no name,
the indices being represented by a graph-like structure.
Tensors are put in canonical form using ``canon_bp``, which uses
the Butler-Portugal algorithm for canonicalization using the monoterm
symmetries of the tensors.
If there is a (anti)symmetric metric, the indices can be raised and
lowered when the tensor is put in canonical form.
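For instance (an illustrative sketch; the heads ``p``, ``q`` and ``A`` below
are arbitrary names chosen for this example):
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b, c = tensor_indices('a,b,c', Lorentz)
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> A = tensorhead('A', [Lorentz]*2, [[1]*2])
>>> t = p(a)*A(b, c)*q(-c)       # the index c is contracted
>>> t_canonical = t.canon_bp()   # canonical form via the Butler-Portugal algorithm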
"""
from __future__ import print_function, division
from collections import defaultdict
from sympy import Matrix, Rational
from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, \
bsgs_direct_product, canonicalize, riemann_bsgs
from sympy.core import Basic, sympify, Add, S
from sympy.core.compatibility import string_types, reduce, range
from sympy.core.containers import Tuple
from sympy.core.decorators import deprecated
from sympy.core.symbol import Symbol, symbols
from sympy.core.sympify import CantSympify
from sympy.external import import_module
from sympy.utilities.decorator import doctest_depends_on
from sympy.matrices import eye
from sympy.functions.special.tensor_functions import LeviCivita
class TIDS(CantSympify):
"""
Tensor-index data structure. This contains internal data structures about
components of a tensor expression, its free and dummy indices.
To create a ``TIDS`` object via the standard constructor, the required
arguments are
WARNING: this class is meant as an internal representation of tensor data
structures and should not be directly accessed by end users.
Parameters
==========
components : ``TensorHead`` objects representing the components of the tensor expression.
free : Free indices in their internal representation.
dum : Dummy indices in their internal representation.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TIDS, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> T = tensorhead('T', [Lorentz]*4, [[1]*4])
>>> TIDS([T], [(m0, 0, 0), (m3, 3, 0)], [(1, 2, 0, 0)])
TIDS([T(Lorentz,Lorentz,Lorentz,Lorentz)], [(m0, 0, 0), (m3, 3, 0)], [(1, 2, 0, 0)])
Notes
=====
In short, this has created the components, free and dummy indices for
the internal representation of a tensor T(m0, m1, -m1, m3).
Free indices are represented as a list of triplets. The elements of
each triplet identify a single free index and are
1. TensorIndex object
2. position inside the component
3. component number
Dummy indices are represented as a list of 4-plets. Each 4-plet stands
for couple for contracted indices, their original TensorIndex is not
stored as it is no longer required. The four elements of the 4-plet
are
1. position inside the component of the first index.
2. position inside the component of the second index.
3. component number of the first index.
4. component number of the second index.
"""
def __init__(self, components, free, dum):
self.components = components
self.free = free
self.dum = dum
self._ext_rank = len(self.free) + 2*len(self.dum)
self.dum.sort(key=lambda x: (x[2], x[0]))
def get_tensors(self):
"""
Get a list of ``Tensor`` objects having the same ``TIDS`` if multiplied
by one another.
"""
indices = self.get_indices()
components = self.components
tensors = [None for i in components] # pre-allocate list
ind_pos = 0
for i, component in enumerate(components):
prev_pos = ind_pos
ind_pos += component.rank
tensors[i] = Tensor(component, indices[prev_pos:ind_pos])
return tensors
def get_components_with_free_indices(self):
"""
Get a list of components with their associated indices.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TIDS, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> T = tensorhead('T', [Lorentz]*4, [[1]*4])
>>> A = tensorhead('A', [Lorentz], [[1]])
>>> t = TIDS.from_components_and_indices([T], [m0, m1, -m1, m3])
>>> t.get_components_with_free_indices()
[(T(Lorentz,Lorentz,Lorentz,Lorentz), [(m0, 0, 0), (m3, 3, 0)])]
>>> t2 = (A(m0)*A(-m0))._tids
>>> t2.get_components_with_free_indices()
[(A(Lorentz), []), (A(Lorentz), [])]
>>> t3 = (A(m0)*A(-m1)*A(-m0)*A(m1))._tids
>>> t3.get_components_with_free_indices()
[(A(Lorentz), []), (A(Lorentz), []), (A(Lorentz), []), (A(Lorentz), [])]
>>> t4 = (A(m0)*A(m1)*A(-m0))._tids
>>> t4.get_components_with_free_indices()
[(A(Lorentz), []), (A(Lorentz), [(m1, 0, 1)]), (A(Lorentz), [])]
>>> t5 = (A(m0)*A(m1)*A(m2))._tids
>>> t5.get_components_with_free_indices()
[(A(Lorentz), [(m0, 0, 0)]), (A(Lorentz), [(m1, 0, 1)]), (A(Lorentz), [(m2, 0, 2)])]
"""
components = self.components
ret_comp = []
free_counter = 0
if len(self.free) == 0:
return [(comp, []) for comp in components]
for i, comp in enumerate(components):
c_free = []
while free_counter < len(self.free):
if not self.free[free_counter][2] == i:
break
c_free.append(self.free[free_counter])
free_counter += 1
if free_counter >= len(self.free):
break
ret_comp.append((comp, c_free))
return ret_comp
@staticmethod
def from_components_and_indices(components, indices):
"""
Create a new ``TIDS`` object from ``components`` and ``indices``
``components`` ``TensorHead`` objects representing the components
of the tensor expression.
``indices`` ``TensorIndex`` objects, the indices. Contractions are
detected upon construction.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TIDS, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> T = tensorhead('T', [Lorentz]*4, [[1]*4])
>>> TIDS.from_components_and_indices([T], [m0, m1, -m1, m3])
TIDS([T(Lorentz,Lorentz,Lorentz,Lorentz)], [(m0, 0, 0), (m3, 3, 0)], [(1, 2, 0, 0)])
        When there are several components, the same indices map to slightly
        different internal positions:
>>> A = tensorhead('A', [Lorentz], [[1]])
>>> TIDS.from_components_and_indices([A]*4, [m0, m1, -m1, m3])
TIDS([A(Lorentz), A(Lorentz), A(Lorentz), A(Lorentz)], [(m0, 0, 0), (m3, 0, 3)], [(0, 0, 1, 2)])
"""
tids = None
cur_pos = 0
for i in components:
tids_sing = TIDS([i], *TIDS.free_dum_from_indices(*indices[cur_pos:cur_pos+i.rank]))
if tids is None:
tids = tids_sing
else:
tids *= tids_sing
cur_pos += i.rank
if tids is None:
tids = TIDS([], [], [])
tids.free.sort(key=lambda x: x[0].name)
tids.dum.sort()
return tids
@deprecated(useinstead="get_indices")
def to_indices(self):
return self.get_indices()
@staticmethod
def free_dum_from_indices(*indices):
"""
Convert ``indices`` into ``free``, ``dum`` for single component tensor
``free`` list of tuples ``(index, pos, 0)``,
where ``pos`` is the position of index in
the list of indices formed by the component tensors
``dum`` list of tuples ``(pos_contr, pos_cov, 0, 0)``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TIDS
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> TIDS.free_dum_from_indices(m0, m1, -m1, m3)
([(m0, 0, 0), (m3, 3, 0)], [(1, 2, 0, 0)])
"""
n = len(indices)
if n == 1:
return [(indices[0], 0, 0)], []
# find the positions of the free indices and of the dummy indices
free = [True]*len(indices)
index_dict = {}
dum = []
for i, index in enumerate(indices):
name = index._name
typ = index._tensortype
contr = index._is_up
if (name, typ) in index_dict:
# found a pair of dummy indices
is_contr, pos = index_dict[(name, typ)]
# check consistency and update free
if is_contr:
if contr:
raise ValueError('two equal contravariant indices in slots %d and %d' %(pos, i))
else:
free[pos] = False
free[i] = False
else:
if contr:
free[pos] = False
free[i] = False
else:
raise ValueError('two equal covariant indices in slots %d and %d' %(pos, i))
if contr:
dum.append((i, pos, 0, 0))
else:
dum.append((pos, i, 0, 0))
else:
index_dict[(name, typ)] = index._is_up, i
free = [(index, i, 0) for i, index in enumerate(indices) if free[i]]
free.sort()
return free, dum
@staticmethod
def _check_matrix_indices(f_free, g_free, nc1):
# This "private" method checks matrix indices.
# Matrix indices are special as there are only two, and observe
# anomalous substitution rules to determine contractions.
dum = []
# make sure that free indices appear in the same order as in their component:
f_free.sort(key=lambda x: (x[2], x[1]))
g_free.sort(key=lambda x: (x[2], x[1]))
matrix_indices_storage = {}
transform_right_to_left = {}
f_pop_pos = []
g_pop_pos = []
for free_pos, (ind, i, c) in enumerate(f_free):
index_type = ind._tensortype
if ind not in (index_type.auto_left, -index_type.auto_right):
continue
matrix_indices_storage[ind] = (free_pos, i, c)
for free_pos, (ind, i, c) in enumerate(g_free):
index_type = ind._tensortype
if ind not in (index_type.auto_left, -index_type.auto_right):
continue
if ind == index_type.auto_left:
if -index_type.auto_right in matrix_indices_storage:
other_pos, other_i, other_c = matrix_indices_storage.pop(-index_type.auto_right)
dum.append((other_i, i, other_c, c + nc1))
# mark to remove other_pos and free_pos from free:
g_pop_pos.append(free_pos)
f_pop_pos.append(other_pos)
continue
if ind in matrix_indices_storage:
other_pos, other_i, other_c = matrix_indices_storage.pop(ind)
dum.append((other_i, i, other_c, c + nc1))
# mark to remove other_pos and free_pos from free:
g_pop_pos.append(free_pos)
f_pop_pos.append(other_pos)
transform_right_to_left[-index_type.auto_right] = c
continue
if ind in transform_right_to_left:
other_c = transform_right_to_left.pop(ind)
if c == other_c:
g_free[free_pos] = (index_type.auto_left, i, c)
for i in reversed(sorted(f_pop_pos)):
f_free.pop(i)
for i in reversed(sorted(g_pop_pos)):
g_free.pop(i)
return dum
@staticmethod
def mul(f, g):
"""
The algorithms performing the multiplication of two ``TIDS`` instances.
In short, it forms a new ``TIDS`` object, joining components and indices,
checking that abstract indices are compatible, and possibly contracting
them.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TIDS, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> T = tensorhead('T', [Lorentz]*4, [[1]*4])
>>> A = tensorhead('A', [Lorentz], [[1]])
>>> tids_1 = TIDS.from_components_and_indices([T], [m0, m1, -m1, m3])
>>> tids_2 = TIDS.from_components_and_indices([A], [m2])
>>> tids_1 * tids_2
TIDS([T(Lorentz,Lorentz,Lorentz,Lorentz), A(Lorentz)],\
[(m0, 0, 0), (m3, 3, 0), (m2, 0, 1)], [(1, 2, 0, 0)])
In this case no contraction has been performed.
>>> tids_3 = TIDS.from_components_and_indices([A], [-m3])
>>> tids_1 * tids_3
TIDS([T(Lorentz,Lorentz,Lorentz,Lorentz), A(Lorentz)],\
[(m0, 0, 0)], [(1, 2, 0, 0), (3, 0, 0, 1)])
Free indices ``m3`` and ``-m3`` are identified as a contracted couple, and are
therefore transformed into dummy indices.
A wrong index construction (for example, trying to contract two
contravariant indices or using indices multiple times) would result in
an exception:
>>> tids_4 = TIDS.from_components_and_indices([A], [m3])
>>> # This raises an exception:
>>> # tids_1 * tids_4
"""
index_up = lambda u: u if u.is_up else -u
        # lambda returns True if the index is not a matrix index:
notmat = lambda i: i not in (i._tensortype.auto_left, -i._tensortype.auto_right)
f_free = f.free[:]
g_free = g.free[:]
nc1 = len(f.components)
dum = TIDS._check_matrix_indices(f_free, g_free, nc1)
# find out which free indices of f and g are contracted
free_dict1 = {i if i.is_up else -i: (pos, cpos, i) for i, pos, cpos in f_free}
free_dict2 = {i if i.is_up else -i: (pos, cpos, i) for i, pos, cpos in g_free}
free_names = set(free_dict1.keys()) & set(free_dict2.keys())
# find the new `free` and `dum`
dum2 = [(i1, i2, c1 + nc1, c2 + nc1) for i1, i2, c1, c2 in g.dum]
free1 = [(ind, i, c) for ind, i, c in f_free if index_up(ind) not in free_names]
free2 = [(ind, i, c + nc1) for ind, i, c in g_free if index_up(ind) not in free_names]
free = free1 + free2
dum.extend(f.dum + dum2)
for name in free_names:
ipos1, cpos1, ind1 = free_dict1[name]
ipos2, cpos2, ind2 = free_dict2[name]
cpos2 += nc1
if ind1._is_up == ind2._is_up:
raise ValueError('wrong index construction {0}'.format(ind1))
if ind1._is_up:
new_dummy = (ipos1, ipos2, cpos1, cpos2)
else:
new_dummy = (ipos2, ipos1, cpos2, cpos1)
dum.append(new_dummy)
return (f.components + g.components, free, dum)
def __mul__(self, other):
return TIDS(*self.mul(self, other))
def __str__(self):
return "TIDS({0}, {1}, {2})".format(self.components, self.free, self.dum)
def __repr__(self):
return self.__str__()
def sorted_components(self):
"""
Returns a ``TIDS`` with sorted components
The sorting is done taking into account the commutation group
of the component tensors.
"""
from sympy.combinatorics.permutations import _af_invert
cv = list(zip(self.components, range(len(self.components))))
sign = 1
n = len(cv) - 1
for i in range(n):
for j in range(n, i, -1):
c = cv[j-1][0].commutes_with(cv[j][0])
if c not in [0, 1]:
continue
if (cv[j-1][0]._types, cv[j-1][0]._name) > \
(cv[j][0]._types, cv[j][0]._name):
cv[j-1], cv[j] = cv[j], cv[j-1]
if c:
sign = -sign
# perm_inv[new_pos] = old_pos
components = [x[0] for x in cv]
perm_inv = [x[1] for x in cv]
perm = _af_invert(perm_inv)
free = [(ind, i, perm[c]) for ind, i, c in self.free]
free.sort()
dum = [(i1, i2, perm[c1], perm[c2]) for i1, i2, c1, c2 in self.dum]
dum.sort(key=lambda x: components[x[2]].index_types[x[0]])
return TIDS(components, free, dum), sign
def _get_sorted_free_indices_for_canon(self):
sorted_free = self.free[:]
sorted_free.sort(key=lambda x: x[0])
return sorted_free
def _get_sorted_dum_indices_for_canon(self):
return sorted(self.dum, key=lambda x: (x[2], x[0]))
def canon_args(self):
"""
Returns ``(g, dummies, msym, v)``, the entries of ``canonicalize``
see ``canonicalize`` in ``tensor_can.py``
"""
# to be called after sorted_components
from sympy.combinatorics.permutations import _af_new
# types = list(set(self._types))
# types.sort(key = lambda x: x._name)
n = self._ext_rank
g = [None]*n + [n, n+1]
pos = 0
vpos = []
components = self.components
for t in components:
vpos.append(pos)
pos += t._rank
# ordered indices: first the free indices, ordered by types
# then the dummy indices, ordered by types and contravariant before
# covariant
# g[position in tensor] = position in ordered indices
for i, (indx, ipos, cpos) in enumerate(self._get_sorted_free_indices_for_canon()):
pos = vpos[cpos] + ipos
g[pos] = i
pos = len(self.free)
j = len(self.free)
dummies = []
prev = None
a = []
msym = []
for ipos1, ipos2, cpos1, cpos2 in self._get_sorted_dum_indices_for_canon():
pos1 = vpos[cpos1] + ipos1
pos2 = vpos[cpos2] + ipos2
g[pos1] = j
g[pos2] = j + 1
j += 2
typ = components[cpos1].index_types[ipos1]
if typ != prev:
if a:
dummies.append(a)
a = [pos, pos + 1]
prev = typ
msym.append(typ.metric_antisym)
else:
a.extend([pos, pos + 1])
pos += 2
if a:
dummies.append(a)
numtyp = []
prev = None
for t in components:
if t == prev:
numtyp[-1][1] += 1
else:
prev = t
numtyp.append([prev, 1])
v = []
for h, n in numtyp:
if h._comm == 0 or h._comm == 1:
comm = h._comm
else:
comm = TensorManager.get_comm(h._comm, h._comm)
v.append((h._symmetry.base, h._symmetry.generators, n, comm))
return _af_new(g), dummies, msym, v
def perm2tensor(self, g, canon_bp=False):
"""
Returns a ``TIDS`` instance corresponding to the permutation ``g``
``g`` permutation corresponding to the tensor in the representation
used in canonicalization
``canon_bp`` if True, then ``g`` is the permutation
corresponding to the canonical form of the tensor
"""
vpos = []
components = self.components
pos = 0
for t in components:
vpos.append(pos)
pos += t._rank
sorted_free = [i[0] for i in self._get_sorted_free_indices_for_canon()]
nfree = len(sorted_free)
rank = self._ext_rank
dum = [[None]*4 for i in range((rank - nfree)//2)]
free = []
icomp = -1
for i in range(rank):
if i in vpos:
icomp += vpos.count(i)
pos0 = i
ipos = i - pos0
gi = g[i]
if gi < nfree:
ind = sorted_free[gi]
free.append((ind, ipos, icomp))
else:
j = gi - nfree
idum, cov = divmod(j, 2)
if cov:
dum[idum][1] = ipos
dum[idum][3] = icomp
else:
dum[idum][0] = ipos
dum[idum][2] = icomp
dum = [tuple(x) for x in dum]
return TIDS(components, free, dum)
def get_indices(self):
"""
Get a list of indices, creating new tensor indices to complete dummy indices.
"""
components = self.components
free = self.free
dum = self.dum
indices = [None]*self._ext_rank
start = 0
pos = 0
vpos = []
for t in components:
vpos.append(pos)
pos += t.rank
cdt = defaultdict(int)
# if the free indices have names with dummy_fmt, start with an
# index higher than those for the dummy indices
# to avoid name collisions
for indx, ipos, cpos in free:
if indx._name.split('_')[0] == indx._tensortype._dummy_fmt[:-3]:
cdt[indx._tensortype] = max(cdt[indx._tensortype], int(indx._name.split('_')[1]) + 1)
start = vpos[cpos]
indices[start + ipos] = indx
for ipos1, ipos2, cpos1, cpos2 in dum:
start1 = vpos[cpos1]
start2 = vpos[cpos2]
typ1 = components[cpos1].index_types[ipos1]
assert typ1 == components[cpos2].index_types[ipos2]
fmt = typ1._dummy_fmt
nd = cdt[typ1]
indices[start1 + ipos1] = TensorIndex(fmt % nd, typ1)
indices[start2 + ipos2] = TensorIndex(fmt % nd, typ1, False)
cdt[typ1] += 1
return indices
def contract_metric(self, g):
"""
Returns new TIDS and sign.
Sign is either 1 or -1, to correct the sign after metric contraction
(for spinor indices).
"""
components = self.components
antisym = g.index_types[0].metric_antisym
#if not any(x == g for x in components):
# return self
# list of positions of the metric ``g``
gpos = [i for i, x in enumerate(components) if x == g]
if not gpos:
return self, 1
sign = 1
dum = self.dum[:]
free = self.free[:]
elim = set()
for gposx in gpos:
if gposx in elim:
continue
free1 = [x for x in free if x[-1] == gposx]
dum1 = [x for x in dum if x[-2] == gposx or x[-1] == gposx]
if not dum1:
continue
elim.add(gposx)
if len(dum1) == 2:
if not antisym:
dum10, dum11 = dum1
if dum10[3] == gposx:
# the index with pos p0 and component c0 is contravariant
c0 = dum10[2]
p0 = dum10[0]
else:
# the index with pos p0 and component c0 is covariant
c0 = dum10[3]
p0 = dum10[1]
if dum11[3] == gposx:
# the index with pos p1 and component c1 is contravariant
c1 = dum11[2]
p1 = dum11[0]
else:
# the index with pos p1 and component c1 is covariant
c1 = dum11[3]
p1 = dum11[1]
dum.append((p0, p1, c0, c1))
else:
dum10, dum11 = dum1
# change the sign to bring the indices of the metric to contravariant
# form; change the sign if dum10 has the metric index in position 0
if dum10[3] == gposx:
# the index with pos p0 and component c0 is contravariant
c0 = dum10[2]
p0 = dum10[0]
if dum10[1] == 1:
sign = -sign
else:
# the index with pos p0 and component c0 is covariant
c0 = dum10[3]
p0 = dum10[1]
if dum10[0] == 0:
sign = -sign
if dum11[3] == gposx:
# the index with pos p1 and component c1 is contravariant
c1 = dum11[2]
p1 = dum11[0]
sign = -sign
else:
# the index with pos p1 and component c1 is covariant
c1 = dum11[3]
p1 = dum11[1]
dum.append((p0, p1, c0, c1))
elif len(dum1) == 1:
if not antisym:
dp0, dp1, dc0, dc1 = dum1[0]
if dc0 == dc1:
# g(i, -i)
typ = g.index_types[0]
if typ._dim is None:
raise ValueError('dimension not assigned')
sign = sign*typ._dim
else:
# g(i0, i1)*p(-i1)
if dc0 == gposx:
p1 = dp1
c1 = dc1
else:
p1 = dp0
c1 = dc0
ind, p, c = free1[0]
free.append((ind, p1, c1))
else:
dp0, dp1, dc0, dc1 = dum1[0]
if dc0 == dc1:
# g(i, -i)
typ = g.index_types[0]
if typ._dim is None:
raise ValueError('dimension not assigned')
sign = sign*typ._dim
if dp0 < dp1:
# g(i, -i) = -D with antisymmetric metric
sign = -sign
else:
# g(i0, i1)*p(-i1)
if dc0 == gposx:
p1 = dp1
c1 = dc1
if dp0 == 0:
sign = -sign
else:
p1 = dp0
c1 = dc0
ind, p, c = free1[0]
free.append((ind, p1, c1))
dum = [x for x in dum if x not in dum1]
free = [x for x in free if x not in free1]
shift = 0
shifts = [0]*len(components)
for i in range(len(components)):
if i in elim:
shift += 1
continue
shifts[i] = shift
free = [(ind, p, c - shifts[c]) for (ind, p, c) in free if c not in elim]
dum = [(p0, p1, c0 - shifts[c0], c1 - shifts[c1]) for i, (p0, p1, c0, c1) in enumerate(dum) if c0 not in elim and c1 not in elim]
components = [c for i, c in enumerate(components) if i not in elim]
tids = TIDS(components, free, dum)
return tids, sign
class _TensorDataLazyEvaluator(CantSympify):
"""
EXPERIMENTAL: do not rely on this class, it may change without deprecation
warnings in future versions of SymPy.
This object contains the logic to associate components data to a tensor
expression. Components data are set via the ``.data`` property of tensor
expressions, is stored inside this class as a mapping between the tensor
expression and the ``ndarray``.
Computations are executed lazily: whereas the tensor expressions can have
contractions, tensor products, and additions, components data are not
computed until they are accessed by reading the ``.data`` property
associated to the tensor expression.
"""
_substitutions_dict = dict()
_substitutions_dict_tensmul = dict()
def __getitem__(self, key):
dat = self._get(key)
if dat is None:
return None
numpy = import_module("numpy")
if not isinstance(dat, numpy.ndarray):
return dat
if dat.ndim == 0:
return dat[()]
elif dat.ndim == 1 and dat.size == 1:
return dat[0]
return dat
def _get(self, key):
"""
Retrieve ``data`` associated with ``key``.
This algorithm looks into ``self._substitutions_dict`` for all
``TensorHead`` in the ``TensExpr`` (or just ``TensorHead`` if key is a
TensorHead instance). It reconstructs the components data that the
tensor expression should have by performing on components data the
operations that correspond to the abstract tensor operations applied.
Metric tensor is handled in a different manner: it is pre-computed in
``self._substitutions_dict_tensmul``.
"""
if key in self._substitutions_dict:
return self._substitutions_dict[key]
if isinstance(key, TensorHead):
return None
if isinstance(key, Tensor):
# special case to handle metrics. Metric tensors cannot be
# constructed through contraction by the metric, their
# components show if they are a matrix or its inverse.
signature = tuple([i.is_up for i in key.get_indices()])
srch = (key.component,) + signature
if srch in self._substitutions_dict_tensmul:
return self._substitutions_dict_tensmul[srch]
return self.data_tensmul_from_tensorhead(key, key.component)
if isinstance(key, TensMul):
tensmul_list = key.split()
if len(tensmul_list) == 1 and len(tensmul_list[0].components) == 1:
# special case to handle metrics. Metric tensors cannot be
# constructed through contraction by the metric, their
# components show if they are a matrix or its inverse.
signature = tuple([i.is_up for i in tensmul_list[0].get_indices()])
srch = (tensmul_list[0].components[0],) + signature
if srch in self._substitutions_dict_tensmul:
return self._substitutions_dict_tensmul[srch]
data_list = [self.data_tensmul_from_tensorhead(i, i.components[0]) for i in tensmul_list]
if all([i is None for i in data_list]):
return None
if any([i is None for i in data_list]):
raise ValueError("Mixing tensors with associated components "\
"data with tensors without components data")
data_result, tensmul_result = self.data_product_tensors(data_list, tensmul_list)
return data_result
if isinstance(key, TensAdd):
sumvar = S.Zero
data_list = []
free_args_list = []
for arg in key.args:
if isinstance(arg, TensExpr):
data_list.append(arg.data)
free_args_list.append([x[0] for x in arg.free])
else:
data_list.append(arg)
free_args_list.append([])
if all([i is None for i in data_list]):
return None
if any([i is None for i in data_list]):
raise ValueError("Mixing tensors with associated components "\
"data with tensors without components data")
numpy = import_module("numpy")
for data, free_args in zip(data_list, free_args_list):
if len(free_args) < 2:
sumvar += data
else:
free_args_pos = {y: x for x, y in enumerate(free_args)}
axes = [free_args_pos[arg] for arg in key.free_args]
sumvar += numpy.transpose(data, axes)
return sumvar
return None
def data_tensorhead_from_tensmul(self, data, tensmul, tensorhead):
"""
This method is used when assigning components data to a ``TensMul``
object, it converts components data to a fully contravariant ndarray,
which is then stored according to the ``TensorHead`` key.
"""
if data is None:
return None
return self._correct_signature_from_indices(
data,
tensmul.get_indices(),
tensmul.free,
tensmul.dum,
True)
def data_tensmul_from_tensorhead(self, tensmul, tensorhead):
"""
This method corrects the components data to the right signature
(covariant/contravariant) using the metric associated with each
``TensorIndexType``.
"""
if tensorhead.data is None:
return None
return self._correct_signature_from_indices(
tensorhead.data,
tensmul.get_indices(),
tensmul.free,
tensmul.dum)
def data_product_tensors(self, data_list, tensmul_list):
"""
Given a ``data_list``, list of ``ndarray``'s and a ``tensmul_list``,
list of ``TensMul`` instances, compute the resulting ``ndarray``,
after tensor products and contractions.
"""
def data_mul(f, g):
"""
Multiplies two ``ndarray`` objects, it first calls ``TIDS.mul``,
then checks which indices have been contracted, and finally
contraction operation on data, according to the contracted indices.
"""
data1, tensmul1 = f
data2, tensmul2 = g
components, free, dum = TIDS.mul(tensmul1, tensmul2)
data = _TensorDataLazyEvaluator._contract_ndarray(tensmul1.free, tensmul2.free, data1, data2)
# TODO: do this more efficiently... maybe by just passing an index list
# to .data_product_tensor(...)
return data, TensMul.from_TIDS(S.One, TIDS(components, free, dum))
return reduce(data_mul, zip(data_list, tensmul_list))
def _assign_data_to_tensor_expr(self, key, data):
if isinstance(key, TensAdd):
raise ValueError('cannot assign data to TensAdd')
# here it is assumed that `key` is a `TensMul` instance.
if len(key.components) != 1:
raise ValueError('cannot assign data to TensMul with multiple components')
tensorhead = key.components[0]
newdata = self.data_tensorhead_from_tensmul(data, key, tensorhead)
return tensorhead, newdata
def _check_permutations_on_data(self, tens, data):
import numpy
if isinstance(tens, TensorHead):
rank = tens.rank
generators = tens.symmetry.generators
elif isinstance(tens, Tensor):
rank = tens.rank
generators = tens.components[0].symmetry.generators
elif isinstance(tens, TensorIndexType):
rank = tens.metric.rank
generators = tens.metric.symmetry.generators
# Every generator is a permutation, check that by permuting the array
# by that permutation, the array will be the same, except for a
# possible sign change if the permutation admits it.
for gener in generators:
sign_change = +1 if (gener(rank) == rank) else -1
data_swapped = data
last_data = data
permute_axes = list(map(gener, list(range(rank))))
# the order of a permutation is the number of times to get the
# identity by applying that permutation.
for i in range(gener.order()-1):
data_swapped = numpy.transpose(data_swapped, permute_axes)
# if any value in the difference array is non-zero, raise an error:
if (last_data - sign_change*data_swapped).any():
raise ValueError("Component data symmetry structure error")
last_data = data_swapped
def __setitem__(self, key, value):
"""
Set the components data of a tensor object/expression.
Components data are transformed to the all-contravariant form and stored
with the corresponding ``TensorHead`` object. If a ``TensorHead`` object
cannot be uniquely identified, it will raise an error.
"""
data = _TensorDataLazyEvaluator.parse_data(value)
self._check_permutations_on_data(key, data)
# TensorHead and TensorIndexType can be assigned data directly, while
# TensMul must first convert data to a fully contravariant form, and
# assign it to its corresponding TensorHead single component.
if not isinstance(key, (TensorHead, TensorIndexType)):
key, data = self._assign_data_to_tensor_expr(key, data)
if isinstance(key, TensorHead):
for dim, indextype in zip(data.shape, key.index_types):
if indextype.data is None:
raise ValueError("index type {} has no components data"\
" associated (needed to raise/lower index)".format(indextype))
if indextype.dim is None:
continue
if dim != indextype.dim:
raise ValueError("wrong dimension of ndarray")
self._substitutions_dict[key] = data
def __delitem__(self, key):
del self._substitutions_dict[key]
def __contains__(self, key):
return key in self._substitutions_dict
@staticmethod
def _contract_ndarray(free1, free2, ndarray1, ndarray2):
numpy = import_module('numpy')
def ikey(x):
# sort by component number , then by position in component
return x[2], x[1]
free1 = free1[:]
free2 = free2[:]
free1.sort(key=ikey)
free2.sort(key=ikey)
self_free = [_[0] for _ in free1]
axes1 = []
axes2 = []
for jpos, jindex in enumerate(free2):
if -jindex[0] in self_free:
nidx = self_free.index(-jindex[0])
else:
continue
axes1.append(nidx)
axes2.append(jpos)
contracted_ndarray = numpy.tensordot(
ndarray1,
ndarray2,
(axes1, axes2)
)
return contracted_ndarray
@staticmethod
def add_tensor_mul(prod, f, g):
def mul_function():
return _TensorDataLazyEvaluator._contract_ndarray(f.free, g.free, f.data, g.data)
_TensorDataLazyEvaluator._substitutions_dict[prod] = mul_function()
@staticmethod
def add_tensor_add(addition, f, g):
def add_function():
return f.data + g.data
_TensorDataLazyEvaluator._substitutions_dict[addition] = add_function()
def add_metric_data(self, metric, data):
"""
Assign data to the ``metric`` tensor. The metric tensor behaves in an
anomalous way when raising and lowering indices.
A fully covariant metric is the inverse transpose of the fully
        contravariant metric (i.e. the matrix inverse). If the metric is
symmetric, the transpose is not necessary and mixed
covariant/contravariant metrics are Kronecker deltas.
"""
# hard assignment, data should not be added to `TensorHead` for metric:
# the problem with `TensorHead` is that the metric is anomalous, i.e.
# raising and lowering the index means considering the metric or its
# inverse, this is not the case for other tensors.
self._substitutions_dict_tensmul[metric, True, True] = data
inverse_transpose = self.inverse_transpose_matrix(data)
        # in symmetric spaces, the transpose is the same as the original matrix,
# the full covariant metric tensor is the inverse transpose, so this
# code will be able to handle non-symmetric metrics.
self._substitutions_dict_tensmul[metric, False, False] = inverse_transpose
# now mixed cases, these are identical to the unit matrix if the metric
# is symmetric.
m = Matrix(data)
invt = Matrix(inverse_transpose)
self._substitutions_dict_tensmul[metric, True, False] = m * invt
self._substitutions_dict_tensmul[metric, False, True] = invt * m
@staticmethod
def _flip_index_by_metric(data, metric, pos):
numpy = import_module('numpy')
data = numpy.tensordot(
metric,
data,
(1, pos))
return numpy.rollaxis(data, 0, pos+1)
@staticmethod
def inverse_matrix(ndarray):
m = Matrix(ndarray).inv()
return _TensorDataLazyEvaluator.parse_data(m)
@staticmethod
def inverse_transpose_matrix(ndarray):
m = Matrix(ndarray).inv().T
return _TensorDataLazyEvaluator.parse_data(m)
@staticmethod
def _correct_signature_from_indices(data, indices, free, dum, inverse=False):
"""
Utility function to correct the values inside the components data
ndarray according to whether indices are covariant or contravariant.
It uses the metric matrix to lower values of covariant indices.
"""
numpy = import_module('numpy')
        # change the ndarray values according to the covariance/contravariance of the indices
# use the metric
for i, indx in enumerate(indices):
if not indx.is_up and not inverse:
data = _TensorDataLazyEvaluator._flip_index_by_metric(data, indx._tensortype.data, i)
elif not indx.is_up and inverse:
data = _TensorDataLazyEvaluator._flip_index_by_metric(
data,
_TensorDataLazyEvaluator.inverse_matrix(indx._tensortype.data),
i
)
if len(dum) > 0:
### perform contractions ###
axes1 = []
axes2 = []
for i, indx1 in enumerate(indices):
try:
nd = indices[:i].index(-indx1)
except ValueError:
continue
axes1.append(nd)
axes2.append(i)
for ax1, ax2 in zip(axes1, axes2):
data = numpy.trace(data, axis1=ax1, axis2=ax2)
return data
@staticmethod
def _sort_data_axes(old, new):
numpy = import_module('numpy')
new_data = old.data.copy()
old_free = [i[0] for i in old.free]
new_free = [i[0] for i in new.free]
for i in range(len(new_free)):
for j in range(i, len(old_free)):
if old_free[j] == new_free[i]:
old_free[i], old_free[j] = old_free[j], old_free[i]
new_data = numpy.swapaxes(new_data, i, j)
break
return new_data
@staticmethod
def add_rearrange_tensmul_parts(new_tensmul, old_tensmul):
def sorted_compo():
return _TensorDataLazyEvaluator._sort_data_axes(old_tensmul, new_tensmul)
_TensorDataLazyEvaluator._substitutions_dict[new_tensmul] = sorted_compo()
@staticmethod
@doctest_depends_on(modules=('numpy',))
def parse_data(data):
"""
Transform ``data`` to a numpy ndarray. The parameter ``data`` may
contain data in various formats, e.g. nested lists, sympy ``Matrix``,
and so on.
Examples
========
>>> from sympy.tensor.tensor import _TensorDataLazyEvaluator
>>> _TensorDataLazyEvaluator.parse_data([1, 3, -6, 12])
[1 3 -6 12]
>>> _TensorDataLazyEvaluator.parse_data([[1, 2], [4, 7]])
[[1 2]
[4 7]]
"""
numpy = import_module('numpy')
if (numpy is not None) and (not isinstance(data, numpy.ndarray)):
if len(data) == 2 and hasattr(data[0], '__call__'):
def fromfunction_sympify(*x):
return sympify(data[0](*x))
data = numpy.fromfunction(fromfunction_sympify, data[1])
else:
vsympify = numpy.vectorize(sympify)
data = vsympify(numpy.array(data))
return data
_tensor_data_substitution_dict = _TensorDataLazyEvaluator()
class _TensorManager(object):
"""
Class to manage tensor properties.
Notes
=====
Tensors belong to tensor commutation groups; each group has a label
``comm``; there are predefined labels:
``0`` tensors commuting with any other tensor
``1`` tensors anticommuting among themselves
    ``2``        tensors not commuting, apart from those with ``comm=0``
Other groups can be defined using ``set_comm``; tensors in those
groups commute with those with ``comm=0``; by default they
do not commute with any other group.
"""
def __init__(self):
self._comm_init()
def _comm_init(self):
self._comm = [{} for i in range(3)]
for i in range(3):
self._comm[0][i] = 0
self._comm[i][0] = 0
self._comm[1][1] = 1
self._comm[2][1] = None
self._comm[1][2] = None
self._comm_symbols2i = {0:0, 1:1, 2:2}
self._comm_i2symbol = {0:0, 1:1, 2:2}
@property
def comm(self):
return self._comm
def comm_symbols2i(self, i):
"""
get the commutation group number corresponding to ``i``
``i`` can be a symbol or a number or a string
If ``i`` is not already defined its commutation group number
is set.
"""
if i not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[n][0] = 0
self._comm[0][n] = 0
self._comm_symbols2i[i] = n
self._comm_i2symbol[n] = i
return n
return self._comm_symbols2i[i]
def comm_i2symbol(self, i):
"""
Returns the symbol corresponding to the commutation group number.
"""
return self._comm_i2symbol[i]
def set_comm(self, i, j, c):
"""
set the commutation parameter ``c`` for commutation groups ``i, j``
Parameters
==========
i, j : symbols representing commutation groups
c : group commutation number
Notes
=====
``i, j`` can be symbols, strings or numbers,
apart from ``0, 1`` and ``2`` which are reserved respectively
for commuting, anticommuting tensors and tensors not commuting
        with any other group apart from the commuting tensors.
For the remaining cases, use this method to set the commutation rules;
by default ``c=None``.
The group commutation number ``c`` is assigned in correspondence
to the group commutation symbols; it can be
0 commuting
1 anticommuting
None no commutation property
Examples
========
``G`` and ``GH`` do not commute with themselves and commute with
each other; A is commuting.
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead, TensorManager
>>> Lorentz = TensorIndexType('Lorentz')
>>> i0,i1,i2,i3,i4 = tensor_indices('i0:5', Lorentz)
>>> A = tensorhead('A', [Lorentz], [[1]])
>>> G = tensorhead('G', [Lorentz], [[1]], 'Gcomm')
>>> GH = tensorhead('GH', [Lorentz], [[1]], 'GHcomm')
>>> TensorManager.set_comm('Gcomm', 'GHcomm', 0)
>>> (GH(i1)*G(i0)).canon_bp()
G(i0)*GH(i1)
>>> (G(i1)*G(i0)).canon_bp()
G(i1)*G(i0)
>>> (G(i1)*A(i0)).canon_bp()
A(i0)*G(i1)
"""
if c not in (0, 1, None):
raise ValueError('`c` can assume only the values 0, 1 or None')
if i not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[n][0] = 0
self._comm[0][n] = 0
self._comm_symbols2i[i] = n
self._comm_i2symbol[n] = i
if j not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[0][n] = 0
self._comm[n][0] = 0
self._comm_symbols2i[j] = n
self._comm_i2symbol[n] = j
ni = self._comm_symbols2i[i]
nj = self._comm_symbols2i[j]
self._comm[ni][nj] = c
self._comm[nj][ni] = c
def set_comms(self, *args):
"""
set the commutation group numbers ``c`` for symbols ``i, j``
Parameters
==========
args : sequence of ``(i, j, c)``
"""
for i, j, c in args:
self.set_comm(i, j, c)
def get_comm(self, i, j):
"""
Return the commutation parameter for commutation group numbers ``i, j``
see ``_TensorManager.set_comm``
"""
return self._comm[i].get(j, 0 if i == 0 or j == 0 else None)
def clear(self):
"""
Clear the TensorManager.
"""
self._comm_init()
TensorManager = _TensorManager()
@doctest_depends_on(modules=('numpy',))
class TensorIndexType(Basic):
"""
A TensorIndexType is characterized by its name and its metric.
Parameters
==========
name : name of the tensor type
metric : metric symmetry or metric object or ``None``
dim : dimension, it can be a symbol or an integer or ``None``
eps_dim : dimension of the epsilon tensor
dummy_fmt : name of the head of dummy indices
Attributes
==========
``name``
``metric_name`` : it is 'metric' or metric.name
``metric_antisym``
``metric`` : the metric tensor
``delta`` : ``Kronecker delta``
``epsilon`` : the ``Levi-Civita epsilon`` tensor
``dim``
``dim_eps``
``dummy_fmt``
``data`` : a property to add ``ndarray`` values, to work in a specified basis.
Notes
=====
The ``metric`` parameter can be:
``metric = False`` symmetric metric (in Riemannian geometry)
``metric = True`` antisymmetric metric (for spinor calculus)
``metric = None`` there is no metric
``metric`` can be an object having ``name`` and ``antisym`` attributes.
If there is a metric the metric is used to raise and lower indices.
In the case of antisymmetric metric, the following raising and
lowering conventions will be adopted:
``psi(a) = g(a, b)*psi(-b); chi(-a) = chi(b)*g(-b, -a)``
``g(-a, b) = delta(-a, b); g(b, -a) = -delta(a, -b)``
where ``delta(-a, b) = delta(b, -a)`` is the ``Kronecker delta``
(see ``TensorIndex`` for the conventions on indices).
If there is no metric it is not possible to raise or lower indices;
e.g. the index of the defining representation of ``SU(N)``
is 'covariant' and the conjugate representation is
'contravariant'; for ``N > 2`` they are linearly independent.
``eps_dim`` is by default equal to ``dim``, if the latter is an integer;
else it can be assigned (for use in naive dimensional regularization);
if ``eps_dim`` is not an integer ``epsilon`` is ``None``.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> Lorentz.metric
metric(Lorentz,Lorentz)
Examples with metric components data added, this means it is working on a
fixed basis:
>>> Lorentz.data = [1, -1, -1, -1]
>>> Lorentz
TensorIndexType(Lorentz, 0)
>>> Lorentz.data
[[1 0 0 0]
[0 -1 0 0]
[0 0 -1 0]
[0 0 0 -1]]
"""
def __new__(cls, name, metric=False, dim=None, eps_dim=None,
dummy_fmt=None):
if isinstance(name, string_types):
name = Symbol(name)
obj = Basic.__new__(cls, name, S.One if metric else S.Zero)
obj._name = str(name)
if not dummy_fmt:
obj._dummy_fmt = '%s_%%d' % obj.name
else:
obj._dummy_fmt = '%s_%%d' % dummy_fmt
if metric is None:
obj.metric_antisym = None
obj.metric = None
else:
if metric in (True, False, 0, 1):
metric_name = 'metric'
obj.metric_antisym = metric
else:
metric_name = metric.name
obj.metric_antisym = metric.antisym
sym2 = TensorSymmetry(get_symmetric_group_sgs(2, obj.metric_antisym))
S2 = TensorType([obj]*2, sym2)
obj.metric = S2(metric_name)
obj.metric._matrix_behavior = True
obj._dim = dim
obj._delta = obj.get_kronecker_delta()
obj._eps_dim = eps_dim if eps_dim else dim
obj._epsilon = obj.get_epsilon()
obj._autogenerated = []
return obj
@property
def auto_right(self):
if not hasattr(self, '_auto_right'):
self._auto_right = TensorIndex("auto_right", self)
return self._auto_right
@property
def auto_left(self):
if not hasattr(self, '_auto_left'):
self._auto_left = TensorIndex("auto_left", self)
return self._auto_left
@property
def auto_index(self):
if not hasattr(self, '_auto_index'):
self._auto_index = TensorIndex("auto_index", self)
return self._auto_index
@property
def data(self):
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
# This assignment is a bit controversial, should metric components be assigned
# to the metric only or also to the TensorIndexType object? The advantage here
# is the ability to assign a 1D array and transform it to a 2D diagonal array.
numpy = import_module('numpy')
data = _TensorDataLazyEvaluator.parse_data(data)
if data.ndim > 2:
raise ValueError("data have to be of rank 1 (diagonal metric) or 2.")
if data.ndim == 1:
if self.dim is not None:
nda_dim = data.shape[0]
if nda_dim != self.dim:
raise ValueError("Dimension mismatch")
dim = data.shape[0]
newndarray = numpy.zeros((dim, dim), dtype=object)
for i, val in enumerate(data):
newndarray[i, i] = val
data = newndarray
dim1, dim2 = data.shape
if dim1 != dim2:
raise ValueError("Non-square matrix tensor.")
if self.dim is not None:
if self.dim != dim1:
raise ValueError("Dimension mismatch")
_tensor_data_substitution_dict[self] = data
_tensor_data_substitution_dict.add_metric_data(self.metric, data)
delta = self.get_kronecker_delta()
i1 = TensorIndex('i1', self)
i2 = TensorIndex('i2', self)
delta(i1, -i2).data = _TensorDataLazyEvaluator.parse_data(eye(dim1))
epsilon = self.get_epsilon()
i3 = TensorIndex('i3', self)
levi_civita = [[[LeviCivita(i,j,k) for k in range(dim1)]
for j in range(dim1)]
for i in range(dim1)]
epsilon(i1,-i2,-i3).data = _TensorDataLazyEvaluator.parse_data(levi_civita)
@data.deleter
def data(self):
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
if self.metric in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self.metric]
@property
def name(self):
return self._name
@property
def dim(self):
return self._dim
@property
def delta(self):
return self._delta
@property
def eps_dim(self):
return self._eps_dim
@property
def epsilon(self):
return self._epsilon
@property
def dummy_fmt(self):
return self._dummy_fmt
def get_kronecker_delta(self):
sym2 = TensorSymmetry(get_symmetric_group_sgs(2))
S2 = TensorType([self]*2, sym2)
delta = S2('KD')
delta._matrix_behavior = True
return delta
def get_epsilon(self):
if not isinstance(self._eps_dim, int):
return None
sym = TensorSymmetry(get_symmetric_group_sgs(self._eps_dim, 1))
Sdim = TensorType([self]*self._eps_dim, sym)
epsilon = Sdim('Eps')
return epsilon
def __lt__(self, other):
return self.name < other.name
def __str__(self):
return self.name
__repr__ = __str__
def _components_data_full_destroy(self):
"""
EXPERIMENTAL: do not rely on this API method.
This destroys components data associated to the ``TensorIndexType``, if
any, specifically:
* metric tensor data
* Kronecker tensor data
"""
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def delete_tensmul_data(key):
if key in _tensor_data_substitution_dict._substitutions_dict_tensmul:
del _tensor_data_substitution_dict._substitutions_dict_tensmul[key]
# delete metric data:
delete_tensmul_data((self.metric, True, True))
delete_tensmul_data((self.metric, True, False))
delete_tensmul_data((self.metric, False, True))
delete_tensmul_data((self.metric, False, False))
# delete delta tensor data:
delta = self.get_kronecker_delta()
if delta in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[delta]
@doctest_depends_on(modules=('numpy',))
class TensorIndex(Basic):
"""
Represents an abstract tensor index.
Parameters
==========
name : name of the index, or ``True`` if you want it to be automatically assigned
tensortype : ``TensorIndexType`` of the index
is_up : flag for contravariant index
Attributes
==========
``name``
``tensortype``
``is_up``
Notes
=====
Tensor indices are contracted with the Einstein summation convention.
An index can be in contravariant or in covariant form; in the latter
case it is represented prepending a ``-`` to the index name.
Dummy indices have a name with head given by ``tensortype._dummy_fmt``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, TensorIndex, TensorSymmetry, TensorType, get_symmetric_group_sgs
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i = TensorIndex('i', Lorentz); i
i
>>> sym1 = TensorSymmetry(*get_symmetric_group_sgs(1))
>>> S1 = TensorType([Lorentz], sym1)
>>> A, B = S1('A,B')
>>> A(i)*B(-i)
A(L_0)*B(-L_0)
If you want the index name to be automatically assigned, just put ``True``
in the ``name`` field, it will be generated using the reserved character
``_`` in front of its name, in order to avoid conflicts with possible
existing indices:
>>> i0 = TensorIndex(True, Lorentz)
>>> i0
_i0
>>> i1 = TensorIndex(True, Lorentz)
>>> i1
_i1
>>> A(i0)*B(-i1)
A(_i0)*B(-_i1)
>>> A(i0)*B(-i0)
A(L_0)*B(-L_0)
"""
def __new__(cls, name, tensortype, is_up=True):
if isinstance(name, string_types):
name_symbol = Symbol(name)
elif isinstance(name, Symbol):
name_symbol = name
elif name is True:
name = "_i{0}".format(len(tensortype._autogenerated))
name_symbol = Symbol(name)
tensortype._autogenerated.append(name_symbol)
else:
raise ValueError("invalid name")
obj = Basic.__new__(cls, name_symbol, tensortype, S.One if is_up else S.Zero)
obj._name = str(name)
obj._tensortype = tensortype
obj._is_up = is_up
return obj
@property
def name(self):
return self._name
@property
def tensortype(self):
return self._tensortype
@property
def is_up(self):
return self._is_up
def _print(self):
s = self._name
if not self._is_up:
s = '-%s' % s
return s
def __lt__(self, other):
return (self._tensortype, self._name) < (other._tensortype, other._name)
def __neg__(self):
t1 = TensorIndex(self._name, self._tensortype,
(not self._is_up))
return t1
def tensor_indices(s, typ):
"""
Returns list of tensor indices given their names and their types
Parameters
==========
s : string of comma separated names of indices
typ : list of ``TensorIndexType`` of the indices
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
"""
if isinstance(s, str):
a = [x.name for x in symbols(s, seq=True)]
else:
raise ValueError('expecting a string')
tilist = [TensorIndex(i, typ) for i in a]
if len(tilist) == 1:
return tilist[0]
return tilist
@doctest_depends_on(modules=('numpy',))
class TensorSymmetry(Basic):
"""
Monoterm symmetry of a tensor
Parameters
==========
bsgs : tuple ``(base, sgs)`` BSGS of the symmetry of the tensor
Attributes
==========
``base`` : base of the BSGS
``generators`` : generators of the BSGS
``rank`` : rank of the tensor
Notes
=====
A tensor can have an arbitrary monoterm symmetry provided by its BSGS.
Multiterm symmetries, like the cyclic symmetry of the Riemann tensor,
are not covered.
See Also
========
sympy.combinatorics.tensor_can.get_symmetric_group_sgs
Examples
========
Define a symmetric tensor
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorSymmetry, TensorType, get_symmetric_group_sgs
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> sym2 = TensorSymmetry(get_symmetric_group_sgs(2))
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> V = S2('V')
"""
def __new__(cls, *args, **kw_args):
if len(args) == 1:
base, generators = args[0]
elif len(args) == 2:
base, generators = args
else:
raise TypeError("bsgs required, either two separate parameters or one tuple")
if not isinstance(base, Tuple):
base = Tuple(*base)
if not isinstance(generators, Tuple):
generators = Tuple(*generators)
obj = Basic.__new__(cls, base, generators, **kw_args)
return obj
@property
def base(self):
return self.args[0]
@property
def generators(self):
return self.args[1]
@property
def rank(self):
return self.args[1][0].size - 2
def tensorsymmetry(*args):
"""
Return a ``TensorSymmetry`` object.
One can represent a tensor with any monoterm slot symmetry group
using a BSGS.
``args`` can be a BSGS
``args[0]`` base
``args[1]`` sgs
Usually tensors are in (direct products of) representations
of the symmetric group;
``args`` can be a list of lists representing the shapes of Young tableaux
Notes
=====
For instance:
``[[1]]`` vector
``[[1]*n]`` symmetric tensor of rank ``n``
``[[n]]`` antisymmetric tensor of rank ``n``
``[[2, 2]]`` monoterm slot symmetry of the Riemann tensor
``[[1],[1]]`` vector*vector
    ``[[2],[1],[1]]`` (antisymmetric tensor)*vector*vector
Notice that with the shape ``[2, 2]`` we associate only the monoterm
symmetries of the Riemann tensor; this is an abuse of notation,
since the shape ``[2, 2]`` corresponds usually to the irreducible
representation characterized by the monoterm symmetries and by the
cyclic symmetry.
Examples
========
Symmetric tensor using a Young tableau
>>> from sympy.tensor.tensor import TensorIndexType, TensorType, tensorsymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> sym2 = tensorsymmetry([1, 1])
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> V = S2('V')
Symmetric tensor using a ``BSGS`` (base, strong generator set)
>>> from sympy.tensor.tensor import TensorSymmetry, get_symmetric_group_sgs
>>> sym2 = tensorsymmetry(*get_symmetric_group_sgs(2))
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> V = S2('V')
"""
from sympy.combinatorics import Permutation
def tableau2bsgs(a):
if len(a) == 1:
            # shape [n]: antisymmetric tensor of rank n
n = a[0]
bsgs = get_symmetric_group_sgs(n, 1)
else:
if all(x == 1 for x in a):
                # shape [1]*n: symmetric tensor of rank n
n = len(a)
bsgs = get_symmetric_group_sgs(n)
elif a == [2, 2]:
bsgs = riemann_bsgs
else:
raise NotImplementedError
return bsgs
if not args:
return TensorSymmetry(Tuple(), Tuple(Permutation(1)))
if len(args) == 2 and isinstance(args[1][0], Permutation):
return TensorSymmetry(args)
base, sgs = tableau2bsgs(args[0])
for a in args[1:]:
basex, sgsx = tableau2bsgs(a)
base, sgs = bsgs_direct_product(base, sgs, basex, sgsx)
return TensorSymmetry(Tuple(base, sgs))
@doctest_depends_on(modules=('numpy',))
class TensorType(Basic):
"""
Class of tensor types.
Parameters
==========
index_types : list of ``TensorIndexType`` of the tensor indices
symmetry : ``TensorSymmetry`` of the tensor
Attributes
==========
``index_types``
``symmetry``
``types`` : list of ``TensorIndexType`` without repetitions
Examples
========
Define a symmetric tensor
>>> from sympy.tensor.tensor import TensorIndexType, tensorsymmetry, TensorType
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> sym2 = tensorsymmetry([1, 1])
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> V = S2('V')
"""
is_commutative = False
def __new__(cls, index_types, symmetry, **kw_args):
assert symmetry.rank == len(index_types)
obj = Basic.__new__(cls, Tuple(*index_types), symmetry, **kw_args)
return obj
@property
def index_types(self):
return self.args[0]
@property
def symmetry(self):
return self.args[1]
@property
def types(self):
return sorted(set(self.index_types), key=lambda x: x.name)
def __str__(self):
return 'TensorType(%s)' % ([str(x) for x in self.index_types])
def __call__(self, s, comm=0, matrix_behavior=0):
"""
Return a TensorHead object or a list of TensorHead objects.
``s`` name or string of names
``comm``: commutation group number
see ``_TensorManager.set_comm``
Examples
========
Define symmetric tensors ``V``, ``W`` and ``G``, respectively
commuting, anticommuting and with no commutation symmetry
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorsymmetry, TensorType, canon_bp
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b = tensor_indices('a,b', Lorentz)
>>> sym2 = tensorsymmetry([1]*2)
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> V = S2('V')
>>> W = S2('W', 1)
>>> G = S2('G', 2)
>>> canon_bp(V(a, b)*V(-b, -a))
V(L_0, L_1)*V(-L_0, -L_1)
>>> canon_bp(W(a, b)*W(-b, -a))
0
"""
if isinstance(s, str):
names = [x.name for x in symbols(s, seq=True)]
else:
raise ValueError('expecting a string')
if len(names) == 1:
return TensorHead(names[0], self, comm, matrix_behavior=matrix_behavior)
else:
return [TensorHead(name, self, comm, matrix_behavior=matrix_behavior) for name in names]
def tensorhead(name, typ, sym, comm=0, matrix_behavior=0):
"""
Function generating tensorhead(s).
Parameters
==========
name : name or sequence of names (as in ``symbol``)
typ : index types
sym : same as ``*args`` in ``tensorsymmetry``
comm : commutation group number
see ``_TensorManager.set_comm``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b = tensor_indices('a,b', Lorentz)
>>> A = tensorhead('A', [Lorentz]*2, [[1]*2])
>>> A(a, -b)
A(a, -b)
"""
sym = tensorsymmetry(*sym)
S = TensorType(typ, sym)
th = S(name, comm, matrix_behavior=matrix_behavior)
return th
@doctest_depends_on(modules=('numpy',))
class TensorHead(Basic):
r"""
Tensor head of the tensor
Parameters
==========
name : name of the tensor
typ : list of TensorIndexType
comm : commutation group number
Attributes
==========
``name``
``index_types``
``rank``
``types`` : equal to ``typ.types``
``symmetry`` : equal to ``typ.symmetry``
``comm`` : commutation group
Notes
=====
A ``TensorHead`` belongs to a commutation group, defined by a
symbol on number ``comm`` (see ``_TensorManager.set_comm``);
tensors in a commutation group have the same commutation properties;
by default ``comm`` is ``0``, the group of the commuting tensors.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensorhead, TensorType
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> A = tensorhead('A', [Lorentz, Lorentz], [[1],[1]])
Examples with ndarray values, the components data assigned to the
``TensorHead`` object are assumed to be in a fully-contravariant
representation. In case it is necessary to assign components data which
represents the values of a non-fully covariant tensor, see the other
examples.
>>> from sympy.tensor.tensor import tensor_indices, tensorhead
>>> Lorentz.data = [1, -1, -1, -1]
>>> i0, i1 = tensor_indices('i0:2', Lorentz)
>>> A.data = [[j+2*i for j in range(4)] for i in range(4)]
in order to retrieve data, it is also necessary to specify abstract indices
enclosed by round brackets, then numerical indices inside square brackets.
>>> A(i0, i1)[0, 0]
0
>>> A(i0, i1)[2, 3] == 3+2*2
True
Notice that square brackets create a valued tensor expression instance:
>>> A(i0, i1)
A(i0, i1)
To view the data, just type:
>>> A.data
[[0 1 2 3]
[2 3 4 5]
[4 5 6 7]
[6 7 8 9]]
Turning to a tensor expression, covariant indices get the corresponding
components data corrected by the metric:
>>> A(i0, -i1).data
[[0 -1 -2 -3]
[2 -3 -4 -5]
[4 -5 -6 -7]
[6 -7 -8 -9]]
>>> A(-i0, -i1).data
[[0 -1 -2 -3]
[-2 3 4 5]
[-4 5 6 7]
[-6 7 8 9]]
while if all indices are contravariant, the ``ndarray`` remains the same
>>> A(i0, i1).data
[[0 1 2 3]
[2 3 4 5]
[4 5 6 7]
[6 7 8 9]]
When all indices are contracted and components data are added to the tensor,
    accessing the data will return a scalar, not a numpy object. In fact, numpy
ndarrays are dropped to scalars if they contain only one element.
>>> A(i0, -i0)
A(L_0, -L_0)
>>> A(i0, -i0).data
-18
It is also possible to assign components data to an indexed tensor, i.e. a
tensor with specified covariant and contravariant components. In this
example, the covariant components data of the Electromagnetic tensor are
    injected into the antisymmetric tensor `F` defined below:
>>> from sympy import symbols
>>> Ex, Ey, Ez, Bx, By, Bz = symbols('E_x E_y E_z B_x B_y B_z')
>>> c = symbols('c', positive=True)
    Let's define `F` as an antisymmetric tensor; we have to assign an
antisymmetric matrix to it, because `[[2]]` stands for the Young tableau
representation of an antisymmetric set of two elements:
>>> F = tensorhead('A', [Lorentz, Lorentz], [[2]])
>>> F(-i0, -i1).data = [
... [0, Ex/c, Ey/c, Ez/c],
... [-Ex/c, 0, -Bz, By],
... [-Ey/c, Bz, 0, -Bx],
... [-Ez/c, -By, Bx, 0]]
Now it is possible to retrieve the contravariant form of the Electromagnetic
tensor:
>>> F(i0, i1).data
[[0 -E_x/c -E_y/c -E_z/c]
[E_x/c 0 -B_z B_y]
[E_y/c B_z 0 -B_x]
[E_z/c -B_y B_x 0]]
and the mixed contravariant-covariant form:
>>> F(i0, -i1).data
[[0 E_x/c E_y/c E_z/c]
[E_x/c 0 B_z -B_y]
[E_y/c -B_z 0 B_x]
[E_z/c B_y -B_x 0]]
To convert the numpy's ndarray to a sympy matrix, just cast:
>>> from sympy import Matrix
>>> Matrix(F.data)
Matrix([
[ 0, -E_x/c, -E_y/c, -E_z/c],
[E_x/c, 0, -B_z, B_y],
[E_y/c, B_z, 0, -B_x],
[E_z/c, -B_y, B_x, 0]])
Still notice, in this last example, that accessing components data from a
    tensor without specifying the indices is equivalent to assuming that all
indices are contravariant.
It is also possible to store symbolic components data inside a tensor, for
example, define a four-momentum-like tensor:
>>> from sympy import symbols
>>> P = tensorhead('P', [Lorentz], [[1]])
>>> E, px, py, pz = symbols('E p_x p_y p_z', positive=True)
>>> P.data = [E, px, py, pz]
The contravariant and covariant components are, respectively:
>>> P(i0).data
[E p_x p_y p_z]
>>> P(-i0).data
[E -p_x -p_y -p_z]
    The contraction of a 1-index tensor with itself is usually indicated by
    raising it to the second power:
>>> P(i0)**2
E**2 - p_x**2 - p_y**2 - p_z**2
    As the second power is clearly identical to `P_\mu P^\mu`, it is possible to
simply contract the ``TensorHead`` object, without specifying the indices
>>> P**2
E**2 - p_x**2 - p_y**2 - p_z**2
"""
is_commutative = False
def __new__(cls, name, typ, comm=0, matrix_behavior=0, **kw_args):
if isinstance(name, string_types):
name_symbol = Symbol(name)
elif isinstance(name, Symbol):
name_symbol = name
else:
raise ValueError("invalid name")
comm2i = TensorManager.comm_symbols2i(comm)
obj = Basic.__new__(cls, name_symbol, typ, **kw_args)
obj._matrix_behavior = matrix_behavior
obj._name = obj.args[0].name
obj._rank = len(obj.index_types)
obj._types = typ.types
obj._symmetry = typ.symmetry
obj._comm = comm2i
return obj
@property
def name(self):
return self._name
@property
def rank(self):
return self._rank
@property
def types(self):
return self._types[:]
@property
def symmetry(self):
return self._symmetry
@property
def typ(self):
return self.args[1]
@property
def comm(self):
return self._comm
@property
def index_types(self):
return self.args[1].index_types[:]
def __lt__(self, other):
return (self.name, self.index_types) < (other.name, other.index_types)
def commutes_with(self, other):
"""
Returns ``0`` if ``self`` and ``other`` commute, ``1`` if they anticommute.
Returns ``None`` if ``self`` and ``other`` neither commute nor anticommute.
"""
r = TensorManager.get_comm(self._comm, other._comm)
return r
def _print(self):
return '%s(%s)' %(self.name, ','.join([str(x) for x in self.index_types]))
def _check_auto_matrix_indices_in_call(self, *indices):
matrix_behavior_kinds = dict()
if len(indices) != len(self.index_types):
if not self._matrix_behavior:
raise ValueError('wrong number of indices')
# Take the last one or two missing
# indices as auto-matrix indices:
ldiff = len(self.index_types) - len(indices)
if ldiff > 2:
raise ValueError('wrong number of indices')
if ldiff == 2:
mat_ind = [len(indices), len(indices) + 1]
elif ldiff == 1:
mat_ind = [len(indices)]
not_equal = True
else:
not_equal = False
mat_ind = [i for i, e in enumerate(indices) if e is True]
if mat_ind:
not_equal = True
indices = tuple([_ for _ in indices if _ is not True])
for i, el in enumerate(indices):
if not isinstance(el, TensorIndex):
not_equal = True
break
if el._tensortype != self.index_types[i]:
not_equal = True
break
if not_equal:
for el in mat_ind:
eltyp = self.index_types[el]
if eltyp in matrix_behavior_kinds:
elind = -self.index_types[el].auto_right
matrix_behavior_kinds[eltyp].append(elind)
else:
elind = self.index_types[el].auto_left
matrix_behavior_kinds[eltyp] = [elind]
indices = indices[:el] + (elind,) + indices[el:]
return indices, matrix_behavior_kinds
def __call__(self, *indices, **kw_args):
"""
Returns a tensor with indices.
There is a special behavior in case of indices denoted by ``True``,
they are considered auto-matrix indices, their slots are automatically
filled, and confer to the tensor the behavior of a matrix or vector
upon multiplication with another tensor containing auto-matrix indices
of the same ``TensorIndexType``. This means indices get summed over the
same way as in matrix multiplication. For matrix behavior, define two
auto-matrix indices, for vector behavior define just one.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b = tensor_indices('a,b', Lorentz)
>>> A = tensorhead('A', [Lorentz]*2, [[1]*2])
>>> t = A(a, -b)
>>> t
A(a, -b)
To use the auto-matrix index behavior, just put a ``True`` on the
desired index position.
>>> r = A(True, True)
>>> r
A(auto_left, -auto_right)
Here ``auto_left`` and ``auto_right`` are automatically generated
tensor indices, they are only two for every ``TensorIndexType`` and
can be assigned to just one or two indices of a given type.
Auto-matrix indices can be assigned many times in a tensor, if indices
are of different ``TensorIndexType``
>>> Spinor = TensorIndexType('Spinor', dummy_fmt='S')
>>> B = tensorhead('B', [Lorentz, Lorentz, Spinor, Spinor], [[1]*4])
>>> s = B(True, True, True, True)
>>> s
B(auto_left, -auto_right, auto_left, -auto_right)
Here, ``auto_left`` and ``auto_right`` are repeated twice, but they are
not the same indices, as they refer to different ``TensorIndexType``s.
Auto-matrix indices are automatically contracted upon multiplication,
>>> r*s
A(auto_left, L_0)*B(-L_0, -auto_right, auto_left, -auto_right)
The multiplication algorithm has found an ``auto_right`` index in ``A``
and an ``auto_left`` index in ``B`` referring to the same
``TensorIndexType`` (``Lorentz``), so they have been contracted.
Auto-matrix indices can be accessed from the ``TensorIndexType``:
>>> Lorentz.auto_right
auto_right
>>> Lorentz.auto_left
auto_left
There is a special case, in which the ``True`` parameter is not needed
to declare an auto-matrix index, i.e. when the matrix behavior has been
declared upon ``TensorHead`` construction, in that case the last one or
two tensor indices may be omitted, so that they automatically become
auto-matrix indices:
>>> C = tensorhead('C', [Lorentz, Lorentz], [[1]*2], matrix_behavior=True)
>>> C()
C(auto_left, -auto_right)
"""
indices, matrix_behavior_kinds = self._check_auto_matrix_indices_in_call(*indices)
tensor = Tensor._new_with_dummy_replacement(self, indices, **kw_args)
return tensor
def __pow__(self, other):
if self.data is None:
raise ValueError("No power on abstract tensors.")
numpy = import_module('numpy')
metrics = [_.data for _ in self.args[1].args[0]]
marray = self.data
for metric in metrics:
marray = numpy.tensordot(marray, numpy.tensordot(metric, marray, (1, 0)), (0, 0))
pow2 = marray[()]
return pow2 ** (Rational(1, 2) * other)
@property
def data(self):
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
_tensor_data_substitution_dict[self] = data
@data.deleter
def data(self):
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def __iter__(self):
return self.data.flatten().__iter__()
def _components_data_full_destroy(self):
"""
EXPERIMENTAL: do not rely on this API method.
        Destroy the components data associated with the ``TensorHead`` object;
        this also checks for attached components data and destroys them.
"""
# do not garbage collect Kronecker tensor (it should be done by
# ``TensorIndexType`` garbage collection)
if self.name == "KD":
return
# the data attached to a tensor must be deleted only by the TensorHead
# destructor. If the TensorHead is deleted, it means that there are no
# more instances of that tensor anywhere.
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
@doctest_depends_on(modules=('numpy',))
class TensExpr(Basic):
"""
Abstract base class for tensor expressions
Notes
=====
A tensor expression is an expression formed by tensors;
currently the sums of tensors are distributed.
A ``TensExpr`` can be a ``TensAdd`` or a ``TensMul``.
``TensAdd`` objects are put in canonical form using the Butler-Portugal
algorithm for canonicalization under monoterm symmetries.
``TensMul`` objects are formed by products of component tensors,
and include a coefficient, which is a SymPy expression.
In the internal representation contracted indices are represented
by ``(ipos1, ipos2, icomp1, icomp2)``, where ``icomp1`` is the position
of the component tensor with contravariant index, ``ipos1`` is the
slot which the index occupies in that component tensor.
Contracted indices are therefore nameless in the internal representation.
"""
_op_priority = 11.0
is_commutative = False
def __neg__(self):
return self*S.NegativeOne
def __abs__(self):
raise NotImplementedError
def __add__(self, other):
raise NotImplementedError
def __radd__(self, other):
raise NotImplementedError
def __sub__(self, other):
raise NotImplementedError
def __rsub__(self, other):
raise NotImplementedError
def __mul__(self, other):
raise NotImplementedError
def __rmul__(self, other):
raise NotImplementedError
def __pow__(self, other):
if self.data is None:
raise ValueError("No power without ndarray data.")
numpy = import_module('numpy')
free = self.free
marray = self.data
for metric in free:
marray = numpy.tensordot(
marray,
numpy.tensordot(
metric[0]._tensortype.data,
marray,
(1, 0)
),
(0, 0)
)
pow2 = marray[()]
return pow2 ** (Rational(1, 2) * other)
def __rpow__(self, other):
raise NotImplementedError
def __div__(self, other):
raise NotImplementedError
def __rdiv__(self, other):
raise NotImplementedError()
__truediv__ = __div__
__rtruediv__ = __rdiv__
@doctest_depends_on(modules=('numpy',))
def get_matrix(self):
"""
Returns ndarray components data as a matrix, if components data are
available and ndarray dimension does not exceed 2.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensorsymmetry, TensorType
>>> from sympy import ones
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> sym2 = tensorsymmetry([1]*2)
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> A = S2('A')
The tensor ``A`` is symmetric in its indices, as can be deduced by the
``[1, 1]`` Young tableau when constructing `sym2`. One has to be
careful to assign symmetric component data to ``A``, as the symmetry
properties of data are currently not checked to be compatible with the
defined tensor symmetry.
>>> from sympy.tensor.tensor import tensor_indices, tensorhead
>>> Lorentz.data = [1, -1, -1, -1]
>>> i0, i1 = tensor_indices('i0:2', Lorentz)
>>> A.data = [[j+i for j in range(4)] for i in range(4)]
>>> A(i0, i1).get_matrix()
Matrix([
[0, 1, 2, 3],
[1, 2, 3, 4],
[2, 3, 4, 5],
[3, 4, 5, 6]])
It is possible to perform usual operation on matrices, such as the
matrix multiplication:
>>> A(i0, i1).get_matrix()*ones(4, 1)
Matrix([
[ 6],
[10],
[14],
[18]])
"""
if 0 < self.rank <= 2:
rows = self.data.shape[0]
columns = self.data.shape[1] if self.rank == 2 else 1
if self.rank == 2:
                mat_list = []
for i in range(rows):
mat_list.append([])
for j in range(columns):
mat_list[i].append(self[i, j])
else:
mat_list = [None] * rows
for i in range(rows):
mat_list[i] = self[i]
return Matrix(mat_list)
else:
raise NotImplementedError(
"missing multidimensional reduction to matrix.")
def _eval_simplify(self, ratio, measure):
# this is a way to simplify a tensor expression.
# This part walks for all `TensorHead`s appearing in the tensor expr
# and looks for `simplify_this_type`, to specifically act on a subexpr
# containing one type of `TensorHead` instance only:
expr = self
for i in list(set(self.components)):
if hasattr(i, 'simplify_this_type'):
expr = i.simplify_this_type(expr)
# TODO: missing feature, perform metric contraction.
return expr
@doctest_depends_on(modules=('numpy',))
class TensAdd(TensExpr):
"""
Sum of tensors
Parameters
==========
free_args : list of the free indices
Attributes
==========
``args`` : tuple of addends
``rank`` : rank of the tensor
``free_args`` : list of the free indices in sorted order
Notes
=====
    Sums of more than one tensor are automatically put in canonical form.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensorhead, tensor_indices
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b = tensor_indices('a,b', Lorentz)
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> t = p(a) + q(a); t
p(a) + q(a)
>>> t(b)
p(b) + q(b)
Examples with components data added to the tensor expression:
>>> from sympy import eye
>>> Lorentz.data = [1, -1, -1, -1]
>>> a, b = tensor_indices('a, b', Lorentz)
>>> p.data = [2, 3, -2, 7]
>>> q.data = [2, 3, -2, 7]
>>> t = p(a) + q(a); t
p(a) + q(a)
>>> t(b)
p(b) + q(b)
    The following both evaluate to: 2**2 - 3**2 - 2**2 - 7**2 ==> -58
>>> (p(a)*p(-a)).data
-58
>>> p(a)**2
-58
"""
def __new__(cls, *args, **kw_args):
args = [sympify(x) for x in args if x]
args = TensAdd._tensAdd_flatten(args)
if not args:
return S.Zero
if len(args) == 1 and not isinstance(args[0], TensExpr):
return args[0]
# replace auto-matrix indices so that they are the same in all addends
args = TensAdd._tensAdd_check_automatrix(args)
# now check that all addends have the same indices:
TensAdd._tensAdd_check(args)
# if TensAdd has only 1 TensMul element in its `args`:
if len(args) == 1 and isinstance(args[0], TensMul):
obj = Basic.__new__(cls, *args, **kw_args)
return obj
# TODO: do not or do canonicalize by default?
# Technically, one may wish to have additions of non-canonicalized
# tensors. This feature should be removed in the future.
        # Unfortunately this would require rewriting a lot of tests.
# canonicalize all TensMul
args = [canon_bp(x) for x in args if x]
args = [x for x in args if x]
# if there are no more args (i.e. have cancelled out),
# just return zero:
if not args:
return S.Zero
if len(args) == 1:
return args[0]
# collect canonicalized terms
def sort_key(t):
x = get_tids(t)
return (x.components, x.free, x.dum)
args.sort(key=sort_key)
args = TensAdd._tensAdd_collect_terms(args)
if not args:
return S.Zero
        # if there is only one component tensor, return it
if len(args) == 1:
return args[0]
obj = Basic.__new__(cls, *args, **kw_args)
return obj
@staticmethod
def _tensAdd_flatten(args):
# flatten TensAdd, coerce terms which are not tensors to tensors
if not all(isinstance(x, TensExpr) for x in args):
args_expanded = []
for x in args:
if isinstance(x, TensAdd):
args_expanded.extend(list(x.args))
else:
args_expanded.append(x)
args_tensor = []
args_scalar = []
for x in args_expanded:
if isinstance(x, TensExpr) and x.coeff:
args_tensor.append(x)
if not isinstance(x, TensExpr):
args_scalar.append(x)
t1 = TensMul.from_data(Add(*args_scalar), [], [], [])
args = [t1] + args_tensor
a = []
for x in args:
if isinstance(x, TensAdd):
a.extend(list(x.args))
else:
a.append(x)
args = [x for x in a if x.coeff]
return args
@staticmethod
def _tensAdd_check_automatrix(args):
# check that all automatrix indices are the same.
# if there are no addends, just return.
if not args:
return args
# @type auto_left_types: set
auto_left_types = set([])
auto_right_types = set([])
args_auto_left_types = []
args_auto_right_types = []
for i, arg in enumerate(args):
arg_auto_left_types = set([])
arg_auto_right_types = set([])
for index in get_indices(arg):
# @type index: TensorIndex
if index in (index._tensortype.auto_left, -index._tensortype.auto_left):
auto_left_types.add(index._tensortype)
arg_auto_left_types.add(index._tensortype)
if index in (index._tensortype.auto_right, -index._tensortype.auto_right):
auto_right_types.add(index._tensortype)
arg_auto_right_types.add(index._tensortype)
args_auto_left_types.append(arg_auto_left_types)
args_auto_right_types.append(arg_auto_right_types)
        for i, (arg, aas_left, aas_right) in enumerate(zip(args, args_auto_left_types, args_auto_right_types)):
missing_left = auto_left_types - aas_left
missing_right = auto_right_types - aas_right
missing_intersection = missing_left & missing_right
for j in missing_intersection:
args[i] *= j.delta(j.auto_left, -j.auto_right)
if missing_left != missing_right:
raise ValueError("cannot determine how to add auto-matrix indices on some args")
return args
@staticmethod
def _tensAdd_check(args):
# check that all addends have the same free indices
indices0 = {x[0] for x in get_tids(args[0]).free}
list_indices = [{y[0] for y in get_tids(x).free} for x in args[1:]]
if not all(x == indices0 for x in list_indices):
raise ValueError('all tensors must have the same indices')
@staticmethod
def _tensAdd_collect_terms(args):
# collect TensMul terms differing at most by their coefficient
a = []
prev = args[0]
prev_coeff = get_coeff(prev)
changed = False
for x in args[1:]:
# if x and prev have the same tensor, update the coeff of prev
x_tids = get_tids(x)
prev_tids = get_tids(prev)
if x_tids.components == prev_tids.components \
and x_tids.free == prev_tids.free and x_tids.dum == prev_tids.dum:
prev_coeff = prev_coeff + get_coeff(x)
changed = True
op = 0
else:
# x and prev are different; if not changed, prev has not
# been updated; store it
if not changed:
a.append(prev)
else:
# get a tensor from prev with coeff=prev_coeff and store it
if prev_coeff:
t = TensMul.from_data(prev_coeff, prev_tids.components,
prev_tids.free, prev_tids.dum)
a.append(t)
# move x to prev
op = 1
pprev, prev = prev, x
pprev_coeff, prev_coeff = prev_coeff, get_coeff(x)
changed = False
        # in the case op == 0 prev was not stored; store it now
# in the case op=1 x was not stored; store it now (as prev)
if op == 0 and prev_coeff:
prev = TensMul.from_data(prev_coeff, prev_tids.components, prev_tids.free, prev_tids.dum)
a.append(prev)
elif op == 1:
a.append(prev)
return a
@property
def rank(self):
return self.args[0].rank
@property
def free_args(self):
return self.args[0].free_args
def __call__(self, *indices):
"""Returns tensor with ordered free indices replaced by ``indices``
Parameters
==========
indices
Examples
========
>>> from sympy import Symbol
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> D = Symbol('D')
>>> Lorentz = TensorIndexType('Lorentz', dim=D, dummy_fmt='L')
>>> i0,i1,i2,i3,i4 = tensor_indices('i0:5', Lorentz)
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> g = Lorentz.metric
>>> t = p(i0)*p(i1) + g(i0,i1)*q(i2)*q(-i2)
>>> t(i0,i2)
metric(i0, i2)*q(L_0)*q(-L_0) + p(i0)*p(i2)
>>> t(i0,i1) - t(i1,i0)
0
"""
free_args = self.free_args
indices = list(indices)
if [x._tensortype for x in indices] != [x._tensortype for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
index_tuples = list(zip(free_args, indices))
a = [x.func(*x.fun_eval(*index_tuples).args) for x in self.args]
res = TensAdd(*a)
return res
def canon_bp(self):
"""
canonicalize using the Butler-Portugal algorithm for canonicalization
under monoterm symmetries.
"""
args = [x.canon_bp() for x in self.args]
res = TensAdd(*args)
return res
def equals(self, other):
other = sympify(other)
if isinstance(other, TensMul) and other._coeff == 0:
return all(x._coeff == 0 for x in self.args)
if isinstance(other, TensExpr):
if self.rank != other.rank:
return False
if isinstance(other, TensAdd):
if set(self.args) != set(other.args):
return False
else:
return True
t = self - other
if not isinstance(t, TensExpr):
return t == 0
else:
if isinstance(t, TensMul):
return t._coeff == 0
else:
return all(x._coeff == 0 for x in t.args)
def __add__(self, other):
return TensAdd(self, other)
def __radd__(self, other):
return TensAdd(other, self)
def __sub__(self, other):
return TensAdd(self, -other)
def __rsub__(self, other):
return TensAdd(other, -self)
def __mul__(self, other):
return TensAdd(*(x*other for x in self.args))
def __rmul__(self, other):
return self*other
def __div__(self, other):
other = sympify(other)
if isinstance(other, TensExpr):
raise ValueError('cannot divide by a tensor')
return TensAdd(*(x/other for x in self.args))
def __rdiv__(self, other):
raise ValueError('cannot divide by a tensor')
def __getitem__(self, item):
return self.data[item]
__truediv__ = __div__
    __rtruediv__ = __rdiv__
def contract_delta(self, delta):
args = [x.contract_delta(delta) for x in self.args]
t = TensAdd(*args)
return canon_bp(t)
def contract_metric(self, g):
"""
Raise or lower indices with the metric ``g``
Parameters
==========
g : metric
contract_all : if True, eliminate all ``g`` which are contracted
Notes
=====
see the ``TensorIndexType`` docstring for the contraction conventions
"""
args = [contract_metric(x, g) for x in self.args]
t = TensAdd(*args)
return canon_bp(t)
def fun_eval(self, *index_tuples):
"""
Return a tensor with free indices substituted according to ``index_tuples``
Parameters
==========
index_types : list of tuples ``(old_index, new_index)``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
>>> A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
>>> t = A(i, k)*B(-k, -j) + A(i, -j)
>>> t.fun_eval((i, k),(-j, l))
A(k, L_0)*B(l, -L_0) + A(k, l)
"""
args = self.args
args1 = []
for x in args:
y = x.fun_eval(*index_tuples)
args1.append(y)
return TensAdd(*args1)
def substitute_indices(self, *index_tuples):
"""
Return a tensor with free indices substituted according to ``index_tuples``
Parameters
==========
index_types : list of tuples ``(old_index, new_index)``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
>>> A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
>>> t = A(i, k)*B(-k, -j); t
A(i, L_0)*B(-L_0, -j)
>>> t.substitute_indices((i,j), (j, k))
A(j, L_0)*B(-L_0, -k)
"""
args = self.args
args1 = []
for x in args:
y = x.substitute_indices(*index_tuples)
args1.append(y)
return TensAdd(*args1)
def _print(self):
a = []
args = self.args
for x in args:
a.append(str(x))
a.sort()
s = ' + '.join(a)
s = s.replace('+ -', '- ')
return s
@staticmethod
def from_TIDS_list(coeff, tids_list):
"""
Given a list of coefficients and a list of ``TIDS`` objects, construct
a ``TensAdd`` instance, equivalent to the one that would result from
creating single instances of ``TensMul`` and then adding them.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead, TensAdd
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j = tensor_indices('i,j', Lorentz)
>>> A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
>>> eA = 3*A(i, j)
>>> eB = 2*B(j, i)
>>> t1 = eA._tids
>>> t2 = eB._tids
>>> c1 = eA.coeff
>>> c2 = eB.coeff
>>> TensAdd.from_TIDS_list([c1, c2], [t1, t2])
2*B(i, j) + 3*A(i, j)
If the coefficient parameter is a scalar, then it will be applied
as a coefficient on all ``TIDS`` objects.
>>> TensAdd.from_TIDS_list(4, [t1, t2])
4*A(i, j) + 4*B(i, j)
"""
if not isinstance(coeff, (list, tuple, Tuple)):
coeff = [coeff] * len(tids_list)
tensmul_list = [TensMul.from_TIDS(c, t) for c, t in zip(coeff, tids_list)]
return TensAdd(*tensmul_list)
@property
def data(self):
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
# TODO: check data compatibility with properties of tensor.
_tensor_data_substitution_dict[self] = data
@data.deleter
def data(self):
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def __iter__(self):
        if self.data is None:
raise ValueError("No iteration on abstract tensors")
return self.data.flatten().__iter__()
@doctest_depends_on(modules=('numpy',))
class Tensor(TensExpr):
"""
Base tensor class, i.e. this represents a tensor, the single unit to be
put into an expression.
This object is usually created from a ``TensorHead``, by attaching indices
    to it. Indices preceded by a minus sign are considered covariant,
    otherwise contravariant.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType("Lorentz", dummy_fmt="L")
>>> mu, nu = tensor_indices('mu nu', Lorentz)
>>> A = tensorhead("A", [Lorentz, Lorentz], [[1], [1]])
>>> A(mu, -nu)
A(mu, -nu)
>>> A(mu, -mu)
A(L_0, -L_0)
"""
is_commutative = False
def __new__(cls, tensor_head, indices, **kw_args):
tids = TIDS.from_components_and_indices((tensor_head,), indices)
obj = Basic.__new__(cls, tensor_head, Tuple(*indices), **kw_args)
obj._tids = tids
obj._indices = indices
obj._is_canon_bp = kw_args.get('is_canon_bp', False)
return obj
@staticmethod
def _new_with_dummy_replacement(tensor_head, indices, **kw_args):
tids = TIDS.from_components_and_indices((tensor_head,), indices)
indices = tids.get_indices()
return Tensor(tensor_head, indices, **kw_args)
@property
def is_canon_bp(self):
return self._is_canon_bp
@property
def indices(self):
return self._indices
@property
def free(self):
return self._tids.free
@property
def dum(self):
return self._tids.dum
@property
def rank(self):
return len(self.free)
@property
def free_args(self):
return sorted([x[0] for x in self.free])
def perm2tensor(self, g, canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``
For further details, see the method in ``TIDS`` with the same name.
"""
return perm2tensor(self, g, canon_bp)
def canon_bp(self):
if self._is_canon_bp:
return self
g, dummies, msym, v = self._tids.canon_args()
can = canonicalize(g, dummies, msym, *v)
if can == 0:
return S.Zero
tensor = self.perm2tensor(can, True)
return tensor
@property
def types(self):
return get_tids(self).components[0]._types
@property
def coeff(self):
return S.One
@property
def component(self):
return self.args[0]
@property
def components(self):
return [self.args[0]]
def split(self):
return [self]
def expand(self):
return self
def sorted_components(self):
return self
def get_indices(self):
"""
Get a list of indices, corresponding to those of the tensor.
"""
return self._tids.get_indices()
def as_base_exp(self):
return self, S.One
def substitute_indices(self, *index_tuples):
return substitute_indices(self, *index_tuples)
def __call__(self, *indices):
"""Returns tensor with ordered free indices replaced by ``indices``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i0,i1,i2,i3,i4 = tensor_indices('i0:5', Lorentz)
>>> A = tensorhead('A', [Lorentz]*5, [[1]*5])
>>> t = A(i2, i1, -i2, -i3, i4)
>>> t
A(L_0, i1, -L_0, -i3, i4)
>>> t(i1, i2, i3)
A(L_0, i1, -L_0, i2, i3)
"""
free_args = self.free_args
indices = list(indices)
if [x._tensortype for x in indices] != [x._tensortype for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
t = self.fun_eval(*list(zip(free_args, indices)))
# object is rebuilt in order to make sure that all contracted indices
# get recognized as dummies, but only if there are contracted indices.
if len(set(i if i.is_up else -i for i in indices)) != len(indices):
return t.func(*t.args)
return t
def fun_eval(self, *index_tuples):
free = self.free
free1 = []
for j, ipos, cpos in free:
# search j in index_tuples
for i, v in index_tuples:
if i == j:
free1.append((v, ipos, cpos))
break
else:
free1.append((j, ipos, cpos))
return TensMul.from_data(self.coeff, self.components, free1, self.dum)
# TODO: put this into TensExpr?
def __iter__(self):
return self.data.flatten().__iter__()
# TODO: put this into TensExpr?
def __getitem__(self, item):
return self.data[item]
@property
def data(self):
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
# TODO: check data compatibility with properties of tensor.
_tensor_data_substitution_dict[self] = data
@data.deleter
def data(self):
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
if self.metric in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self.metric]
def __mul__(self, other):
if isinstance(other, TensAdd):
return TensAdd(*[self*arg for arg in other.args])
tmul = TensMul(self, other)
return tmul
def __rmul__(self, other):
return TensMul(other, self)
def __div__(self, other):
if isinstance(other, TensExpr):
raise ValueError('cannot divide by a tensor')
return TensMul(self, S.One/other, is_canon_bp=self.is_canon_bp)
def __rdiv__(self, other):
raise ValueError('cannot divide by a tensor')
def __add__(self, other):
return TensAdd(self, other)
def __radd__(self, other):
return TensAdd(other, self)
def __sub__(self, other):
return TensAdd(self, -other)
def __rsub__(self, other):
        return TensAdd(other, -self)
__truediv__ = __div__
__rtruediv__ = __rdiv__
def __neg__(self):
return TensMul(S.NegativeOne, self)
def _print(self):
indices = [str(ind) for ind in self.indices]
component = self.component
if component.rank > 0:
return ('%s(%s)' % (component.name, ', '.join(indices)))
else:
return ('%s' % component.name)
def equals(self, other):
if other == 0:
return self.coeff == 0
other = sympify(other)
if not isinstance(other, TensExpr):
assert not self.components
return S.One == other
def _get_compar_comp(self):
t = self.canon_bp()
r = (t.coeff, tuple(t.components), \
tuple(sorted(t.free)), tuple(sorted(t.dum)))
return r
return _get_compar_comp(self) == _get_compar_comp(other)
def contract_metric(self, metric):
tids, sign = get_tids(self).contract_metric(metric)
return TensMul.from_TIDS(sign, tids)
def contract_delta(self, metric):
return self.contract_metric(metric)
@doctest_depends_on(modules=('numpy',))
class TensMul(TensExpr):
"""
Product of tensors
Parameters
==========
coeff : SymPy coefficient of the tensor
args
Attributes
==========
``components`` : list of ``TensorHead`` of the component tensors
``types`` : list of nonrepeated ``TensorIndexType``
``free`` : list of ``(ind, ipos, icomp)``, see Notes
``dum`` : list of ``(ipos1, ipos2, icomp1, icomp2)``, see Notes
``ext_rank`` : rank of the tensor counting the dummy indices
``rank`` : rank of the tensor
``coeff`` : SymPy coefficient of the tensor
``free_args`` : list of the free indices in sorted order
    ``is_canon_bp`` : ``True`` if the tensor is in canonical form
Notes
=====
``args[0]`` list of ``TensorHead`` of the component tensors.
``args[1]`` list of ``(ind, ipos, icomp)``
where ``ind`` is a free index, ``ipos`` is the slot position
of ``ind`` in the ``icomp``-th component tensor.
``args[2]`` list of tuples representing dummy indices.
``(ipos1, ipos2, icomp1, icomp2)`` indicates that the contravariant
dummy index is the ``ipos1``-th slot position in the ``icomp1``-th
component tensor; the corresponding covariant index is
in the ``ipos2`` slot position in the ``icomp2``-th component tensor.
"""
def __new__(cls, *args, **kw_args):
# make sure everything is sympified:
args = [sympify(arg) for arg in args]
# flatten:
args = TensMul._flatten(args)
is_canon_bp = kw_args.get('is_canon_bp', False)
if not any([isinstance(arg, TensExpr) for arg in args]):
tids = TIDS([], [], [])
else:
tids_list = [arg._tids for arg in args if isinstance(arg, (Tensor, TensMul))]
if len(tids_list) == 1:
for arg in args:
if not isinstance(arg, Tensor):
continue
is_canon_bp = kw_args.get('is_canon_bp', arg._is_canon_bp)
tids = reduce(lambda a, b: a*b, tids_list)
if any([isinstance(arg, TensAdd) for arg in args]):
add_args = TensAdd._tensAdd_flatten(args)
return TensAdd(*add_args)
coeff = reduce(lambda a, b: a*b, [S.One] + [arg for arg in args if not isinstance(arg, TensExpr)])
args = tids.get_tensors()
if coeff != 1:
args = [coeff] + args
if len(args) == 1:
return args[0]
obj = Basic.__new__(cls, *args)
obj._types = []
for t in tids.components:
obj._types.extend(t._types)
obj._tids = tids
obj._ext_rank = len(obj._tids.free) + 2*len(obj._tids.dum)
obj._coeff = coeff
obj._is_canon_bp = is_canon_bp
return obj
@staticmethod
def _flatten(args):
a = []
for arg in args:
if isinstance(arg, TensMul):
a.extend(arg.args)
else:
a.append(arg)
return a
@staticmethod
def from_data(coeff, components, free, dum, **kw_args):
tids = TIDS(components, free, dum)
return TensMul.from_TIDS(coeff, tids, **kw_args)
@staticmethod
def from_TIDS(coeff, tids, **kw_args):
return TensMul(coeff, *tids.get_tensors(), **kw_args)
@property
def free_args(self):
return sorted([x[0] for x in self.free])
@property
def components(self):
return self._tids.components[:]
@property
def free(self):
return self._tids.free[:]
@property
def coeff(self):
return self._coeff
@property
def dum(self):
return self._tids.dum[:]
@property
def rank(self):
return len(self.free)
@property
def types(self):
return self._types[:]
def equals(self, other):
if other == 0:
return self.coeff == 0
other = sympify(other)
if not isinstance(other, TensExpr):
assert not self.components
return self._coeff == other
def _get_compar_comp(self):
t = self.canon_bp()
r = (get_coeff(t), tuple(t.components), \
tuple(sorted(t.free)), tuple(sorted(t.dum)))
return r
return _get_compar_comp(self) == _get_compar_comp(other)
def get_indices(self):
"""
Returns the list of indices of the tensor
The indices are listed in the order in which they appear in the
component tensors.
The dummy indices are given a name which does not collide with
the names of the free indices.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> t = p(m1)*g(m0,m2)
>>> t.get_indices()
[m1, m0, m2]
"""
return self._tids.get_indices()
def split(self):
"""
Returns a list of tensors, whose product is ``self``
Dummy indices contracted among different tensor components
become free indices with the same name as the one used to
represent the dummy indices.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
>>> A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
>>> t = A(a,b)*B(-b,c)
>>> t
A(a, L_0)*B(-L_0, c)
>>> t.split()
[A(a, L_0), B(-L_0, c)]
"""
if self.args == ():
return [self]
splitp = []
res = 1
for arg in self.args:
if isinstance(arg, Tensor):
splitp.append(res*arg)
res = 1
else:
res *= arg
return splitp
def __add__(self, other):
return TensAdd(self, other)
def __radd__(self, other):
return TensAdd(other, self)
def __sub__(self, other):
return TensAdd(self, -other)
def __rsub__(self, other):
return TensAdd(other, -self)
def __mul__(self, other):
"""
Multiply two tensors using Einstein summation convention.
If the two tensors have an index in common, one contravariant
and the other covariant, in their product the indices are summed
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> t1 = p(m0)
>>> t2 = q(-m0)
>>> t1*t2
p(L_0)*q(-L_0)
"""
other = sympify(other)
if not isinstance(other, TensExpr):
coeff = self.coeff*other
tmul = TensMul.from_TIDS(coeff, self._tids, is_canon_bp=self._is_canon_bp)
return tmul
if isinstance(other, TensAdd):
return TensAdd(*[self*x for x in other.args])
new_tids = self._tids*other._tids
coeff = self.coeff*other.coeff
tmul = TensMul.from_TIDS(coeff, new_tids)
return tmul
def __rmul__(self, other):
other = sympify(other)
coeff = other*self._coeff
tmul = TensMul.from_TIDS(coeff, self._tids)
return tmul
def __div__(self, other):
other = sympify(other)
if isinstance(other, TensExpr):
raise ValueError('cannot divide by a tensor')
coeff = self._coeff/other
tmul = TensMul.from_TIDS(coeff, self._tids, is_canon_bp=self._is_canon_bp)
return tmul
def __rdiv__(self, other):
raise ValueError('cannot divide by a tensor')
def __getitem__(self, item):
return self.data[item]
__truediv__ = __div__
    __rtruediv__ = __rdiv__
def sorted_components(self):
"""
Returns a tensor with sorted components
calling the corresponding method in a ``TIDS`` object.
"""
new_tids, sign = self._tids.sorted_components()
coeff = -self.coeff if sign == -1 else self.coeff
t = TensMul.from_TIDS(coeff, new_tids)
return t
def perm2tensor(self, g, canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``
For further details, see the method in ``TIDS`` with the same name.
"""
return perm2tensor(self, g, canon_bp)
def canon_bp(self):
"""
Canonicalize using the Butler-Portugal algorithm for canonicalization
under monoterm symmetries.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> A = tensorhead('A', [Lorentz]*2, [[2]])
>>> t = A(m0,-m1)*A(m1,-m0)
>>> t.canon_bp()
-A(L_0, L_1)*A(-L_0, -L_1)
>>> t = A(m0,-m1)*A(m1,-m2)*A(m2,-m0)
>>> t.canon_bp()
0
"""
if self._is_canon_bp:
return self
if not self.components:
return self
t = self.sorted_components()
g, dummies, msym, v = t._tids.canon_args()
can = canonicalize(g, dummies, msym, *v)
if can == 0:
return S.Zero
tmul = t.perm2tensor(can, True)
return tmul
def contract_delta(self, delta):
t = self.contract_metric(delta)
return t
def contract_metric(self, g):
"""
Raise or lower indices with the metric ``g``
Parameters
==========
g : metric
Notes
=====
see the ``TensorIndexType`` docstring for the contraction conventions
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> t = p(m0)*q(m1)*g(-m0, -m1)
>>> t.canon_bp()
metric(L_0, L_1)*p(-L_0)*q(-L_1)
>>> t.contract_metric(g).canon_bp()
p(L_0)*q(-L_0)
"""
tids, sign = get_tids(self).contract_metric(g)
res = TensMul.from_TIDS(sign*self.coeff, tids)
return res
def substitute_indices(self, *index_tuples):
return substitute_indices(self, *index_tuples)
def fun_eval(self, *index_tuples):
"""
Return a tensor with free indices substituted according to ``index_tuples``
``index_types`` list of tuples ``(old_index, new_index)``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
>>> A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
>>> t = A(i, k)*B(-k, -j); t
A(i, L_0)*B(-L_0, -j)
>>> t.fun_eval((i, k),(-j, l))
A(k, L_0)*B(-L_0, l)
"""
free = self.free
free1 = []
for j, ipos, cpos in free:
# search j in index_tuples
for i, v in index_tuples:
if i == j:
free1.append((v, ipos, cpos))
break
else:
free1.append((j, ipos, cpos))
return TensMul.from_data(self.coeff, self.components, free1, self.dum)
def __call__(self, *indices):
"""Returns tensor product with ordered free indices replaced by ``indices``
Examples
========
>>> from sympy import Symbol
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> D = Symbol('D')
>>> Lorentz = TensorIndexType('Lorentz', dim=D, dummy_fmt='L')
>>> i0,i1,i2,i3,i4 = tensor_indices('i0:5', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> t = p(i0)*q(i1)*q(-i1)
>>> t(i1)
p(i1)*q(L_0)*q(-L_0)
"""
free_args = self.free_args
indices = list(indices)
if [x._tensortype for x in indices] != [x._tensortype for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
t = self.fun_eval(*list(zip(free_args, indices)))
# object is rebuilt in order to make sure that all contracted indices
# get recognized as dummies, but only if there are contracted indices.
if len(set(i if i.is_up else -i for i in indices)) != len(indices):
return t.func(*t.args)
return t
def _print(self):
args = self.args
get_str = lambda arg: str(arg) if arg.is_Atom or isinstance(arg, TensExpr) else ("(%s)" % str(arg))
if not args:
# no arguments is equivalent to "1", i.e. TensMul().
# If tensors are constructed correctly, this should never occur.
return "1"
if self.coeff == S.NegativeOne:
# expressions like "-A(a)"
return "-"+"*".join([get_str(arg) for arg in args[1:]])
# prints expressions like "A(a)", "3*A(a)", "(1+x)*A(a)"
return "*".join([get_str(arg) for arg in self.args])
@property
def data(self):
dat = _tensor_data_substitution_dict[self]
if dat is None:
return None
return self.coeff * dat
@data.setter
def data(self, data):
raise ValueError("Not possible to set component data to a tensor expression")
@data.deleter
def data(self):
raise ValueError("Not possible to delete component data to a tensor expression")
def __iter__(self):
if self.data is None:
raise ValueError("No iteration on abstract tensors")
return (self.data.flatten()).__iter__()
def canon_bp(p):
"""
Butler-Portugal canonicalization
"""
if isinstance(p, TensExpr):
return p.canon_bp()
return p
def tensor_mul(*a):
"""
product of tensors
"""
if not a:
return TensMul.from_data(S.One, [], [], [])
t = a[0]
for tx in a[1:]:
t = t*tx
return t
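# A minimal usage sketch for ``tensor_mul`` (illustrative only; ``p``, ``q``
# and ``m0`` are assumed to be defined as in the doctests above):
#
#     t = tensor_mul(p(m0), q(-m0))   # equivalent to p(m0)*q(-m0)
#
# Called with no arguments it returns the multiplicative identity TensMul.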
def riemann_cyclic_replace(t_r):
"""
replace Riemann tensor with an equivalent expression
``R(m,n,p,q) -> 2/3*R(m,n,p,q) - 1/3*R(m,q,n,p) + 1/3*R(m,p,n,q)``
"""
free = sorted(t_r.free, key=lambda x: x[1])
m, n, p, q = [x[0] for x in free]
t0 = S(2)/3*t_r
t1 = - S(1)/3*t_r.substitute_indices((m,m),(n,q),(p,n),(q,p))
t2 = S(1)/3*t_r.substitute_indices((m,m),(n,p),(p,n),(q,q))
t3 = t0 + t1 + t2
return t3
def riemann_cyclic(t2):
"""
replace each Riemann tensor with an equivalent expression
satisfying the cyclic identity.
This trick is discussed in the reference guide to Cadabra.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead, riemann_cyclic
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
>>> R = tensorhead('R', [Lorentz]*4, [[2, 2]])
>>> t = R(i,j,k,l)*(R(-i,-j,-k,-l) - 2*R(-i,-k,-j,-l))
>>> riemann_cyclic(t)
0
"""
if isinstance(t2, (TensMul, Tensor)):
args = [t2]
else:
args = t2.args
a1 = [x.split() for x in args]
a2 = [[riemann_cyclic_replace(tx) for tx in y] for y in a1]
a3 = [tensor_mul(*v) for v in a2]
t3 = TensAdd(*a3)
if not t3:
return t3
else:
return canon_bp(t3)
def get_lines(ex, index_type):
"""
returns ``(lines, traces, rest)`` for an index type,
    where ``lines`` is the list of lists of positions of a matrix line,
    ``traces`` is the list of lists of traced matrix lines,
    ``rest`` is the rest of the elements of the tensor.
"""
def _join_lines(a):
i = 0
while i < len(a):
x = a[i]
xend = x[-1]
xstart = x[0]
hit = True
while hit:
hit = False
for j in range(i + 1, len(a)):
if j >= len(a):
break
if a[j][0] == xend:
hit = True
x.extend(a[j][1:])
xend = x[-1]
a.pop(j)
continue
if a[j][0] == xstart:
hit = True
                        a[i] = list(reversed(a[j][1:])) + x
x = a[i]
xstart = a[i][0]
a.pop(j)
continue
if a[j][-1] == xend:
hit = True
x.extend(reversed(a[j][:-1]))
xend = x[-1]
a.pop(j)
continue
if a[j][-1] == xstart:
hit = True
a[i] = a[j][:-1] + x
x = a[i]
xstart = x[0]
a.pop(j)
continue
i += 1
return a
tids = ex._tids
components = tids.components
dt = {}
for c in components:
if c in dt:
continue
index_types = c.index_types
a = []
for i in range(len(index_types)):
if index_types[i] is index_type:
a.append(i)
if len(a) > 2:
raise ValueError('at most two indices of type %s allowed' % index_type)
if len(a) == 2:
dt[c] = a
dum = tids.dum
lines = []
traces = []
traces1 = []
for p0, p1, c0, c1 in dum:
if components[c0] not in dt:
continue
if c0 == c1:
traces.append([c0])
continue
ta0 = dt[components[c0]]
ta1 = dt[components[c1]]
if p0 not in ta0:
continue
if ta0.index(p0) == ta1.index(p1):
# case gamma(i,s0,-s1)in c0, gamma(j,-s0,s2) in c1;
# to deal with this case one could add to the position
# a flag for transposition;
# one could write [(c0, False), (c1, True)]
raise NotImplementedError
# if p0 == ta0[1] then G in pos c0 is mult on the right by G in c1
# if p0 == ta0[0] then G in pos c1 is mult on the right by G in c0
ta0 = dt[components[c0]]
b0, b1 = (c0, c1) if p0 == ta0[1] else (c1, c0)
lines1 = lines[:]
for line in lines:
if line[-1] == b0:
if line[0] == b1:
n = line.index(min(line))
traces1.append(line)
traces.append(line[n:] + line[:n])
else:
line.append(b1)
break
elif line[0] == b1:
line.insert(0, b0)
break
else:
lines1.append([b0, b1])
lines = [x for x in lines1 if x not in traces1]
lines = _join_lines(lines)
rest = []
for line in lines:
for y in line:
rest.append(y)
for line in traces:
for y in line:
rest.append(y)
rest = [x for x in range(len(components)) if x not in rest]
return lines, traces, rest
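# Hedged usage sketch for ``get_lines`` (``ex`` and ``Spinor`` are assumed
# names, not defined in this module): for an expression built from heads that
# each carry at most two ``Spinor`` indices,
#
#     lines, traces, rest = get_lines(ex, Spinor)
#
# returns the open matrix lines, the closed (traced) lines and the positions
# of the remaining component tensors, as described in the docstring above.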
def get_indices(t):
if not isinstance(t, TensExpr):
return ()
return t.get_indices()
def get_tids(t):
if isinstance(t, TensExpr):
return t._tids
return TIDS([], [], [])
def get_coeff(t):
if isinstance(t, Tensor):
return S.One
if isinstance(t, TensMul):
return t.coeff
if isinstance(t, TensExpr):
raise ValueError("no coefficient associated to this tensor expression")
return t
def contract_metric(t, g):
if isinstance(t, TensExpr):
return t.contract_metric(g)
return t
def perm2tensor(t, g, canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``
For further details, see the method in ``TIDS`` with the same name.
"""
if not isinstance(t, TensExpr):
return t
new_tids = get_tids(t).perm2tensor(g, canon_bp)
coeff = get_coeff(t)
if g[-1] != len(g) - 1:
coeff = -coeff
res = TensMul.from_TIDS(coeff, new_tids, is_canon_bp=canon_bp)
return res
def substitute_indices(t, *index_tuples):
"""
Return a tensor with free indices substituted according to ``index_tuples``
``index_types`` list of tuples ``(old_index, new_index)``
Note: this method will neither raise or lower the indices, it will just replace their symbol.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
>>> A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
>>> t = A(i, k)*B(-k, -j); t
A(i, L_0)*B(-L_0, -j)
>>> t.substitute_indices((i,j), (j, k))
A(j, L_0)*B(-L_0, -k)
"""
if not isinstance(t, TensExpr):
return t
free = t.free
free1 = []
for j, ipos, cpos in free:
for i, v in index_tuples:
if i._name == j._name and i._tensortype == j._tensortype:
if i._is_up == j._is_up:
free1.append((v, ipos, cpos))
else:
free1.append((-v, ipos, cpos))
break
else:
free1.append((j, ipos, cpos))
t = TensMul.from_data(t.coeff, t.components, free1, t.dum)
return t
|
the-stack_106_22313 | import argparse
from installer.parallel_installer import ParallelInstaller
from typing import Dict, Any
import json
import sys
def build_dependency_json(requirements_json_path: str) -> Dict[str, Any]:
"""
Returns a dictionary representation of the JSON object stored in the
requirements file.
"""
    try:
        with open(requirements_json_path, "r") as fh:
            try:
                requirements_json = json.loads(fh.read())
                return requirements_json
            except json.JSONDecodeError:
                print(f"{requirements_json_path} does not appear to be valid JSON.")
                sys.exit(1)
    except OSError:
        print(f"Unable to read from {requirements_json_path}")
        sys.exit(1)
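# Rough shape of the `pipdeptree --json-tree` output consumed above (illustrative only;
# exact field names can vary between pipdeptree versions):
# [
#   {"key": "requests", "package_name": "requests", "installed_version": "2.28.1",
#    "dependencies": [{"key": "idna", "package_name": "idna", "dependencies": []}]},
#   ...
# ]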
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--threads",
default=1,
type=int,
help="number of threads used to install packages",
)
parser.add_argument(
"--requirements-path",
required=True,
type=str,
help="/path/to/requirements.json (the output of `pipdeptree --json-tree`)",
)
parser.add_argument(
"--pip-path",
default="/usr/bin/pip3",
type=str,
help="/path/to/python-pip (e.g. /usr/bin/pip3)",
)
parser.add_argument(
"--target",
type=str,
help="target path to install python packages (e.g. /path/to/target)",
)
parser.add_argument(
"--skip-dependency-check",
action="store_true",
help="If specified, pip will install with the --no-deps flag.",
)
parser.add_argument(
"--no-cache-dir",
action="store_true",
help="If specified, pip will install with the --no-cache-dir flag."
)
args = parser.parse_args()
requirements_json = build_dependency_json(args.requirements_path)
installer = ParallelInstaller(
requirements_json=requirements_json,
threads=args.threads,
pip_path=args.pip_path,
target=args.target,
skip_dependency_check=args.skip_dependency_check,
no_cache_dir=args.no_cache_dir,
)
installer.install()
if __name__ == "__main__":
main()
|
the-stack_106_22314 | from kubernetes import client, config, watch
import os
import sys
import requests
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
def writeTextToFile(folder, filename, data):
with open(folder +"/"+ filename, 'w') as f:
f.write(data)
f.close()
def request(url, method, payload):
r = requests.Session()
retries = Retry(total = 5,
connect = 5,
backoff_factor = 0.2,
status_forcelist = [ 500, 502, 503, 504 ])
r.mount('http://', HTTPAdapter(max_retries=retries))
r.mount('https://', HTTPAdapter(max_retries=retries))
if url is None:
print("No url provided. Doing nothing.")
# If method is not provided use GET as default
elif method == "GET" or method is None:
res = r.get("%s" % url, timeout=10)
print ("%s request sent to %s. Response: %d %s" % (method, url, res.status_code, res.reason))
elif method == "POST":
res = r.post("%s" % url, json=payload, timeout=10)
print ("%s request sent to %s. Response: %d %s" % (method, url, res.status_code, res.reason))
def removeFile(folder, filename):
completeFile = folder +"/"+filename
if os.path.isfile(completeFile):
os.remove(completeFile)
else:
print("Error: %s file not found" % completeFile)
def watchForChanges(label, targetFolder, url, method, payload):
v1 = client.CoreV1Api()
w = watch.Watch()
stream = None
namespace = os.getenv("NAMESPACE")
if namespace is None:
stream = w.stream(v1.list_config_map_for_all_namespaces)
else:
stream = w.stream(v1.list_namespaced_config_map, namespace=namespace)
for event in stream:
metadata = event['object'].metadata
if metadata.labels is None:
continue
print(f'Working on configmap {metadata.namespace}/{metadata.name}')
if label in event['object'].metadata.labels.keys():
print("Configmap with label found")
dataMap=event['object'].data
if dataMap is None:
print("Configmap does not have data.")
continue
eventType = event['type']
for filename in dataMap.keys():
print("File in configmap %s %s" % (filename, eventType))
if (eventType == "ADDED") or (eventType == "MODIFIED"):
writeTextToFile(targetFolder, filename, dataMap[filename])
if url is not None:
request(url, method, payload)
else:
removeFile(targetFolder, filename)
if url is not None:
request(url, method, payload)
def main():
print("Starting config map collector")
label = os.getenv('LABEL')
if label is None:
print("Should have added LABEL as environment variable! Exit")
return -1
targetFolder = os.getenv('FOLDER')
if targetFolder is None:
print("Should have added FOLDER as environment variable! Exit")
return -1
method = os.getenv('REQ_METHOD')
url = os.getenv('REQ_URL')
payload = os.getenv('REQ_PAYLOAD')
config.load_incluster_config()
print("Config for cluster api loaded...")
watchForChanges(label, targetFolder, url, method, payload)
if __name__ == '__main__':
main()
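# Example invocation (hypothetical values; LABEL and FOLDER are required, NAMESPACE and
# the REQ_* variables are optional):
#   LABEL=my-configmap-label FOLDER=/tmp/configs NAMESPACE=default python collector.py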
|
the-stack_106_22315 | # -*- coding: utf-8 -*-
"""Tests using pytest_resilient_circuits"""
from __future__ import print_function
import pytest
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
PACKAGE_NAME = "fn_qradar_asset"
FUNCTION_NAME = "qradar_asset_query"
# Read the default configuration-data section from the package
config_data = get_config_data(PACKAGE_NAME)
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
def call_qradar_asset_query_function(circuits, function_params, timeout=10):
# Fire a message to the function
evt = SubmitTestFunction("qradar_asset_query", function_params)
circuits.manager.fire(evt)
event = circuits.watcher.wait("qradar_asset_query_result", parent=evt, timeout=timeout)
assert event
assert isinstance(event.kwargs["result"], FunctionResult)
pytest.wait_for(event, "complete", True)
return event.kwargs["result"].value
class TestQradarAssetQuery:
""" Tests for the qradar_asset_query function"""
def test_function_definition(self):
""" Test that the package provides customization_data that defines the function """
func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
assert func is not None
@pytest.mark.parametrize("incident_id, expected_results", [
(123, {"value": "xyz"}),
(123, {"value": "xyz"})
])
def test_success(self, circuits_app, incident_id, expected_results):
""" Test calling with sample values for the parameters """
function_params = {
"incident_id": incident_id
}
results = call_qradar_asset_query_function(circuits_app, function_params)
assert(expected_results == results) |
the-stack_106_22316 | # -*- coding: utf-8 -*-
from hearthstone.entities import Entity
from entity.spell_entity import SpellEntity
class LETL_030P3(SpellEntity):
"""
    Heat Burst
    Deal 12 damage. Next turn, the Speed of your Fire abilities is improved by (1).
"""
def __init__(self, entity: Entity):
super().__init__(entity)
self.damage = 12
self.range = 1
def play(self, hero, target):
power = self.game_entity.get_spell_power(self.spell_school, hero.own)
target.got_damage((self.damage + power) * self.damage_advantage[self.lettuce_role][target.lettuce_role])
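        # Worked example (hypothetical numbers): with spell power 0 and a role-advantage
        # multiplier of 2, the target would take (12 + 0) * 2 = 24 damage.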
|
the-stack_106_22318 | #!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from pktverify.addrs import Ipv6Addr
from pktverify.bytes import Bytes
DOMAIN_PREFIX = Bytes('fd00:7d03:7d03:7d03')
BACKBONE_IPV6_PREFIX = Bytes('91')
LINK_LOCAL_All_THREAD_NODES_MULTICAST_ADDRESS = Ipv6Addr('ff32:40:fd00:db8::1')
REALM_LOCAL_All_THREAD_NODES_MULTICAST_ADDRESS = Ipv6Addr('ff33:40:fd00:db8::1')
REALM_LOCAL_ALL_NODES_ADDRESS = Ipv6Addr('ff03::1')
REALM_LOCAL_ALL_ROUTERS_ADDRESS = Ipv6Addr('ff03::2')
LINK_LOCAL_ALL_NODES_MULTICAST_ADDRESS = Ipv6Addr('ff02::1')
LINK_LOCAL_ALL_ROUTERS_MULTICAST_ADDRESS = Ipv6Addr('ff02::2')
LINK_LOCAL_ALL_BBRS_MULTICAST_ADDRESS = Ipv6Addr('ff32:40:fd00:7d03:7d03:7d03:0:3')
# MA in Test Plan, make sure these are same as ../config.py
MA1 = Ipv6Addr('ff04::1234:777a:1')
MA1g = Ipv6Addr('ff0e::1234:777a:1')
MA2 = Ipv6Addr('ff05::1234:777a:1')
MA3 = Ipv6Addr('ff0e::1234:777a:3')
MA4 = Ipv6Addr('ff05::1234:777a:4')
MA5 = Ipv6Addr('ff03::1234:777a:5')
MA6 = Ipv6Addr('ff02::1')
MAe1 = Ipv6Addr('fd0e::1234:777a:1')
MAe2 = Ipv6Addr('::')
MAe3 = Ipv6Addr('cafe::e0ff')
ALL_MPL_FORWARDERS_MA = Ipv6Addr('ff03::fc')
LINK_LOCAL_PREFIX = Bytes("fe80")
DEFAULT_MESH_LOCAL_PREFIX = Bytes("fd00:0db8:0000:0000")
# WPAN CMDs
WPAN_DATA_REQUEST = 4
# WPAN Frame Types
WPAN_BEACON = 0
WPAN_DATA = 1
WPAN_ACK = 2
WPAN_CMD = 3
# COAP methods
COAP_CODE_POST = 2
COAP_CODE_ACK = 68
MLE_LINK_REQUEST = 0
MLE_LINK_ACCEPT = 1
MLE_LINK_ACCEPT_AND_REQUEST = 2
MLE_ADVERTISEMENT = 4
MLE_DATA_REQUEST = 7
MLE_DATA_RESPONSE = 8
MLE_PARENT_REQUEST = 9
MLE_PARENT_RESPONSE = 10
MLE_CHILD_ID_REQUEST = 11
MLE_CHILD_ID_RESPONSE = 12
MLE_CHILD_UPDATE_REQUEST = 13
MLE_CHILD_UPDATE_RESPONSE = 14
MLE_ANNOUNCE = 15
MLE_DISCOVERY_REQUEST = 16
MLE_DISCOVERY_RESPONSE = 17
MLE_LINK_METRICS_MANAGEMENT_REQUEST = 18
MLE_LINK_METRICS_MANAGEMENT_RESPONSE = 19
MLE_LINK_PROBE = 20
# COAP URIs
ADDR_QRY_URI = '/a/aq'
ADDR_NTF_URI = '/a/an'
ADDR_ERR_URI = '/a/ae'
ADDR_SOL_URI = '/a/as'
ADDR_REL_URI = '/a/ar'
SVR_DATA_URI = '/a/sd'
ND_DATA_URI = '/a/nd'
RLY_RX_URI = '/c/rx'
RLY_TX_URI = '/c/tx'
MGMT_PANID_QUERY = '/c/pq'
MGMT_PANID_CONFLICT = '/c/pc'
MGMT_ED_SCAN = '/c/es'
MGMT_ED_REPORT = '/c/er'
MGMT_ACTIVE_GET_URI = '/c/ag'
MGMT_ACTIVE_SET_URI = '/c/as'
MGMT_PENDING_SET_URI = '/c/ps'
MGMT_DATASET_CHANGED_URI = '/c/dc'
DIAG_GET_URI = '/d/dg'
DIAG_RST_URI = '/d/dr'
# ADDR SOL Status
ADDR_SOL_SUCCESS = 0
ADDR_SOL_NA = 1
# MLE TLVs
SOURCE_ADDRESS_TLV = 0
MODE_TLV = 1
TIMEOUT_TLV = 2
CHALLENGE_TLV = 3
RESPONSE_TLV = 4
LINK_LAYER_FRAME_COUNTER_TLV = 5
LINK_QUALITY_TLV = 6
PARAMETER_TLV = 7
MLE_FRAME_COUNTER_TLV = 8
ROUTE64_TLV = 9
ADDRESS16_TLV = 10
LEADER_DATA_TLV = 11
NETWORK_DATA_TLV = 12
TLV_REQUEST_TLV = 13
SCAN_MASK_TLV = 14
CONNECTIVITY_TLV = 15
LINK_MARGIN_TLV = 16
STATUS_TLV = 17
VERSION_TLV = 18
ADDRESS_REGISTRATION_TLV = 19
CHANNEL_TLV = 20
PAN_ID_TLV = 21
ACTIVE_TIMESTAMP_TLV = 22
PENDING_TIMESTAMP_TLV = 23
ACTIVE_OPERATION_DATASET_TLV = 24
PENDING_OPERATION_DATASET_TLV = 25
THREAD_DISCOVERY_TLV = 26
CSL_SYNCHRONIZED_TIMEOUT = 85
# Network Layer TLVs
NL_TARGET_EID_TLV = 0
NL_MAC_EXTENDED_ADDRESS_TLV = 1
NL_RLOC16_TLV = 2
NL_ML_EID_TLV = 3
NL_STATUS_TLV = 4
NL_TIME_SINCE_LAST_TRANSACTION_TLV = 6
NL_ROUTER_MASK_TLV = 7
NL_ND_OPTION_TLV = 8
NL_ND_DATA_TLV = 9
NL_THREAD_NETWORK_DATA_TLV = 10
# Network Layer Status
NL_SUCESS = 0
NL_NO_ADDRESS_AVAILABLE = 1
NL_TOO_FEW_ROUTERS = 2
NL_HAVE_CHILD_ID_REQUEST = 3
NL_PARENT_PARTITION_CHANGE = 4
# MeshCop TLVs
NM_CHANNEL_TLV = 0
NM_PAN_ID_TLV = 1
NM_EXTENDED_PAN_ID_TLV = 2
NM_NETWORK_NAME_TLV = 3
NM_PSKC_TLV = 4
NM_NETWORK_MASTER_KEY_TLV = 5
NM_NETWORK_KEY_SEQUENCE_COUNTER_TLV = 6
NM_NETWORK_MESH_LOCAL_PREFIX_TLV = 7
NM_STEERING_DATA_TLV = 8
NM_BORDER_AGENT_LOCATOR_TLV = 9
NM_COMMISSIONER_ID_TLV = 10
NM_COMMISSIONER_SESSION_ID_TLV = 11
NM_SECURITY_POLICY_TLV = 12
NM_ACTIVE_TIMESTAMP_TLV = 14
NM_COMMISSIONER_UDP_PORT_TLV = 15
NM_JOINER_UDP_PORT_TLV = 18
NM_DELAY_TIMER_TLV = 52
NM_CHANNEL_MASK_TLV = 53
NM_ENERGY_LIST_TLV = 57
NM_DISCOVERY_REQUEST_TLV = 128
NM_DISCOVERY_RESPONSE_TLV = 129
# Diagnostic TLVs
DG_MAC_EXTENDED_ADDRESS_TLV = 0
DG_MAC_ADDRESS_TLV = 1
DG_MODE_TLV = 2
DG_TIMEOUT_TLV = 3
DG_CONNECTIVITY_TLV = 4
DG_ROUTE64_TLV = 5
DG_LEADER_DATA_TLV = 6
DG_NETWORK_DATA_TLV = 7
DG_IPV6_ADDRESS_LIST_TLV = 8
DG_MAC_COUNTERS_TLV = 9
DG_BATTERY_LEVEL_TLV = 14
DG_SUPPLY_VOLTAGE_TLV = 15
DG_CHILD_TABLE_TLV = 16
DG_CHANNEL_PAGES_TLV = 17
DG_TYPE_LIST_TLV = 18
DG_MAX_CHILD_TIMEOUT_TLV = 19
# DTLS
HANDSHAKE_HELLO_REQUEST = 0
HANDSHAKE_CLIENT_HELLO = 1
HANDSHAKE_SERVER_HELLO = 2
HANDSHAKE_HELLO_VERIFY_REQUEST = 3
HANDSHAKE_CERTIFICATE = 11
HANDSHAKE_SERVER_KEY_EXCHANGE = 12
HANDSHAKE_CERTIFICATE_REQUEST = 13
HANDSHAKE_SERVER_HELLO_DONE = 14
HANDSHAKE_CERTIFICATE_VERIFY = 15
HANDSHAKE_CLIENT_KEY_EXCHANGE = 16
HANDSHAKE_FINISHED = 20
CONTENT_CHANGE_CIPHER_SPEC = 20
CONTENT_ALERT = 21
CONTENT_HANDSHAKE = 22
CONTENT_APPLICATION_DATA = 23
# Network Data TLVs
NWD_HAS_ROUTER_TLV = 0
NWD_PREFIX_TLV = 1
NWD_BORDER_ROUTER_TLV = 2
NWD_6LOWPAN_ID_TLV = 3
NWD_SERVICE_TLV = 4
NWD_SERVER_TLV = 5
NWD_COMMISSIONING_DATA_TLV = 6
# DUA related constants
ADDRESS_QUERY_INITIAL_RETRY_DELAY = 15
ADDRESS_QUERY_MAX_RETRY_DELAY = 8
ADDRESS_QUERY_TIMEOUT = 3
ADVERTISEMENT_I_MAX = 32
ADVERTISEMENT_I_MIN = 1
CONTEXT_ID_REUSE_DELAY = 48
DATA_RESUBMIT_DELAY = 300
DUA_DAD_PERIOD = 100
DUA_DAD_QUERY_TIMEOUT = 1.0
DUA_DAD_REPEATS = 2
DUA_RECENT_TIME = 20
FAILED_ROUTER_TRANSMISSIONS = 4
ID_REUSE_DELAY = 100
ID_SEQUENCE_PERIOD = 10
INFINITE_COST_TIMEOUT = 90
REAL_LAYER_NAMES = {
'mle',
'coap',
'dtls',
'wpan',
'eth',
'tcp',
'udp',
'ip',
'ipv6',
'icmpv6',
'6lowpan',
'arp',
'thread_bl',
'thread_address',
'thread_diagnostic',
'thread_nm',
'ssdp',
'dns',
'igmp',
'mdns',
}
FAKE_LAYER_NAMES = {'thread_nwd', 'thread_meshcop'}
VALID_LAYER_NAMES = REAL_LAYER_NAMES | FAKE_LAYER_NAMES
AUTO_SEEK_BACK_MAX_DURATION = 0.01
# Wireshark configs
WIRESHARK_OVERRIDE_PREFS = {
'6lowpan.context0':
'fd00:db8::/64',
'6lowpan.context1':
'fd00:7d03:7d03:7d03::/64',
'wpan.802154_fcs_ok':
'FALSE',
'wpan.802154_sec_suite':
'AES-128 Encryption, 32-bit Integrity Protection',
'thread.thr_seq_ctr':
'00000000',
'uat:ieee802154_keys':
'''"00112233445566778899aabbccddeeff","1","Thread hash"
"ffeeddccbbaa99887766554433221100","1","Thread hash"''',
}
WIRESHARK_DECODE_AS_ENTRIES = {
'udp.port==61631': 'coap',
}
TIMEOUT_JOIN_NETWORK = 10
TIMEOUT_DUA_REGISTRATION = 10
TIMEOUT_DUA_DAD = 15
TIMEOUT_HOST_READY = 10
TIMEOUT_CHILD_DETACH = 120
TIMEOUT_REGISTER_MA = 5
# 802.15.4 Frame Version
MAC_FRAME_VERSION_2006 = 1
MAC_FRAME_VERSION_2015 = 2
# CSL
CSL_DEFAULT_PERIOD = 3125 # 0.5s, 3125 in units of ten symbols
CSL_DEFAULT_PERIOD_IN_SECOND = 0.5
US_PER_TEN_SYMBOLS = 160
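# Sanity check: CSL_DEFAULT_PERIOD * US_PER_TEN_SYMBOLS = 3125 * 160 us = 500000 us = 0.5 s,
# matching CSL_DEFAULT_PERIOD_IN_SECOND above.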
CSL_IE_ID = 0x1a
# Thread Version TLV value
THREAD_VERSION_1_2 = 3
# ICMPv6 Types
ICMPV6_TYPE_DESTINATION_UNREACHABLE = 1
# Link Metrics
LINK_METRICS_STATUS_SUCCESS = 0
LINK_METRICS_STATUS_CANNOT_SUPPORT_NEW_SERIES = 1
LINK_METRICS_STATUS_SERIES_ID_ALREADY_REGISTERED = 2
LINK_METRICS_STATUS_SERIES_ID_NOT_RECOGNIZED = 3
LINK_METRICS_STATUS_NO_MATCHING_FRAMES_RECEIVED = 4
LINK_METRICS_STATUS_OTHER_ERROR = 254
LINK_METRICS_TYPE_AVERAGE_ENUM_COUNT = 0
LINK_METRICS_TYPE_AVERAGE_ENUM_EXPONENTIAL = 1
LINK_METRICS_METRIC_TYPE_ENUM_PDU_COUNT = 0
LINK_METRICS_METRIC_TYPE_ENUM_LQI = 1
LINK_METRICS_METRIC_TYPE_ENUM_LINK_MARGIN = 2
LINK_METRICS_METRIC_TYPE_ENUM_RSSI = 3
if __name__ == '__main__':
from pktverify.addrs import Ipv6Addr
assert Ipv6Addr("fe80:0000:0000:0000:0200:0000:0000:0004").startswith(LINK_LOCAL_PREFIX)
assert Ipv6Addr("fd00:0db8:0000:0000:0000:00ff:fe00:8001").startswith(DEFAULT_MESH_LOCAL_PREFIX)
|
the-stack_106_22320 | import argparse
import codecs
import logging
import random
from collections import defaultdict as ddict
import nltk
from nltk.stem.wordnet import WordNetLemmatizer
random.seed(13370)
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def get_triples_from_file(filename):
entity_pair_relations = ddict(set)
for line in codecs.open(filename, "r", "utf-8"):
fields = line.split(',')
source = fields[0]
relation = fields[1]
target = fields[2]
entity_pair_relations[(source, target)].add(relation)
return entity_pair_relations
def create_type_dict(filename):
    # Note that this depends on the form of the given tensor and where each column is.
    # The format of the input CSV file is source,relation,target in columns 0,1,2 and
    # source_type,target_type in columns 8,9, respectively.
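    # Illustrative row (columns 3-7 are unused by this function; values are hypothetical):
    #   cat,eat,mouse,_,_,_,_,_,animal,animal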
entities = set()
type_entity_dict = ddict(set)
entity_type_dict = ddict(set)
for line in codecs.open(filename, "r", "utf-8"):
fields = line.strip().split(',')
source = fields[0]
target = fields[2]
source_type = fields[8]
target_type = fields[9]
type_entity_dict[source_type].add(source)
entity_type_dict[source].add(source_type)
type_entity_dict[target_type].add(target)
entity_type_dict[target].add(target_type)
entities.add(source)
entities.add(target)
return type_entity_dict, entity_type_dict, entities
def find_replacement(location1, location2, words, type_entity_dict, entity_type_dict,
entity_pair_relations, input_sentence):
    # The method receives the sentence as the list ``words`` and takes the locations of the
    # two words that we want to consider. Given these two words, the goal is to generate two
    # sets of perturbations, by replacing one of these words at a time. Let us call these two
    # word1 and word2. We look into the triple KB and find the list of predicates
    # (predicate_list) that connect word1 and word2; note that we consider both orders.
    # Then, given word1, we look at all the words that have the same type (type1) and make a
    # list of the words that have type1 but never appeared with word2 and any of the
    # predicates in the KB. These are our candidate replacements. The same approach is
    # repeated for replacing word2.
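    # Illustration (hypothetical KB): if word1="cat" and word2="mouse" are connected by
    # {"eats"}, then any entity of the same type as "cat" that never appears with "mouse"
    # under "eats" becomes a candidate replacement for "cat", and likewise for "mouse".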
lemmatizer = WordNetLemmatizer()
replacement_list = []
negative_sentences_per_sentence = []
lemma1 = lemmatizer.lemmatize(words[location1])
lemma2 = lemmatizer.lemmatize(words[location2])
for type1 in entity_type_dict[lemma1]:
for type2 in entity_type_dict[lemma2]:
predicate_list = entity_pair_relations[(lemma1, lemma2)]
for candidate_item in type_entity_dict[type1]:
if len(entity_pair_relations[(candidate_item, lemma2)].intersection(predicate_list)) == 0:
replacement_list.append((candidate_item, words[location2]))
for candidate_item in type_entity_dict[type2]:
if len(entity_pair_relations[(lemma1, candidate_item)].intersection(predicate_list)) == 0:
replacement_list.append((words[location1], candidate_item))
for (replacement1, replacement2) in replacement_list:
new_sentence = input_sentence.replace(words[location1], replacement1)
new_sentence = new_sentence.replace(words[location2], replacement2)
negative_sentences_per_sentence.append(new_sentence)
return negative_sentences_per_sentence
def create_negative_sentence(input_sentence, entities, type_entity_dict, entity_type_dict,
entity_pair_relations):
lemmatizer = WordNetLemmatizer()
words = nltk.word_tokenize(input_sentence)
negative_sentences = []
for i in range(len(words)): # pylint: disable=consider-using-enumerate
if lemmatizer.lemmatize(words[i]) in entities:
for j in range(i + 1, len(words)):
if lemmatizer.lemmatize(words[j]) in entities:
negative_sentences.extend(
find_replacement(i, j, words, type_entity_dict, entity_type_dict,
entity_pair_relations, input_sentence))
negative_sentences.extend(
find_replacement(j, i, words, type_entity_dict, entity_type_dict,
entity_pair_relations, input_sentence))
return negative_sentences
def main():
'''Takes as input a list of sentences and a KB, and produces as output a collection of
corrupted sentences.
The input sentences are assumed formatted as one sentence per line, possibly with an index:
either "[sentence]" or "[sentence id][tab][sentence".
The input KB format is described in the comment to create_type_dict.
The output format is a tab-separated list of corruptions per sentence. For every sentence for
which we found a corruption, we output a line formatted as "[sentence][tab][sentence][tab]...".
Sentences for which we found no corruption are just skipped.
'''
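    # Illustrative (made-up) input line and one possible output line:
    #   input : s1<TAB>the cat eats the mouse
    #   output: s1<TAB>the dog eats the mouse<TAB>the cat eats the cheese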
argparser = argparse.ArgumentParser(description="Perturb sentences using KB and type information")
argparser.add_argument("--input_file", type=str,
help="File with sentences to perturb, one per line.")
argparser.add_argument("--output_file", type=str,
help="File with purturbed sentences along with an id, one per line.")
argparser.add_argument("--kb_tensor_file", type=str,
help="input KB tensor in csv format with type information,one per line.")
argparser.add_argument("--num_perturbation", type=int, default=20,
help="no. of word replacements per word combination in sentence, default=20")
argparser.add_argument("--max_sentences", type=int,
help="Only do this many sentences from --input_file")
args = argparser.parse_args()
entity_pair_relations = get_triples_from_file(args.kb_tensor_file)
type_entity_dict, entity_type_dict, entities = create_type_dict(args.kb_tensor_file)
negative_sentences = []
index = 0
for line in codecs.open(args.input_file, "r", "utf-8"):
index += 1
if index % 1000 == 0:
logger.info(index)
if args.max_sentences and index > args.max_sentences:
break
if '\t' in line:
(sentence_index, input_sentence) = line.strip().split('\t')
else:
sentence_index = None
input_sentence = line.strip()
input_sentence = input_sentence.lower()
negatives = create_negative_sentence(input_sentence, entities, type_entity_dict,
entity_type_dict, entity_pair_relations)
if negatives:
random.shuffle(negatives)
negative_sentences.append((sentence_index, negatives[:args.num_perturbation]))
with codecs.open(args.output_file, 'w', 'utf-8') as out_file:
for sentence_index, sentences in negative_sentences:
if sentence_index is not None:
out_file.write(sentence_index + '\t' + '\t'.join(sentences) + '\n')
else:
out_file.write('\t'.join(sentences) + '\n')
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.INFO)
main()
|
the-stack_106_22321 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
plt.style.use('ggplot')
import arch
from arch.unitroot import ADF
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.seasonal import seasonal_decompose
import os
import datetime as dt
#
# dff_df_R001 = df.diff()
# adf = ADF(df_R001.dropna())
# print(adf.summary().as_text())
# adf.lags=1
# plot_acf(df_R001, lags=25, alpha=0.5)  # ACF (autocorrelation) plot
# plot_pacf(df_R001, lags=25, alpha=0.5)  # PACF (partial autocorrelation) plot
#
# adf.lags = 4
#
# reg_res = adf.regression
# print(reg_res.summary().as_text())
#
# type(reg_res)
class TSAnalysis(object):
def plot_trend(self, df_ts, size):
ax = plt.subplot()
# 对size个数据进行移动平均
rol_mean = df_ts.rolling(window=size).mean()
# 对size个数据进行加权移动平均
rol_weighted_mean = df_ts.ewm(span=size).mean()
df_ts.plot(color='blue', label='Original', ax=ax)
rol_mean.plot(color='red', label='Rolling Mean', ax=ax)
rol_weighted_mean.plot(color='black', label='Weighted Rolling Mean', ax=ax)
plt.legend(loc='best')
plt.title('Rolling Mean')
plt.show()
def plot_ts(self, df_ts):
ax = plt.subplot()
df_ts.plot(color='blue', ax=ax)
plt.show()
def ADF_test(self, df_ts, lags=None):
        if lags is None:
try:
adf = ADF(df_ts)
except:
adf = ADF(df_ts.dropna())
else:
try:
adf = ADF(df_ts)
except:
adf = ADF(df_ts.dropna())
adf.lags = lags
print(adf.summary().as_text())
return adf
def plot_acf_pacf(self, df_ts, lags=31):
f = plt.figure(facecolor='white', figsize=(12, 8))
ax1 = f.add_subplot(211)
        plot_acf(df_ts, lags=lags, ax=ax1)
        ax2 = f.add_subplot(212)
        plot_pacf(df_ts, lags=lags, ax=ax2)
plt.show()
if __name__ == '__main__':
print(os.getcwd())
RU = pd.read_excel('/home/nealzc1991/PycharmProjects/Py4Invst/Fundamental/RU.xls')
RU.dropna(inplace=True)
df = pd.DataFrame(columns=['Date', 'Close'])
df.loc[:, 'Date'] = RU.loc[:, 'Date']
df.loc[:, 'Close'] = RU.loc[:, 'Close']
df.set_index('Date', inplace=True)
df_RU = df.loc[dt.datetime(2015, 7, 31):dt.datetime(2016, 7, 29), :]
a = TSAnalysis()
adf = a.ADF_test(df.apply(np.log).diff(1).dropna())
a.plot_acf_pacf(df.apply(np.log).diff(1).dropna())
df_diff_1 = df.apply(np.log).diff(1).dropna()
from statsmodels.tsa.arima_model import ARMA
model = ARMA(df_diff_1, order=(1, 1))
result_arma = model.fit(disp=-1, method='css')
predict_ts = result_arma.predict()
diff_shift_ts = df_diff_1.shift(1).dropna()
diff_recover_1 = predict_ts + diff_shift_ts.loc[:,'Close'] |
the-stack_106_22323 | #!/usr/bin/env python
"""Analysis of alignments for EdiTyper"""
from __future__ import division
from __future__ import print_function
import sys
PYTHON_VERSION = sys.version_info.major
import os
import time
import logging
import itertools
from math import floor, ceil
from collections import Counter, defaultdict, namedtuple
if PYTHON_VERSION == 2:
import toolkit
from itertools import imap as map
range = xrange
elif PYTHON_VERSION == 3:
from edityper import toolkit
else:
raise SystemExit("Please use Python 2 or 3 for this module: " + __name__)
_DISP_BREAK = '-----------------------------------------------------------------------------------------'
NA = 'NA'
Reporter = namedtuple('Reporter', ('deletions', 'insertions', 'mismatches', 'matches'))
def _filter_to_dict(filtered_dict): # type: (Iterable[Tuple[Any, Any]]) -> Dict[Any, Any]
return {pair[0]: pair[1] for pair in filtered_dict}
def _fastq_header(fastq_name, fastq_path): # type: (str, str) -> str
header = ( # type: Tuple[str]
'##FASTQ',
'Name:%s' % fastq_name,
'Path:%s' % fastq_path
)
return '\t'.join(header)
def _snp_header(snp_info): # type: (SNP) -> str
header = ( # type: Tuple[str]
'##SNP',
'POS:%s' % (snp_info.position + 1),
'REF:%s' % snp_info.reference,
'TEMPLATE:%s' % snp_info.target
)
return '\t'.join(header)
def cummulative_deletions(deletions): # type: Dict[int, List[int]] -> Dict[int, int]
"""Calculate cummulative deletions"""
cummul_del = defaultdict(int) # type: defaultdict
for position, dist in deletions.items(): # type: int, List[int]
for length in dist: # type: int
for i in range(length): # type: int
cummul_del[position + i] += 1
return dict(cummul_del)
def calc_coverage(cummul_del, mismatches, matches): # type: (Dict[int, int], Dict[int, List[str]], Dict[int, int]) -> Dict[int, int]
"""Calculate coverage"""
coverage = defaultdict(int) # type: defaultdict
for base, cummul_count in cummul_del.items(): # type: int, int
coverage[base] += cummul_count
for base, mismatch_list in mismatches.items(): # type: int, List[str]
coverage[base] += len(mismatch_list)
for base, match_count in matches.items(): # type: int, int
coverage[base] += match_count
return coverage
def percent(num, total): # type: (int, int) -> float
"""Calculate a percent"""
    perc = num * 100 / total if total != 0 else 0
return float(round(perc, 2))
def summarize(data, rounding=None): # type: (Iterable[Union[int, float]], Optional[int]) -> Union[int, float], float, float
    """Get the sum, mean, and standard deviation of a collection of data"""
total = sum(data) # type: Union[int, float]
avg = toolkit.mean(x=data) # type: float
std = toolkit.stdev(x=data) # type: float
if rounding:
avg = round(avg, rounding)
std = round(std, rounding)
return total, float(avg), float(std)
def events_report(
fastq_name, # type: str
fastq_path, # type: str
events, # type: Dict[str, defaultdict]
cummul_del, # type: Dict[int, int]
coverage, # type: Dict[int, int]
reference, # type: str
snp_info, # type: Optional[SNP]
output_prefix # type: str
):
# type: (...) -> None
"""Create the events table"""
logging.info("FASTQ %s: Creating events table...", fastq_name)
events_start = time.time() # type: float
# Header information
header = ( # type: Tuple[str]
'#POS',
'REF',
'COV',
'DEL',
'AVG_DEL',
'DCOUNT',
'INS',
'AVG_INS',
'A',
'T',
'C',
'G'
)
# Create output file
output_name = os.path.join(output_prefix, fastq_name + '.events.txt')
with open(output_name, 'w') as efile:
logging.info("FASTQ %s: Writing events table to %s", fastq_name, output_name)
efile.write(_fastq_header(fastq_name=fastq_name, fastq_path=fastq_path) + '\n')
if snp_info:
efile.write(_snp_header(snp_info=snp_info) + '\n')
efile.write('\t'.join(header) + '\n')
efile.flush()
for index, base in enumerate(reference):
# Get the mismatches
try:
nucleotides = dict(Counter(events['mismatches'][index])) # type: Dict[str, int]
except KeyError:
nucleotides = dict.fromkeys(('A', 'C', 'G', 'T'), 0) # type: Dict[str, int]
# Get deletions
deletions = events['deletions'].get(index, []) # type: List[int]
deletion_count = len(deletions) # type: int
avg_deletion = toolkit.mean(x=deletions) # type: float
# Get insertions
insertions = events['insertions'].get(index, []) # type: List[int]
insertion_count = len(insertions) # type: int
avg_insertion = toolkit.mean(x=insertions) # type: float
# Matches
nucleotides[base] = events['matches'].get(index, 0) # type: int
# Assemble and write
results = ( # type: Tuple[Any]
index + 1,
base,
coverage.get(index, 0),
deletion_count,
round(avg_deletion, 2),
cummul_del.get(index, 0),
insertion_count,
round(avg_insertion, 2),
nucleotides.get('A', 0),
nucleotides.get('T', 0),
nucleotides.get('C', 0),
nucleotides.get('G', 0)
)
results = map(str, results) # type: Tuple[str]
efile.write('\t'.join(results))
efile.write('\n')
efile.flush()
logging.debug("FASTQ %s: Creating events table took %s seconds", fastq_name, round(time.time() - events_start, 3))
def display_classification(
fastq_name, # type: str
fastq_path, # type: str
classifications, # type: Tuple[Dict[str, Events]]
unique_reads, # type: Mapping[str, int]
snp_info, # type: Optional[SNP]
fwd_score, # type: float
rev_score, # type: float
score_threshold, # type: float
output_prefix # type: str
):
# type: (...) -> (int, Dict[str, int])
"""Display the report of the read classifification"""
# Make some headers for the display
class_header = "################################################"
pre_repeat = int(floor((len(class_header) - len(fastq_name)) / 2)) # type: int
post_repeat = int(ceil((len(class_header) - len(fastq_name)) / 2)) # type: int
name_header = ''.join(itertools.repeat('-', pre_repeat)) + fastq_name + ''.join(itertools.repeat('-', post_repeat))
# Create an output name
output_name = os.path.join(output_prefix, fastq_name + '.classifications.txt')
logging.info("FASTQ %s: Writing full classification breakdown to %s", fastq_name, output_name)
# Quick statistics
num_unique = len(unique_reads)
total_reads = sum(unique_reads.values())
# Display our classifications
logging.warning(class_header)
logging.warning("--------------Read Classifications--------------")
logging.warning(name_header)
read_header = ( # type: Tuple[str]
'##READS',
'TOTAL:%s' % total_reads,
'UNIQUE:%s' % num_unique,
'PERC_UNIQ:%s' % percent(num=num_unique, total=total_reads)
)
score_header = ('##SCORE', 'FWD:%s' % fwd_score, 'REV:%s' % rev_score, 'THRESHOLD:%s' % score_threshold) # type: Tuple[str]
category_header = ( # type: Tuple[str]
'#TAG',
'COUNT',
'PERC_COUNT',
'INS_EVENTS',
'AVG_INS',
'STD_DEV_INS',
'DEL_EVENTS',
'AVG_DEL',
'STD_DEV_DEL',
'MISMATCH_EVENTS',
'AVG_MIS',
'STD_DEV_MIS',
'NO_INDELS',
'PERC_NO_INDELS',
'INS_ONLY',
'PERC_INS_ONLY',
'DEL_ONLY',
'PERC_DEL_ONLY',
'INDELS',
'PERC_INDELS'
)
# Five categories, and read_classification is a tuple, so need index, not names
# A dictionary of classifications, numbered for easy access
iter_tag = { # type: Dict[int, str]
0: 'HDR',
1: 'MIX',
2: 'NHEJ',
3: 'NO_EDIT',
4: 'DISCARD'
}
counted_total = 0 # type: int
# hdr_indels = 0 # type: int
total_counts = dict.fromkeys(iter_tag.values(), 0) # type: Dict[str, int]
with open(output_name, 'w') as cfile:
cfile.write(_fastq_header(fastq_name=fastq_name, fastq_path=fastq_path) + '\n')
if snp_info:
cfile.write(_snp_header(snp_info=snp_info) + '\n')
cfile.write('\t'.join(read_header) + '\n')
cfile.write('\t'.join(score_header) + '\n')
cfile.write('\t'.join(category_header) + '\n')
cfile.flush()
for index, tag in sorted(iter_tag.items(), key=lambda tup: tup[0]): # type: int, str
# Some holding values
count = 0 # type: int
event_lists = defaultdict(list) # type: defaultdict[List]
event_counts = dict.fromkeys(('none', 'deletions', 'insertions', 'indels'), 0) # type: Dict[str, int]
for event in classifications[index].values(): # type: Event
# Create summaries
event_lists['indels'].append(event.num_ins + event.num_del)
event_lists['insertions'].extend([event.num_ins] * event.num_reads)
event_lists['deletions'].extend([event.num_del] * event.num_reads)
event_lists['mismatches'].extend([event.num_mis] * event.num_reads)
if event.num_ins > 0 and event.num_del > 0:
event_counts['indels'] += event.num_reads
elif event.num_ins > 0 and event.num_del <= 0:
event_counts['insertions'] += event.num_reads
elif event.num_del > 0 and event.num_ins <= 0:
event_counts['deletions'] += event.num_reads
else:
event_counts['none'] += event.num_reads
count += event.num_reads
counted_total += event.num_reads
avg_indels = toolkit.mean(x=event_lists['indels'])
if tag == 'DISCARD':
perc_count = NA
else:
perc_count = percent(num=count, total=total_reads)
# Display our summaries
logging.warning("%s: count %s", tag, count)
logging.warning("%s: avg indels %s", tag, round(avg_indels, 3))
# Reporting for HDR/MIX and NHEJ
if tag in {iter_tag[0], iter_tag[1], iter_tag[2]}:
total_ins, avg_ins, std_ins = summarize(data=event_lists['insertions'], rounding=2)
total_del, avg_del, std_del = summarize(data=event_lists['deletions'], rounding=2)
total_mis, avg_mis, std_mis = summarize(data=event_lists['mismatches'], rounding=2)
else:
total_ins, avg_ins, std_ins = NA, NA, NA
total_del, avg_del, std_del = NA, NA, NA
total_mis, avg_mis, std_mis = NA, NA, NA
# HDR/MIX-specific reporting
if tag in {iter_tag[0], iter_tag[1]}:
none_total, perc_none = event_counts['none'], percent(num=event_counts['none'], total=count)
del_total, perc_del = event_counts['deletions'], percent(num=event_counts['deletions'], total=count)
ins_total, perc_ins = event_counts['insertions'], percent(num=event_counts['insertions'], total=count)
indel_total, perc_indel = event_counts['indels'], percent(num=event_counts['indels'], total=count)
# hdr_indels = indel_total
else:
none_total, perc_none = NA, NA
del_total, perc_del = NA, NA
ins_total, perc_ins = NA, NA
indel_total, perc_indel = NA, NA
# Assemble our output line
out = ( # type: Tuple[Any]
tag,
count,
perc_count,
total_ins,
avg_ins,
std_ins,
total_del,
avg_del,
std_del,
total_mis,
avg_mis,
std_mis,
none_total,
perc_none,
del_total,
perc_del,
ins_total,
perc_ins,
indel_total,
perc_indel
)
out = map(str, out) # type: Iterable[str]
cfile.write('\t'.join(out) + '\n')
cfile.flush()
total_counts[tag] += count
# Write full classifications
if counted_total == total_reads:
logging.warning("Classified all reads")
else:
logging.error("%s reads missing after classification", total_reads - counted_total)
logging.warning(class_header)
# return hdr_indels, total_counts
return total_counts
|
the-stack_106_22326 | from PyQt5.QtWidgets import QSizePolicy, QWidget, QTextEdit, QHBoxLayout, QLabel
class GuiProjectDescription(object):
tab2 = None
text_edit = None
tab2box = None
authors_widget = None
authors_label = None
authors_box = None
project_description = "The aim of the project is to implement an" \
"optimization algorithm inspired by nature. \n\n" \
"The chosen optimization problem is the coloring of" \
"the graph. As part of the project, a simulator" \
" should also be prepared for a given optimization" \
" problem, in which it is possible to test the " \
"implemented genetic algorithm.\n\n"
authors = "Paweł Szynal\n" \
"Nr albumu: 226026\n" \
"\nKamil Zdeb\n" \
"Nr albumu: 235871\n"
@staticmethod
def get_project_description(project_description=project_description):
return project_description
@staticmethod
def get_project_authors(authors=authors):
return authors
def __init__(self, tab_widget_group_box):
self.create_description_widget(tab_widget_group_box)
def create_description_widget(self, tab_widget_group_box):
tab_widget_group_box.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Ignored)
self.tab2 = QWidget()
self.text_edit = QTextEdit()
self.text_edit.setPlainText(GuiProjectDescription.get_project_description())
self.text_edit.setReadOnly(True)
self.tab2box = QHBoxLayout()
self.tab2box.setContentsMargins(5, 5, 5, 5)
self.tab2box.addWidget(self.text_edit)
self.tab2.setLayout(self.tab2box)
tab_widget_group_box.addTab(self.tab2, "Project Description")
self.authors_widget = QWidget()
self.authors_label = QLabel()
self.authors_label.setText(GuiProjectDescription.get_project_authors())
self.authors_box = QHBoxLayout()
self.authors_box.setContentsMargins(5, 5, 5, 5)
self.authors_box.addWidget(self.authors_label)
self.authors_widget.setLayout(self.authors_box)
tab_widget_group_box.addTab(self.authors_widget, "Authors")
|
the-stack_106_22328 | from . import Databricks
class DBFS(Databricks.Databricks):
def __init__(self, url):
super().__init__()
self._api_type = 'dbfs'
self._url = url
def addBlock(self, data, handle):
endpoint = 'add-block'
url = self._set_url(self._url, self._api_type, endpoint)
payload = {
"data": data,
"handle": handle
}
return self._post(url, payload)
def closeStream(self, handle):
endpoint = 'close'
url = self._set_url(self._url, self._api_type, endpoint)
payload = {
"handle": handle
}
return self._post(url, payload)
def createFile(self, path, overwrite):
endpoint = 'create'
url = self._set_url(self._url, self._api_type, endpoint)
payload = {
'path': path,
'overwrite': overwrite
}
return self._post(url, payload)
def deleteFile(self, path, recursive):
endpoint = 'delete'
url = self._set_url(self._url, self._api_type, endpoint)
payload = {
'path': path,
            'recursive': recursive
}
return self._post(url, payload)
def getStatus(self, path):
endpoint = 'get-status'
url = self._set_url(self._url, self._api_type, endpoint)
payload = {
'path': path
}
return self._post(url, payload)
def listFiles(self, path):
endpoint = 'list'
url = self._set_url(self._url, self._api_type, endpoint)
payload = {
'path': path
}
return self._post(url, payload)
def makeDirs(self, path):
endpoint = 'mkdirs'
url = self._set_url(self._url, self._api_type, endpoint)
payload = {
'path': path
}
return self._post(url, payload)
def moveFiles(self, source_path, target_path):
endpoint = 'move'
url = self._set_url(self._url, self._api_type, endpoint)
payload = {
'source_path': source_path,
'target_path': target_path
}
return self._post(url, payload)
def putFiles(self, path, overwrite, files, contents=None):
endpoint = 'put'
url = self._set_url(self._url, self._api_type, endpoint)
payload = {
'path': path,
'contents': contents,
'overwrite': overwrite
}
if contents is None:
payload.pop('contents')
return self._post(url, payload=payload, files=files)
def readFiles(self, path, offset, length):
endpoint = 'read'
url = self._set_url(self._url, self._api_type, endpoint)
payload = {
'path': path,
'offset': offset,
'length': length
}
return self._post(url, payload)
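# Minimal usage sketch (illustrative; authentication and request plumbing are assumed to be
# handled by the parent Databricks class, and the URL below is a placeholder):
#
#   dbfs = DBFS('https://example.cloud.databricks.com')
#   print(dbfs.listFiles('/FileStore'))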
if __name__ == '__main__':
    # Placeholder workspace URL for illustration only; DBFS requires the API base URL.
    dbfs = DBFS('https://example.cloud.databricks.com')
|
the-stack_106_22329 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.flrw`."""
##############################################################################
# IMPORTS
# STDLIB
import abc
import copy
# THIRD PARTY
import pytest
import numpy as np
# LOCAL
import astropy.cosmology.units as cu
import astropy.units as u
import astropy.constants as const
from astropy.cosmology import (FLRW, FlatLambdaCDM, Flatw0waCDM, FlatwCDM,
LambdaCDM, Planck18, w0waCDM, w0wzCDM, wCDM, wpwaCDM)
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.flrw import (a_B_c2, critdens_const, ellipkinc,
H0units_to_invs, hyp2f1, quad)
from astropy.cosmology.parameter import Parameter
from astropy.utils.compat.optional_deps import HAS_SCIPY
from .conftest import get_redshift_methods
from .test_core import CosmologySubclassTest as CosmologyTest, valid_zs, invalid_zs
from .test_core import FlatCosmologyMixinTest, ParameterTestMixin
##############################################################################
# SETUP / TEARDOWN
class SubFLRW(FLRW):
def w(self, z):
return super().w(z)
##############################################################################
# TESTS
##############################################################################
@pytest.mark.skipif(HAS_SCIPY, reason="scipy is installed")
def test_optional_deps_functions():
"""Test stand-in functions when optional dependencies not installed."""
with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.integrate'"):
quad()
with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.special'"):
ellipkinc()
with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.special'"):
hyp2f1()
##############################################################################
class ParameterH0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` H0 on a Cosmology.
H0 is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_H0(self, cosmo_cls, cosmo):
"""Test Parameter ``H0``."""
unit = u.Unit("km/(s Mpc)")
# on the class
assert isinstance(cosmo_cls.H0, Parameter)
assert "Hubble constant" in cosmo_cls.H0.__doc__
assert cosmo_cls.H0.unit == unit
# validation
assert cosmo_cls.H0.validate(cosmo, 1) == 1 * unit
assert cosmo_cls.H0.validate(cosmo, 10 * unit) == 10 * unit
with pytest.raises(ValueError, match="H0 is a non-scalar quantity"):
cosmo_cls.H0.validate(cosmo, [1, 2])
# on the instance
assert cosmo.H0 is cosmo._H0
assert cosmo.H0 == self._cls_args["H0"]
assert isinstance(cosmo.H0, u.Quantity) and cosmo.H0.unit == unit
def test_init_H0(self, cosmo_cls, ba):
"""Test initialization for values of ``H0``."""
# test that it works with units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.H0 == ba.arguments["H0"]
# also without units
ba.arguments["H0"] = ba.arguments["H0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.H0.value == ba.arguments["H0"]
# fails for non-scalar
ba.arguments["H0"] = u.Quantity([70, 100], u.km / u.s / u.Mpc)
with pytest.raises(ValueError, match="H0 is a non-scalar quantity"):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterOm0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Om0 on a Cosmology.
Om0 is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Om0(self, cosmo_cls, cosmo):
"""Test Parameter ``Om0``."""
# on the class
assert isinstance(cosmo_cls.Om0, Parameter)
assert "Omega matter" in cosmo_cls.Om0.__doc__
# validation
assert cosmo_cls.Om0.validate(cosmo, 1) == 1
assert cosmo_cls.Om0.validate(cosmo, 10 * u.one) == 10
with pytest.raises(ValueError, match="Om0 cannot be negative"):
cosmo_cls.Om0.validate(cosmo, -1)
# on the instance
assert cosmo.Om0 is cosmo._Om0
assert cosmo.Om0 == self._cls_args["Om0"]
assert isinstance(cosmo.Om0, float)
def test_init_Om0(self, cosmo_cls, ba):
"""Test initialization for values of ``Om0``."""
# test that it works with units
ba.arguments["Om0"] = ba.arguments["Om0"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Om0 == ba.arguments["Om0"]
# also without units
ba.arguments["Om0"] = ba.arguments["Om0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Om0 == ba.arguments["Om0"]
# fails for negative numbers
ba.arguments["Om0"] = -0.27
with pytest.raises(ValueError, match="Om0 cannot be negative."):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterOde0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Ode0 on a Cosmology.
Ode0 is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Parameter_Ode0(self, cosmo_cls):
"""Test Parameter ``Ode0`` on the class."""
assert isinstance(cosmo_cls.Ode0, Parameter)
assert "Omega dark energy" in cosmo_cls.Ode0.__doc__
def test_Parameter_Ode0_validation(self, cosmo_cls, cosmo):
"""Test Parameter ``Ode0`` validation."""
assert cosmo_cls.Ode0.validate(cosmo, 1.1) == 1.1
assert cosmo_cls.Ode0.validate(cosmo, 10 * u.one) == 10.0
with pytest.raises(TypeError, match="only dimensionless"):
cosmo_cls.Ode0.validate(cosmo, 10 * u.km)
def test_Ode0(self, cosmo):
"""Test Parameter ``Ode0`` validation."""
# if Ode0 is a parameter, test its value
assert cosmo.Ode0 is cosmo._Ode0
assert cosmo.Ode0 == self._cls_args["Ode0"]
assert isinstance(cosmo.Ode0, float)
def test_init_Ode0(self, cosmo_cls, ba):
"""Test initialization for values of ``Ode0``."""
# test that it works with units
ba.arguments["Ode0"] = ba.arguments["Ode0"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ode0 == ba.arguments["Ode0"]
# also without units
ba.arguments["Ode0"] = ba.arguments["Ode0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ode0 == ba.arguments["Ode0"]
# Setting param to 0 respects that. Note this test uses ``Ode()``.
ba.arguments["Ode0"] = 0.0
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert u.allclose(cosmo.Ode([0, 1, 2, 3]), [0, 0, 0, 0])
assert u.allclose(cosmo.Ode(1), 0)
# Must be dimensionless or have no units. Errors otherwise.
ba.arguments["Ode0"] = 10 * u.km
with pytest.raises(TypeError, match="only dimensionless"):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterTcmb0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Tcmb0 on a Cosmology.
Tcmb0 is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Tcmb0(self, cosmo_cls, cosmo):
"""Test Parameter ``Tcmb0``."""
# on the class
assert isinstance(cosmo_cls.Tcmb0, Parameter)
assert "Temperature of the CMB" in cosmo_cls.Tcmb0.__doc__
assert cosmo_cls.Tcmb0.unit == u.K
# validation
assert cosmo_cls.Tcmb0.validate(cosmo, 1) == 1 * u.K
assert cosmo_cls.Tcmb0.validate(cosmo, 10 * u.K) == 10 * u.K
with pytest.raises(ValueError, match="Tcmb0 is a non-scalar quantity"):
cosmo_cls.Tcmb0.validate(cosmo, [1, 2])
# on the instance
assert cosmo.Tcmb0 is cosmo._Tcmb0
assert cosmo.Tcmb0 == self.cls_kwargs["Tcmb0"]
assert isinstance(cosmo.Tcmb0, u.Quantity) and cosmo.Tcmb0.unit == u.K
def test_init_Tcmb0(self, cosmo_cls, ba):
"""Test initialization for values of ``Tcmb0``."""
# test that it works with units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Tcmb0 == ba.arguments["Tcmb0"]
# also without units
ba.arguments["Tcmb0"] = ba.arguments["Tcmb0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Tcmb0.value == ba.arguments["Tcmb0"]
# must be a scalar
ba.arguments["Tcmb0"] = u.Quantity([0.0, 2], u.K)
with pytest.raises(ValueError, match="Tcmb0 is a non-scalar quantity"):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterNeffTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Neff on a Cosmology.
Neff is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Neff(self, cosmo_cls, cosmo):
"""Test Parameter ``Neff``."""
# on the class
assert isinstance(cosmo_cls.Neff, Parameter)
assert "Number of effective neutrino species" in cosmo_cls.Neff.__doc__
# validation
assert cosmo_cls.Neff.validate(cosmo, 1) == 1
assert cosmo_cls.Neff.validate(cosmo, 10 * u.one) == 10
with pytest.raises(ValueError, match="Neff cannot be negative"):
cosmo_cls.Neff.validate(cosmo, -1)
# on the instance
assert cosmo.Neff is cosmo._Neff
assert cosmo.Neff == self.cls_kwargs.get("Neff", 3.04)
assert isinstance(cosmo.Neff, float)
def test_init_Neff(self, cosmo_cls, ba):
"""Test initialization for values of ``Neff``."""
# test that it works with units
ba.arguments["Neff"] = ba.arguments["Neff"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Neff == ba.arguments["Neff"]
# also without units
ba.arguments["Neff"] = ba.arguments["Neff"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Neff == ba.arguments["Neff"]
ba.arguments["Neff"] = -1
with pytest.raises(ValueError):
cosmo_cls(*ba.args, **ba.kwargs)
class Parameterm_nuTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` m_nu on a Cosmology.
m_nu is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_m_nu(self, cosmo_cls, cosmo):
"""Test Parameter ``m_nu``."""
# on the class
assert isinstance(cosmo_cls.m_nu, Parameter)
assert "Mass of neutrino species" in cosmo_cls.m_nu.__doc__
assert cosmo_cls.m_nu.unit == u.eV
assert cosmo_cls.m_nu.equivalencies == u.mass_energy()
assert cosmo_cls.m_nu.format_spec == ""
# on the instance
# assert cosmo.m_nu is cosmo._m_nu
assert u.allclose(cosmo.m_nu, [0.0, 0.0, 0.0] * u.eV)
# set differently depending on the other inputs
if cosmo.Tnu0.value == 0:
assert cosmo.m_nu is None
elif not cosmo._massivenu: # only massless
assert u.allclose(cosmo.m_nu, 0 * u.eV)
elif self._nmasslessnu == 0: # only massive
assert cosmo.m_nu == cosmo._massivenu_mass
else: # a mix -- the most complicated case
assert u.allclose(cosmo.m_nu[:self._nmasslessnu], 0 * u.eV)
assert u.allclose(cosmo.m_nu[self._nmasslessnu], cosmo._massivenu_mass)
def test_init_m_nu(self, cosmo_cls, ba):
"""Test initialization for values of ``m_nu``.
Note this requires the class to have a property ``has_massive_nu``.
"""
# Test that it works when m_nu has units.
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert np.all(cosmo.m_nu == ba.arguments["m_nu"]) # (& checks len, unit)
assert not cosmo.has_massive_nu
assert cosmo.m_nu.unit == u.eV # explicitly check unit once.
# And it works when m_nu doesn't have units.
ba.arguments["m_nu"] = ba.arguments["m_nu"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert np.all(cosmo.m_nu.value == ba.arguments["m_nu"])
assert not cosmo.has_massive_nu
# A negative m_nu raises an exception.
tba = copy.copy(ba)
tba.arguments["m_nu"] = u.Quantity([-0.3, 0.2, 0.1], u.eV)
with pytest.raises(ValueError, match="invalid"):
cosmo_cls(*tba.args, **tba.kwargs)
def test_init_m_nu_and_Neff(self, cosmo_cls, ba):
"""Test initialization for values of ``m_nu`` and ``Neff``.
Note this test requires ``Neff`` as constructor input, and a property
``has_massive_nu``.
"""
# Mismatch with Neff = wrong number of neutrinos
tba = copy.copy(ba)
tba.arguments["Neff"] = 4.05
tba.arguments["m_nu"] = u.Quantity([0.15, 0.2, 0.1], u.eV)
with pytest.raises(ValueError, match="unexpected number of neutrino"):
cosmo_cls(*tba.args, **tba.kwargs)
# No neutrinos, but Neff
tba.arguments["m_nu"] = 0
cosmo = cosmo_cls(*tba.args, **tba.kwargs)
assert not cosmo.has_massive_nu
assert len(cosmo.m_nu) == 4
assert cosmo.m_nu.unit == u.eV
assert u.allclose(cosmo.m_nu, 0 * u.eV)
# TODO! move this test when create ``test_nu_relative_density``
assert u.allclose(cosmo.nu_relative_density(1.0), 0.22710731766 * 4.05, rtol=1e-6)
# All massive neutrinos case, len from Neff
tba.arguments["m_nu"] = 0.1 * u.eV
cosmo = cosmo_cls(*tba.args, **tba.kwargs)
assert cosmo.has_massive_nu
assert len(cosmo.m_nu) == 4
assert cosmo.m_nu.unit == u.eV
assert u.allclose(cosmo.m_nu, [0.1, 0.1, 0.1, 0.1] * u.eV)
def test_init_m_nu_override_by_Tcmb0(self, cosmo_cls, ba):
"""Test initialization for values of ``m_nu``.
Note this test requires ``Tcmb0`` as constructor input, and a property
``has_massive_nu``.
"""
# If Neff = 0, m_nu is None.
tba = copy.copy(ba)
tba.arguments["Neff"] = 0
        cosmo = cosmo_cls(*tba.args, **tba.kwargs)
assert cosmo.m_nu is None
assert not cosmo.has_massive_nu
# If Tcmb0 = 0, m_nu is None
tba = copy.copy(ba)
tba.arguments["Tcmb0"] = 0
        cosmo = cosmo_cls(*tba.args, **tba.kwargs)
assert cosmo.m_nu is None
assert not cosmo.has_massive_nu
class ParameterOb0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Ob0 on a Cosmology.
Ob0 is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Ob0(self, cosmo_cls, cosmo):
"""Test Parameter ``Ob0``."""
# on the class
assert isinstance(cosmo_cls.Ob0, Parameter)
assert "Omega baryon;" in cosmo_cls.Ob0.__doc__
# validation
assert cosmo_cls.Ob0.validate(cosmo, None) is None
assert cosmo_cls.Ob0.validate(cosmo, 0.1) == 0.1
assert cosmo_cls.Ob0.validate(cosmo, 0.1 * u.one) == 0.1
with pytest.raises(ValueError, match="Ob0 cannot be negative"):
cosmo_cls.Ob0.validate(cosmo, -1)
with pytest.raises(ValueError, match="baryonic density can not be larger"):
cosmo_cls.Ob0.validate(cosmo, cosmo.Om0 + 1)
# on the instance
assert cosmo.Ob0 is cosmo._Ob0
assert cosmo.Ob0 == 0.03
def test_init_Ob0(self, cosmo_cls, ba):
"""Test initialization for values of ``Ob0``."""
# test that it works with units
assert isinstance(ba.arguments["Ob0"], u.Quantity)
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ob0 == ba.arguments["Ob0"]
# also without units
ba.arguments["Ob0"] = ba.arguments["Ob0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ob0 == ba.arguments["Ob0"]
# Setting param to 0 respects that. Note this test uses ``Ob()``.
ba.arguments["Ob0"] = 0.0
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ob0 == 0.0
if not self.abstract_w:
assert u.allclose(cosmo.Ob(1), 0)
assert u.allclose(cosmo.Ob([0, 1, 2, 3]), [0, 0, 0, 0])
# Negative Ob0 errors
tba = copy.copy(ba)
tba.arguments["Ob0"] = -0.04
with pytest.raises(ValueError, match="Ob0 cannot be negative"):
cosmo_cls(*tba.args, **tba.kwargs)
# Ob0 > Om0 errors
tba.arguments["Ob0"] = tba.arguments["Om0"] + 0.1
with pytest.raises(ValueError, match="baryonic density can not be larger"):
cosmo_cls(*tba.args, **tba.kwargs)
# No baryons specified means baryon-specific methods fail.
tba = copy.copy(ba)
tba.arguments.pop("Ob0", None)
cosmo = cosmo_cls(*tba.args, **tba.kwargs)
with pytest.raises(ValueError):
cosmo.Ob(1)
# also means DM fraction is undefined
with pytest.raises(ValueError):
cosmo.Odm(1)
# The default value is None
assert cosmo_cls._init_signature.parameters["Ob0"].default is None
class TestFLRW(CosmologyTest,
ParameterH0TestMixin, ParameterOm0TestMixin, ParameterOde0TestMixin,
ParameterTcmb0TestMixin, ParameterNeffTestMixin, Parameterm_nuTestMixin,
ParameterOb0TestMixin):
"""Test :class:`astropy.cosmology.FLRW`."""
abstract_w = True
def setup_class(self):
"""
Setup for testing.
FLRW is abstract, so tests are done on a subclass.
"""
# make sure SubCosmology is known
_COSMOLOGY_CLASSES["SubFLRW"] = SubFLRW
self.cls = SubFLRW
self._cls_args = dict(H0=70 * u.km / u.s / u.Mpc, Om0=0.27 * u.one, Ode0=0.73 * u.one)
self.cls_kwargs = dict(Tcmb0=3.0 * u.K, Ob0=0.03 * u.one,
name=self.__class__.__name__, meta={"a": "b"})
def teardown_class(self):
super().teardown_class(self)
_COSMOLOGY_CLASSES.pop("SubFLRW", None)
@pytest.fixture(scope="class")
def nonflatcosmo(self):
"""A non-flat cosmology used in equivalence tests."""
return LambdaCDM(70, 0.4, 0.8)
# ===============================================================
# Method & Attribute Tests
def test_init(self, cosmo_cls):
"""Test initialization."""
super().test_init(cosmo_cls)
# TODO! tests for initializing calculated values, e.g. `h`
# TODO! transfer tests for initializing neutrinos
def test_init_Tcmb0_zeroing(self, cosmo_cls, ba):
"""Test if setting Tcmb0 parameter to 0 influences other parameters.
TODO: consider moving this test to ``FLRWSubclassTest``
"""
ba.arguments["Tcmb0"] = 0.0
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ogamma0 == 0.0
assert cosmo.Onu0 == 0.0
if not self.abstract_w:
assert u.allclose(cosmo.Ogamma(1.5), [0, 0, 0, 0])
assert u.allclose(cosmo.Ogamma([0, 1, 2, 3]), [0, 0, 0, 0])
assert u.allclose(cosmo.Onu(1.5), [0, 0, 0, 0])
assert u.allclose(cosmo.Onu([0, 1, 2, 3]), [0, 0, 0, 0])
# ---------------------------------------------------------------
# Properties
def test_Odm0(self, cosmo_cls, cosmo):
"""Test property ``Odm0``."""
# on the class
assert isinstance(cosmo_cls.Odm0, property)
assert cosmo_cls.Odm0.fset is None # immutable
# on the instance
assert cosmo.Odm0 is cosmo._Odm0
# Odm0 can be None, if Ob0 is None. Otherwise DM = matter - baryons.
if cosmo.Ob0 is None:
assert cosmo.Odm0 is None
else:
assert np.allclose(cosmo.Odm0, cosmo.Om0 - cosmo.Ob0)
def test_Ok0(self, cosmo_cls, cosmo):
"""Test property ``Ok0``."""
# on the class
assert isinstance(cosmo_cls.Ok0, property)
assert cosmo_cls.Ok0.fset is None # immutable
# on the instance
assert cosmo.Ok0 is cosmo._Ok0
assert np.allclose(cosmo.Ok0, 1.0 - (cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0))
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``."""
# on the class
assert isinstance(cosmo_cls.is_flat, property)
assert cosmo_cls.is_flat.fset is None # immutable
# on the instance
assert isinstance(cosmo.is_flat, bool)
assert cosmo.is_flat is bool((cosmo.Ok0 == 0.0) and (cosmo.Otot0 == 1.0))
def test_Tnu0(self, cosmo_cls, cosmo):
"""Test property ``Tnu0``."""
# on the class
assert isinstance(cosmo_cls.Tnu0, property)
assert cosmo_cls.Tnu0.fset is None # immutable
# on the instance
assert cosmo.Tnu0 is cosmo._Tnu0
assert cosmo.Tnu0.unit == u.K
assert u.allclose(cosmo.Tnu0, 0.7137658555036082 * cosmo.Tcmb0, rtol=1e-5)
def test_has_massive_nu(self, cosmo_cls, cosmo):
"""Test property ``has_massive_nu``."""
# on the class
assert isinstance(cosmo_cls.has_massive_nu, property)
assert cosmo_cls.has_massive_nu.fset is None # immutable
# on the instance
if cosmo.Tnu0 == 0:
assert cosmo.has_massive_nu is False
else:
assert cosmo.has_massive_nu is cosmo._massivenu
def test_h(self, cosmo_cls, cosmo):
"""Test property ``h``."""
# on the class
assert isinstance(cosmo_cls.h, property)
assert cosmo_cls.h.fset is None # immutable
# on the instance
assert cosmo.h is cosmo._h
assert np.allclose(cosmo.h, cosmo.H0.value / 100.0)
def test_hubble_time(self, cosmo_cls, cosmo):
"""Test property ``hubble_time``."""
# on the class
assert isinstance(cosmo_cls.hubble_time, property)
assert cosmo_cls.hubble_time.fset is None # immutable
# on the instance
assert cosmo.hubble_time is cosmo._hubble_time
assert u.allclose(cosmo.hubble_time, (1 / cosmo.H0) << u.Gyr)
def test_hubble_distance(self, cosmo_cls, cosmo):
"""Test property ``hubble_distance``."""
# on the class
assert isinstance(cosmo_cls.hubble_distance, property)
assert cosmo_cls.hubble_distance.fset is None # immutable
# on the instance
assert cosmo.hubble_distance is cosmo._hubble_distance
assert cosmo.hubble_distance == (const.c / cosmo._H0).to(u.Mpc)
def test_critical_density0(self, cosmo_cls, cosmo):
"""Test property ``critical_density0``."""
# on the class
assert isinstance(cosmo_cls.critical_density0, property)
assert cosmo_cls.critical_density0.fset is None # immutable
# on the instance
assert cosmo.critical_density0 is cosmo._critical_density0
assert cosmo.critical_density0.unit == u.g / u.cm ** 3
cd0value = critdens_const * (cosmo.H0.value * H0units_to_invs) ** 2
assert cosmo.critical_density0.value == cd0value
def test_Ogamma0(self, cosmo_cls, cosmo):
"""Test property ``Ogamma0``."""
# on the class
assert isinstance(cosmo_cls.Ogamma0, property)
assert cosmo_cls.Ogamma0.fset is None # immutable
# on the instance
assert cosmo.Ogamma0 is cosmo._Ogamma0
        # Ogamma \propto T^4 / rhocrit
expect = a_B_c2 * cosmo.Tcmb0.value ** 4 / cosmo.critical_density0.value
assert np.allclose(cosmo.Ogamma0, expect)
# check absolute equality to 0 if Tcmb0 is 0
if cosmo.Tcmb0 == 0:
assert cosmo.Ogamma0 == 0
def test_Onu0(self, cosmo_cls, cosmo):
"""Test property ``Onu0``."""
# on the class
assert isinstance(cosmo_cls.Onu0, property)
assert cosmo_cls.Onu0.fset is None # immutable
# on the instance
assert cosmo.Onu0 is cosmo._Onu0
# neutrino temperature <= photon temperature since the neutrinos
# decouple first.
if cosmo.has_massive_nu: # Tcmb0 > 0 & has massive
# check the expected formula
assert cosmo.Onu0 == cosmo.Ogamma0 * cosmo.nu_relative_density(0)
            # a sanity check on the ratio of neutrinos to photons
# technically it could be 1, but not for any of the tested cases.
assert cosmo.nu_relative_density(0) <= 1
elif cosmo.Tcmb0 == 0:
assert cosmo.Onu0 == 0
else:
# check the expected formula
assert cosmo.Onu0 == 0.22710731766 * cosmo._Neff * cosmo.Ogamma0
# and check compatibility with nu_relative_density
assert np.allclose(cosmo.nu_relative_density(0), 0.22710731766 * cosmo._Neff)
def test_Otot0(self, cosmo):
"""Test :attr:`astropy.cosmology.FLRW.Otot0`."""
assert cosmo.Otot0 == cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0 + cosmo.Ode0 + cosmo.Ok0
# ---------------------------------------------------------------
# Methods
def test_w(self, cosmo):
"""Test abstract :meth:`astropy.cosmology.FLRW.w`."""
with pytest.raises(NotImplementedError, match="not implemented"):
cosmo.w(1)
def test_Otot(self, cosmo):
"""Test :meth:`astropy.cosmology.FLRW.Otot`."""
exception = NotImplementedError if HAS_SCIPY else ModuleNotFoundError
with pytest.raises(exception):
assert cosmo.Otot(1)
def test_efunc_vs_invefunc(self, cosmo):
"""
Test that efunc and inv_efunc give inverse values.
Here they just fail b/c no ``w(z)`` or no scipy.
"""
exception = NotImplementedError if HAS_SCIPY else ModuleNotFoundError
with pytest.raises(exception):
cosmo.efunc(0.5)
with pytest.raises(exception):
cosmo.inv_efunc(0.5)
# ---------------------------------------------------------------
# from Cosmology
def test_clone_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_change_param(cosmo)
# don't change any values
kwargs = cosmo._init_arguments.copy()
kwargs.pop("name", None) # make sure not setting name
c = cosmo.clone(**kwargs)
assert c.__class__ == cosmo.__class__
assert c.name == cosmo.name + " (modified)"
assert c.is_equivalent(cosmo)
# change ``H0``
# Note that H0 affects Ode0 because it changes Ogamma0
c = cosmo.clone(H0=100)
assert c.__class__ == cosmo.__class__
assert c.name == cosmo.name + " (modified)"
assert c.H0.value == 100
for n in (set(cosmo.__parameters__) - {"H0"}):
v = getattr(c, n)
if v is None:
assert v is getattr(cosmo, n)
else:
assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1))
assert not u.allclose(c.Ogamma0, cosmo.Ogamma0)
assert not u.allclose(c.Onu0, cosmo.Onu0)
# change multiple things
c = cosmo.clone(name="new name", H0=100, Tcmb0=2.8, meta=dict(zz="tops"))
assert c.__class__ == cosmo.__class__
assert c.name == "new name"
assert c.H0.value == 100
assert c.Tcmb0.value == 2.8
assert c.meta == {**cosmo.meta, **dict(zz="tops")}
for n in (set(cosmo.__parameters__) - {"H0", "Tcmb0"}):
v = getattr(c, n)
if v is None:
assert v is getattr(cosmo, n)
else:
assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1))
assert not u.allclose(c.Ogamma0, cosmo.Ogamma0)
assert not u.allclose(c.Onu0, cosmo.Onu0)
assert not u.allclose(c.Tcmb0.value, cosmo.Tcmb0.value)
def test_is_equivalent(self, cosmo):
"""Test :meth:`astropy.cosmology.FLRW.is_equivalent`."""
super().test_is_equivalent(cosmo) # pass to CosmologySubclassTest
# test against a FlatFLRWMixin
# case (3) in FLRW.is_equivalent
if isinstance(cosmo, FlatLambdaCDM):
assert cosmo.is_equivalent(Planck18)
assert Planck18.is_equivalent(cosmo)
else:
assert not cosmo.is_equivalent(Planck18)
assert not Planck18.is_equivalent(cosmo)
class FLRWSubclassTest(TestFLRW):
"""
Test subclasses of :class:`astropy.cosmology.FLRW`.
This is broken away from ``TestFLRW``, because ``FLRW`` is an ABC and
subclasses must override some methods.
"""
abstract_w = False
@abc.abstractmethod
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
# ===============================================================
# Method & Attribute Tests
_FLRW_redshift_methods = get_redshift_methods(FLRW, allow_private=True, allow_z2=False)
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize('method', _FLRW_redshift_methods)
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
with pytest.raises(exc):
getattr(cosmo, method)(z)
@pytest.mark.parametrize("z", valid_zs)
@abc.abstractmethod
def test_w(self, cosmo, z):
"""Test :meth:`astropy.cosmology.FLRW.w`.
Since ``w`` is abstract, each test class needs to define further tests.
"""
# super().test_w(cosmo, z) # NOT b/c abstract `w(z)`
w = cosmo.w(z)
assert np.shape(w) == np.shape(z) # test same shape
assert u.Quantity(w).unit == u.one # test no units or dimensionless
# -------------------------------------------
@pytest.mark.parametrize("z", valid_zs)
def test_Otot(self, cosmo, z):
"""Test :meth:`astropy.cosmology.FLRW.Otot`."""
# super().test_Otot(cosmo) # NOT b/c abstract `w(z)`
assert np.allclose(
cosmo.Otot(z),
cosmo.Om(z) + cosmo.Ogamma(z) + cosmo.Onu(z) + cosmo.Ode(z) + cosmo.Ok(z))
# ---------------------------------------------------------------
def test_efunc_vs_invefunc(self, cosmo):
"""Test that ``efunc`` and ``inv_efunc`` give inverse values.
Note that the test doesn't need scipy because it doesn't need to call
``de_density_scale``.
"""
# super().test_efunc_vs_invefunc(cosmo) # NOT b/c abstract `w(z)`
z0 = 0.5
z = np.array([0.5, 1.0, 2.0, 5.0])
assert np.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert np.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
# -----------------------------------------------------------------------------
class ParameterFlatOde0TestMixin(ParameterOde0TestMixin):
"""Tests for `astropy.cosmology.Parameter` Ode0 on a flat Cosmology.
This will augment or override some tests in ``ParameterOde0TestMixin``.
    Ode0 is a descriptor, which is tested by a mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Parameter_Ode0(self, cosmo_cls):
"""Test Parameter ``Ode0`` on the class."""
super().test_Parameter_Ode0(cosmo_cls)
assert cosmo_cls.Ode0.derived == True
def test_Ode0(self, cosmo):
"""Test no-longer-Parameter ``Ode0``."""
assert cosmo.Ode0 is cosmo._Ode0
assert cosmo.Ode0 == 1.0 - (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0)
def test_init_Ode0(self, cosmo_cls, ba):
"""Test initialization for values of ``Ode0``."""
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ode0 == 1.0 - (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0 + cosmo.Ok0)
# Ode0 is not in the signature
with pytest.raises(TypeError, match="Ode0"):
cosmo_cls(*ba.args, **ba.kwargs, Ode0=1)
class FlatFLRWMixinTest(FlatCosmologyMixinTest, ParameterFlatOde0TestMixin):
"""Tests for :class:`astropy.cosmology.FlatFLRWMixin` subclasses.
    E.g. to use this class::
class TestFlatSomeFLRW(FlatFLRWMixinTest, TestSomeFLRW):
...
"""
def setup_class(self):
"""Setup for testing.
Set up as for regular FLRW test class, but remove dark energy component
        since flat cosmologies are forbidden from taking Ode0 as an argument,
see ``test_init_subclass``.
"""
super().setup_class(self)
self._cls_args.pop("Ode0")
# ===============================================================
# Method & Attribute Tests
# ---------------------------------------------------------------
# class-level
def test_init_subclass(self, cosmo_cls):
"""Test initializing subclass, mostly that can't have Ode0 in init."""
super().test_init_subclass(cosmo_cls)
with pytest.raises(TypeError, match="subclasses of"):
class HASOde0SubClass(cosmo_cls):
def __init__(self, Ode0):
pass
_COSMOLOGY_CLASSES.pop(HASOde0SubClass.__qualname__, None)
# ---------------------------------------------------------------
# instance-level
def test_init(self, cosmo_cls):
super().test_init(cosmo_cls)
cosmo = cosmo_cls(*self.cls_args, **self.cls_kwargs)
assert cosmo._Ok0 == 0.0
assert cosmo._Ode0 == 1.0 - (cosmo._Om0 + cosmo._Ogamma0 + cosmo._Onu0 + cosmo._Ok0)
def test_Ok0(self, cosmo_cls, cosmo):
"""Test property ``Ok0``."""
super().test_Ok0(cosmo_cls, cosmo)
# for flat cosmologies, Ok0 is not *close* to 0, it *is* 0
assert cosmo.Ok0 == 0.0
def test_Otot0(self, cosmo):
"""Test :attr:`astropy.cosmology.FLRW.Otot0`. Should always be 1."""
super().test_Otot0(cosmo)
# for flat cosmologies, Otot0 is not *close* to 1, it *is* 1
assert cosmo.Otot0 == 1.0
@pytest.mark.parametrize("z", valid_zs)
def test_Otot(self, cosmo, z):
"""Test :meth:`astropy.cosmology.FLRW.Otot`. Should always be 1."""
super().test_Otot(cosmo, z)
# for flat cosmologies, Otot is 1, within precision.
assert u.allclose(cosmo.Otot(z), 1.0)
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize('method', FLRWSubclassTest._FLRW_redshift_methods - {"Otot"})
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
super().test_redshift_method_bad_input(cosmo, method, z, exc)
# ---------------------------------------------------------------
def test_is_equivalent(self, cosmo, nonflatcosmo):
"""Test :meth:`astropy.cosmology.FLRW.is_equivalent`."""
super().test_is_equivalent(cosmo) # pass to TestFLRW
# against non-flat Cosmology
assert not cosmo.is_equivalent(nonflatcosmo)
assert not nonflatcosmo.is_equivalent(cosmo)
# non-flat version of class
nonflat_cosmo_cls = cosmo.__class__.mro()[3]
# keys check in `test_is_equivalent_nonflat_class_different_params`
# non-flat
nonflat = nonflat_cosmo_cls(*self.cls_args, Ode0=0.9, **self.cls_kwargs)
assert not nonflat.is_equivalent(cosmo)
assert not cosmo.is_equivalent(nonflat)
# flat, but not FlatFLRWMixin
flat = nonflat_cosmo_cls(*self.cls_args,
Ode0=1.0 - cosmo.Om0 - cosmo.Ogamma0 - cosmo.Onu0,
**self.cls_kwargs)
flat._Ok0 = 0.0
assert flat.is_equivalent(cosmo)
assert cosmo.is_equivalent(flat)
def test_repr(self, cosmo_cls, cosmo):
"""
Test method ``.__repr__()``. Skip non-flat superclass test.
        e.g. `TestFlatLambdaCDM` -> `FlatFLRWMixinTest`
        vs `TestFlatLambdaCDM` -> `TestLambdaCDM` -> `FlatFLRWMixinTest`
"""
FLRWSubclassTest.test_repr(self, cosmo_cls, cosmo)
# test eliminated Ode0 from parameters
assert "Ode0" not in repr(cosmo)
# -----------------------------------------------------------------------------
class TestLambdaCDM(FLRWSubclassTest):
"""Test :class:`astropy.cosmology.LambdaCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = LambdaCDM
# ===============================================================
# Method & Attribute Tests
_FLRW_redshift_methods = get_redshift_methods(LambdaCDM, allow_private=True, allow_z2=False) - {"_dS_age"}
# `_dS_age` is removed because it doesn't strictly rely on the value of `z`,
# so any input that doesn't trip up ``np.shape`` is "valid"
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize('method', _FLRW_redshift_methods)
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
super().test_redshift_method_bad_input(cosmo, method, z, exc)
@pytest.mark.parametrize("z", valid_zs)
def test_w(self, cosmo, z):
"""Test :meth:`astropy.cosmology.LambdaCDM.w`."""
super().test_w(cosmo, z)
w = cosmo.w(z)
assert u.allclose(w, -1.0)
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = ("LambdaCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s), Om0=0.27,"
" Ode0=0.73, Tcmb0=3.0 K, Neff=3.04, m_nu=[0. 0. 0.] eV,"
" Ob0=0.03)")
assert repr(cosmo) == expected
# -----------------------------------------------------------------------------
class TestFlatLambdaCDM(FlatFLRWMixinTest, TestLambdaCDM):
"""Test :class:`astropy.cosmology.FlatLambdaCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = FlatLambdaCDM
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize('method', TestLambdaCDM._FLRW_redshift_methods - {"Otot"})
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
super().test_redshift_method_bad_input(cosmo, method, z, exc)
# ===============================================================
# Method & Attribute Tests
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = ("FlatLambdaCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s),"
" Om0=0.27, Tcmb0=3.0 K, Neff=3.04, m_nu=[0. 0. 0.] eV,"
" Ob0=0.03)")
assert repr(cosmo) == expected
# -----------------------------------------------------------------------------
class Parameterw0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` w0 on a Cosmology.
    w0 is a descriptor, which is tested by a mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_w0(self, cosmo_cls, cosmo):
"""Test Parameter ``w0``."""
# on the class
assert isinstance(cosmo_cls.w0, Parameter)
assert "Dark energy equation of state" in cosmo_cls.w0.__doc__
assert cosmo_cls.w0.unit is None
# on the instance
assert cosmo.w0 is cosmo._w0
assert cosmo.w0 == self.cls_kwargs["w0"]
def test_init_w0(self, cosmo_cls, ba):
"""Test initialization for values of ``w0``."""
# test that it works with units
ba.arguments["w0"] = ba.arguments["w0"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.w0 == ba.arguments["w0"]
# also without units
ba.arguments["w0"] = ba.arguments["w0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.w0 == ba.arguments["w0"]
# must be dimensionless
ba.arguments["w0"] = 10 * u.km
with pytest.raises(TypeError):
cosmo_cls(*ba.args, **ba.kwargs)
class TestwCDM(FLRWSubclassTest, Parameterw0TestMixin):
"""Test :class:`astropy.cosmology.wCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = wCDM
self.cls_kwargs.update(w0=-0.5)
# ===============================================================
# Method & Attribute Tests
def test_clone_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_change_param(cosmo)
# `w` params
c = cosmo.clone(w0=0.1)
assert c.w0 == 0.1
for n in (set(cosmo.__parameters__) - {"w0"}):
v = getattr(c, n)
if v is None:
assert v is getattr(cosmo, n)
else:
assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1))
@pytest.mark.parametrize("z", valid_zs)
def test_w(self, cosmo, z):
"""Test :meth:`astropy.cosmology.wCDM.w`."""
super().test_w(cosmo, z)
w = cosmo.w(z)
assert u.allclose(w, self.cls_kwargs["w0"])
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = ("wCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s), Om0=0.27,"
" Ode0=0.73, w0=-0.5, Tcmb0=3.0 K, Neff=3.04,"
" m_nu=[0. 0. 0.] eV, Ob0=0.03)")
assert repr(cosmo) == expected
# -----------------------------------------------------------------------------
class TestFlatwCDM(FlatFLRWMixinTest, TestwCDM):
"""Test :class:`astropy.cosmology.FlatwCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = FlatwCDM
self.cls_kwargs.update(w0=-0.5)
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = ("FlatwCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s), Om0=0.27,"
" w0=-0.5, Tcmb0=3.0 K, Neff=3.04, m_nu=[0. 0. 0.] eV,"
" Ob0=0.03)")
assert repr(cosmo) == expected
# -----------------------------------------------------------------------------
class ParameterwaTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` wa on a Cosmology.
    wa is a descriptor, which is tested by a mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_wa(self, cosmo_cls, cosmo):
"""Test Parameter ``wa``."""
# on the class
assert isinstance(cosmo_cls.wa, Parameter)
assert "Negative derivative" in cosmo_cls.wa.__doc__
assert cosmo_cls.wa.unit is None
# on the instance
assert cosmo.wa is cosmo._wa
assert cosmo.wa == self.cls_kwargs["wa"]
def test_init_wa(self, cosmo_cls, ba):
"""Test initialization for values of ``wa``."""
# test that it works with units
ba.arguments["wa"] = ba.arguments["wa"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.wa == ba.arguments["wa"]
# also without units
ba.arguments["wa"] = ba.arguments["wa"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.wa == ba.arguments["wa"]
# must be dimensionless
ba.arguments["wa"] = 10 * u.km
with pytest.raises(TypeError):
cosmo_cls(*ba.args, **ba.kwargs)
class Testw0waCDM(FLRWSubclassTest, Parameterw0TestMixin, ParameterwaTestMixin):
"""Test :class:`astropy.cosmology.w0waCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = w0waCDM
self.cls_kwargs.update(w0=-1, wa=-0.5)
# ===============================================================
# Method & Attribute Tests
def test_clone_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_change_param(cosmo)
# `w` params
c = cosmo.clone(w0=0.1, wa=0.2)
assert c.w0 == 0.1
assert c.wa == 0.2
for n in (set(cosmo.__parameters__) - {"w0", "wa"}):
v = getattr(c, n)
if v is None:
assert v is getattr(cosmo, n)
else:
assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1))
# @pytest.mark.parametrize("z", valid_zs) # TODO! recompute comparisons below
def test_w(self, cosmo):
"""Test :meth:`astropy.cosmology.w0waCDM.w`."""
# super().test_w(cosmo, z)
assert u.allclose(cosmo.w(1.0), -1.25)
assert u.allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),
[-1, -1.16666667, -1.25, -1.3, -1.34848485])
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = ("w0waCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s), Om0=0.27,"
" Ode0=0.73, w0=-1.0, wa=-0.5, Tcmb0=3.0 K, Neff=3.04,"
" m_nu=[0. 0. 0.] eV, Ob0=0.03)")
assert repr(cosmo) == expected
# -----------------------------------------------------------------------------
class TestFlatw0waCDM(FlatFLRWMixinTest, Testw0waCDM):
"""Test :class:`astropy.cosmology.Flatw0waCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = Flatw0waCDM
self.cls_kwargs.update(w0=-1, wa=-0.5)
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = ("Flatw0waCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s),"
" Om0=0.27, w0=-1.0, wa=-0.5, Tcmb0=3.0 K, Neff=3.04,"
" m_nu=[0. 0. 0.] eV, Ob0=0.03)")
assert repr(cosmo) == expected
# -----------------------------------------------------------------------------
class ParameterwpTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` wp on a Cosmology.
    wp is a descriptor, which is tested by a mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_wp(self, cosmo_cls, cosmo):
"""Test Parameter ``wp``."""
# on the class
assert isinstance(cosmo_cls.wp, Parameter)
assert "at the pivot" in cosmo_cls.wp.__doc__
assert cosmo_cls.wp.unit is None
# on the instance
assert cosmo.wp is cosmo._wp
assert cosmo.wp == self.cls_kwargs["wp"]
def test_init_wp(self, cosmo_cls, ba):
"""Test initialization for values of ``wp``."""
# test that it works with units
ba.arguments["wp"] = ba.arguments["wp"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.wp == ba.arguments["wp"]
# also without units
ba.arguments["wp"] = ba.arguments["wp"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.wp == ba.arguments["wp"]
# must be dimensionless
ba.arguments["wp"] = 10 * u.km
with pytest.raises(TypeError):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterzpTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` zp on a Cosmology.
    zp is a descriptor, which is tested by a mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_zp(self, cosmo_cls, cosmo):
"""Test Parameter ``zp``."""
# on the class
assert isinstance(cosmo_cls.zp, Parameter)
assert "pivot redshift" in cosmo_cls.zp.__doc__
assert cosmo_cls.zp.unit == cu.redshift
# on the instance
assert cosmo.zp is cosmo._zp
assert cosmo.zp == self.cls_kwargs["zp"] << cu.redshift
def test_init_zp(self, cosmo_cls, ba):
"""Test initialization for values of ``zp``."""
# test that it works with units
ba.arguments["zp"] = ba.arguments["zp"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.zp == ba.arguments["zp"]
# also without units
ba.arguments["zp"] = ba.arguments["zp"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.zp.value == ba.arguments["zp"]
# must be dimensionless
ba.arguments["zp"] = 10 * u.km
with pytest.raises(u.UnitConversionError):
cosmo_cls(*ba.args, **ba.kwargs)
class TestwpwaCDM(FLRWSubclassTest,
ParameterwpTestMixin, ParameterwaTestMixin, ParameterzpTestMixin):
"""Test :class:`astropy.cosmology.wpwaCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = wpwaCDM
self.cls_kwargs.update(wp=-0.9, wa=0.2, zp=0.5)
# ===============================================================
# Method & Attribute Tests
def test_clone_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_change_param(cosmo)
# `w` params
c = cosmo.clone(wp=0.1, wa=0.2, zp=14)
assert c.wp == 0.1
assert c.wa == 0.2
assert c.zp == 14
for n in (set(cosmo.__parameters__) - {"wp", "wa", "zp"}):
v = getattr(c, n)
if v is None:
assert v is getattr(cosmo, n)
else:
assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1))
# @pytest.mark.parametrize("z", valid_zs) # TODO! recompute comparisons below
def test_w(self, cosmo):
"""Test :meth:`astropy.cosmology.wpwaCDM.w`."""
# super().test_w(cosmo, z)
assert u.allclose(cosmo.w(0.5), -0.9)
assert u.allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
[-0.94848485, -0.93333333, -0.9, -0.84666667,
-0.82380952, -0.78266667])
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = ("wpwaCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s), Om0=0.27,"
" Ode0=0.73, wp=-0.9, wa=0.2, zp=0.5 redshift, Tcmb0=3.0 K,"
" Neff=3.04, m_nu=[0. 0. 0.] eV, Ob0=0.03)")
assert repr(cosmo) == expected
# -----------------------------------------------------------------------------
class ParameterwzTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` wz on a Cosmology.
    wz is a descriptor, which is tested by a mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_wz(self, cosmo_cls, cosmo):
"""Test Parameter ``wz``."""
# on the class
assert isinstance(cosmo_cls.wz, Parameter)
assert "Derivative of the dark energy" in cosmo_cls.wz.__doc__
assert cosmo_cls.wz.unit is None
# on the instance
assert cosmo.wz is cosmo._wz
assert cosmo.wz == self.cls_kwargs["wz"]
def test_init_wz(self, cosmo_cls, ba):
"""Test initialization for values of ``wz``."""
# test that it works with units
ba.arguments["wz"] = ba.arguments["wz"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.wz == ba.arguments["wz"]
# also without units
ba.arguments["wz"] = ba.arguments["wz"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.wz == ba.arguments["wz"]
# must be dimensionless
ba.arguments["wz"] = 10 * u.km
with pytest.raises(TypeError):
cosmo_cls(*ba.args, **ba.kwargs)
class Testw0wzCDM(FLRWSubclassTest,
Parameterw0TestMixin, ParameterwzTestMixin):
"""Test :class:`astropy.cosmology.w0wzCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = w0wzCDM
self.cls_kwargs.update(w0=-1, wz=0.5)
# ===============================================================
# Method & Attribute Tests
def test_clone_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_change_param(cosmo)
# `w` params
c = cosmo.clone(w0=0.1, wz=0.2)
assert c.w0 == 0.1
assert c.wz == 0.2
for n in (set(cosmo.__parameters__) - {"w0", "wz"}):
v = getattr(c, n)
if v is None:
assert v is getattr(cosmo, n)
else:
assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1))
# @pytest.mark.parametrize("z", valid_zs) # TODO! recompute comparisons below
def test_w(self, cosmo):
"""Test :meth:`astropy.cosmology.w0wzCDM.w`."""
# super().test_w(cosmo, z)
assert u.allclose(cosmo.w(1.0), -0.5)
assert u.allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),
[-1.0, -0.75, -0.5, -0.25, 0.15])
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = ("w0wzCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s), Om0=0.27,"
" Ode0=0.73, w0=-1.0, wz=0.5, Tcmb0=3.0 K, Neff=3.04,"
" m_nu=[0. 0. 0.] eV, Ob0=0.03)")
assert repr(cosmo) == expected
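# --- Editorial sketch (not part of the original test module) -----------------
# The mixins above compose into concrete test classes in the same way for any
# further FLRW subclass; ``MyCDM`` and its ``w0`` value below are assumptions
# used purely for illustration:
#
#   class TestMyCDM(FLRWSubclassTest, Parameterw0TestMixin):
#       def setup_class(self):
#           super().setup_class(self)
#           self.cls = MyCDM  # hypothetical cosmology class
#           self.cls_kwargs.update(w0=-0.9)
#
# The shared ``cosmo_cls``/``cosmo`` fixtures are then built from ``self.cls``,
# ``self._cls_args`` and ``self.cls_kwargs`` by the common test base class.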
|
the-stack_106_22330 | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import io
import os
import random
import time
import uuid
from azure.storage.blob import (
ContentSettings,
SequenceNumberAction,
)
class PageBlobSamples():
def __init__(self, account):
self.account = account
def run_all_samples(self):
self.service = self.account.create_page_blob_service()
self.delete_blob()
self.blob_metadata()
self.blob_properties()
self.blob_exists()
self.copy_blob()
self.snapshot_blob()
self.lease_blob()
self.create_blob()
self.page_operations()
self.resize_blob()
self.set_sequence_number()
self.blob_with_bytes()
self.blob_with_stream()
self.blob_with_path()
def _get_resource_reference(self, prefix):
return '{}{}'.format(prefix, str(uuid.uuid4()).replace('-', ''))
def _get_blob_reference(self, prefix='blob'):
return self._get_resource_reference(prefix)
def _create_blob(self, container_name, prefix='blob'):
blob_name = self._get_resource_reference(prefix)
self.service.create_blob(container_name, blob_name, 512)
return blob_name
def _create_container(self, prefix='container'):
container_name = self._get_resource_reference(prefix)
self.service.create_container(container_name)
return container_name
def _get_random_bytes(self, size):
rand = random.Random()
result = bytearray(size)
for i in range(size):
result[i] = rand.randint(0, 255)
return bytes(result)
def delete_blob(self):
container_name = self._create_container()
blob_name = self._create_blob(container_name)
# Basic
self.service.delete_blob(container_name, blob_name)
self.service.delete_container(container_name)
def blob_metadata(self):
container_name = self._create_container()
blob_name = self._create_blob(container_name)
metadata = {'val1': 'foo', 'val2': 'blah'}
# Basic
self.service.set_blob_metadata(container_name, blob_name, metadata=metadata)
metadata = self.service.get_blob_metadata(container_name, blob_name) # metadata={'val1': 'foo', 'val2': 'blah'}
# Replaces values, does not merge
metadata = {'new': 'val'}
self.service.set_blob_metadata(container_name, blob_name, metadata=metadata)
metadata = self.service.get_blob_metadata(container_name, blob_name) # metadata={'new': 'val'}
# Capital letters
metadata = {'NEW': 'VAL'}
self.service.set_blob_metadata(container_name, blob_name, metadata=metadata)
metadata = self.service.get_blob_metadata(container_name, blob_name) # metadata={'new': 'VAL'}
# Clearing
self.service.set_blob_metadata(container_name, blob_name)
metadata = self.service.get_blob_metadata(container_name, blob_name) # metadata={}
self.service.delete_container(container_name)
def blob_properties(self):
container_name = self._create_container()
blob_name = self._get_blob_reference()
metadata = {'val1': 'foo', 'val2': 'blah'}
self.service.create_blob(container_name, blob_name, 512, metadata=metadata)
settings = ContentSettings(content_type='html', content_language='fr')
# Basic
self.service.set_blob_properties(container_name, blob_name, content_settings=settings)
blob = self.service.get_blob_properties(container_name, blob_name)
content_language = blob.properties.content_settings.content_language # fr
content_type = blob.properties.content_settings.content_type # html
content_length = blob.properties.content_length # 512
# Metadata
# Can't set metadata, but get will return metadata already on the blob
blob = self.service.get_blob_properties(container_name, blob_name)
metadata = blob.metadata # metadata={'val1': 'foo', 'val2': 'blah'}
# Replaces values, does not merge
settings = ContentSettings(content_encoding='utf-8')
self.service.set_blob_properties(container_name, blob_name, content_settings=settings)
blob = self.service.get_blob_properties(container_name, blob_name)
content_encoding = blob.properties.content_settings.content_encoding # utf-8
content_language = blob.properties.content_settings.content_language # None
self.service.delete_container(container_name)
def blob_exists(self):
container_name = self._create_container()
blob_name = self._get_blob_reference()
# Basic
exists = self.service.exists(container_name, blob_name) # False
self.service.create_blob(container_name, blob_name, 512)
exists = self.service.exists(container_name, blob_name) # True
self.service.delete_container(container_name)
def copy_blob(self):
container_name = self._create_container()
source_blob_name = self._create_blob(container_name)
# Basic
# Copy the blob from the directory to the root of the container
source = self.service.make_blob_url(container_name, source_blob_name)
copy = self.service.copy_blob(container_name, 'blob1copy', source)
        # Poll for copy completion
        count = 0
        while copy.status != 'success':
            count = count + 1
if count > 5:
print('Timed out waiting for async copy to complete.')
time.sleep(30)
copy = self.service.get_blob_properties(container_name, 'blob1copy').properties.copy
# With SAS from a remote account to local blob
# Commented out as remote container, directory, blob, and sas would need to be created
'''
source_blob_url = self.service.make_blob_url(
remote_container_name,
remote_blob_name,
sas_token=remote_sas_token,
)
        copy = self.service.copy_blob(destination_container_name,
destination_blob_name,
source_blob_url)
'''
# Abort copy
# Commented out as this involves timing the abort to be sent while the copy is still running
# Abort copy is useful to do along with polling
# self.service.abort_copy_blob(container_name, blob_name, copy.id)
self.service.delete_container(container_name)
def snapshot_blob(self):
container_name = self._create_container()
base_blob_name = self._create_blob(container_name)
# Basic
snapshot_blob = self.service.snapshot_blob(container_name, base_blob_name)
snapshot_id = snapshot_blob.snapshot
# Set Metadata (otherwise metadata will be copied from base blob)
metadata = {'val1': 'foo', 'val2': 'blah'}
snapshot_blob = self.service.snapshot_blob(container_name, base_blob_name, metadata=metadata)
snapshot_id = snapshot_blob.snapshot
self.service.delete_container(container_name)
def lease_blob(self):
container_name = self._create_container()
blob_name1 = self._create_blob(container_name)
blob_name2 = self._create_blob(container_name)
blob_name3 = self._create_blob(container_name)
# Acquire
# Defaults to infinite lease
infinite_lease_id = self.service.acquire_blob_lease(container_name, blob_name1)
# Acquire
# Set lease time, may be between 15 and 60 seconds
fixed_lease_id = self.service.acquire_blob_lease(container_name, blob_name2, lease_duration=30)
# Acquire
# Proposed lease id
proposed_lease_id_1 = '55e97f64-73e8-4390-838d-d9e84a374321'
modified_lease_id = self.service.acquire_blob_lease(container_name,
blob_name3,
proposed_lease_id=proposed_lease_id_1,
lease_duration=30)
modified_lease_id # equal to proposed_lease_id_1
# Renew
# Resets the 30 second lease timer
# Note that the lease may be renewed even if it has expired as long as
# the container has not been leased again since the expiration of that lease
self.service.renew_blob_lease(container_name, blob_name3, proposed_lease_id_1)
# Change
# Change the lease ID of an active lease.
proposed_lease_id_2 = '55e97f64-73e8-4390-838d-d9e84a374322'
self.service.change_blob_lease(container_name, blob_name3, modified_lease_id,
proposed_lease_id=proposed_lease_id_2)
# Release
# Releasing the lease allows another client to immediately acquire the
# lease for the container as soon as the release is complete.
self.service.release_blob_lease(container_name, blob_name3, proposed_lease_id_2)
# Break
# A matching lease ID is not required.
# By default, a fixed-duration lease breaks after the remaining lease period
# elapses, and an infinite lease breaks immediately.
infinite_lease_break_time = self.service.break_blob_lease(container_name, blob_name1)
infinite_lease_break_time # 0
# Break
# By default this would leave whatever time remained of the 30 second
# lease period, but a break period can be provided to indicate when the
        # break should take effect
lease_break_time = self.service.break_blob_lease(container_name, blob_name2, lease_break_period=10)
lease_break_time # 10
self.service.delete_container(container_name)
def blob_with_bytes(self):
container_name = self._create_container()
# Basic
data = self._get_random_bytes(1024)
blob_name = self._get_blob_reference()
self.service.create_blob_from_bytes(container_name, blob_name, data)
blob = self.service.get_blob_to_bytes(container_name, blob_name)
content = blob.content # data
# Download range
blob = self.service.get_blob_to_bytes(container_name, blob_name,
start_range=3, end_range=10)
content = blob.content # data from 3-10
# Upload from index in byte array
blob_name = self._get_blob_reference()
self.service.create_blob_from_bytes(container_name, blob_name, data, index=512)
# Content settings, metadata
settings = ContentSettings(content_type='html', content_language='fr')
metadata = {'val1': 'foo', 'val2': 'blah'}
blob_name = self._get_blob_reference()
self.service.create_blob_from_bytes(container_name, blob_name, data,
content_settings=settings,
metadata=metadata)
blob = self.service.get_blob_to_bytes(container_name, blob_name)
metadata = blob.metadata # metadata={'val1': 'foo', 'val2': 'blah'}
content_language = blob.properties.content_settings.content_language # fr
content_type = blob.properties.content_settings.content_type # html
# Progress
# Use slightly larger data so the chunking is more visible
data = self._get_random_bytes(8 * 1024 * 1024)
def upload_callback(current, total):
print('({}, {})'.format(current, total))
def download_callback(current, total):
print('({}, {}) '.format(current, total))
blob_name = self._get_blob_reference()
print('upload: ')
self.service.create_blob_from_bytes(container_name, blob_name, data,
progress_callback=upload_callback)
print('download: ')
blob = self.service.get_blob_to_bytes(container_name, blob_name,
progress_callback=download_callback)
self.service.delete_container(container_name)
def blob_with_stream(self):
container_name = self._create_container()
# Basic
input_stream = io.BytesIO(self._get_random_bytes(512))
output_stream = io.BytesIO()
blob_name = self._get_blob_reference()
self.service.create_blob_from_stream(container_name, blob_name,
input_stream, 512)
blob = self.service.get_blob_to_stream(container_name, blob_name,
output_stream)
content_length = blob.properties.content_length
# Download range
# Content settings, metadata
# Progress
# Parallelism
# See blob_with_bytes for these examples. The code will be very similar.
self.service.delete_container(container_name)
def blob_with_path(self):
container_name = self._create_container()
INPUT_FILE_PATH = 'blob_input.temp.dat'
OUTPUT_FILE_PATH = 'blob_output.temp.dat'
data = self._get_random_bytes(4 * 1024)
with open(INPUT_FILE_PATH, 'wb') as stream:
stream.write(data)
# Basic
blob_name = self._get_blob_reference()
self.service.create_blob_from_path(container_name, blob_name, INPUT_FILE_PATH)
blob = self.service.get_blob_to_path(container_name, blob_name, OUTPUT_FILE_PATH)
content_length = blob.properties.content_length
# Open mode
# Append to the blob instead of starting from the beginning
# Append streams are not seekable and so must be downloaded serially by setting max_connections=1.
blob = self.service.get_blob_to_path(container_name, blob_name, OUTPUT_FILE_PATH, open_mode='ab',
max_connections=1)
content_length = blob.properties.content_length # will be the same, but local blob length will be longer
# Download range
# Content settings, metadata
# Progress
# Parallelism
# See blob_with_bytes for these examples. The code will be very similar.
self.service.delete_container(container_name)
if os.path.isfile(INPUT_FILE_PATH):
try:
os.remove(INPUT_FILE_PATH)
except:
pass
if os.path.isfile(OUTPUT_FILE_PATH):
try:
os.remove(OUTPUT_FILE_PATH)
except:
pass
def create_blob(self):
container_name = self._create_container()
# Basic
# Create a blob with no data
blob_name1 = self._get_blob_reference()
self.service.create_blob(container_name, blob_name1, 512)
# Properties
settings = ContentSettings(content_type='html', content_language='fr')
blob_name2 = self._get_blob_reference()
self.service.create_blob(container_name, blob_name2, 512, content_settings=settings)
# Metadata
metadata = {'val1': 'foo', 'val2': 'blah'}
blob_name2 = self._get_blob_reference()
self.service.create_blob(container_name, blob_name2, 512, metadata=metadata)
self.service.delete_container(container_name)
def resize_blob(self):
container_name = self._create_container()
blob_name = self._get_blob_reference()
# Basic
self.service.create_blob(container_name, blob_name, 512)
self.service.resize_blob(container_name, blob_name, 1024)
blob = self.service.get_blob_properties(container_name, blob_name)
length = blob.properties.content_length # 1024
self.service.delete_container(container_name)
def page_operations(self):
container_name = self._create_container()
blob_name = self._get_blob_reference()
self.service.create_blob(container_name, blob_name, 2048)
        # Update the blob between offsets 512 and 1535
data = b'abcdefghijklmnop' * 64
self.service.update_page(container_name, blob_name, data, 512, 1535)
# List pages
print('list pages: ')
pages = self.service.get_page_ranges(container_name, blob_name)
for page in pages:
print('({}, {}) '.format(page.start, page.end)) # (512, 1535)
# Clear part of that page
self.service.clear_page(container_name, blob_name, 1024, 1535)
# Take a page range diff between two versions of page blob
snapshot = self.service.snapshot_blob(container_name, blob_name)
self.service.update_page(container_name, blob_name, data, 0, 1023)
ranges = self.service.get_page_ranges_diff(container_name, blob_name, snapshot.snapshot)
for range in ranges:
print('({}, {}, {}) '.format(range.start, range.end, range.is_cleared)) # (0, 511, False)
self.service.delete_container(container_name)
def set_sequence_number(self):
container_name = self._create_container()
blob_name = self._get_blob_reference()
# Create with a page number (default sets to 0)
self.service.create_blob(container_name, blob_name, 2048, sequence_number=1)
# Increment
properties = self.service.set_sequence_number(container_name, blob_name,
sequence_number_action=SequenceNumberAction.Increment)
sequence_number = properties.sequence_number # 2
# Update
properties = self.service.set_sequence_number(container_name, blob_name,
sequence_number_action=SequenceNumberAction.Update,
sequence_number=5)
sequence_number = properties.sequence_number # 5
# Max
# Takes the larger of the two sequence numbers
properties = self.service.set_sequence_number(container_name, blob_name,
sequence_number_action=SequenceNumberAction.Max,
sequence_number=3)
sequence_number = properties.sequence_number # 5
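# Editorial note: a minimal entry point for these samples might look like the
# following sketch; the import path and credential values are assumptions and
# would need to match the installed azure-storage SDK version:
#
#   from azure.storage import CloudStorageAccount
#   account = CloudStorageAccount(account_name='<name>', account_key='<key>')
#   PageBlobSamples(account).run_all_samples()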
|
the-stack_106_22331 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from functools import partial
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.communication import (recv_backward, recv_forward, recv_tensor_meta, send_backward,
send_backward_recv_forward, send_forward, send_forward_recv_backward,
send_tensor_meta)
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.initialize import launch
from colossalai.logging import get_dist_logger
from colossalai.utils import free_port, get_current_device
BATCH_SIZE = 4
SEQ_LENGTH = 2
HIDDEN_SIZE = 16
CONFIG = dict(parallel=dict(pipeline=dict(size=4), tensor=dict(size=1, mode=None)), seed=1024)
def check_equal(A, B):
return torch.allclose(A, B, rtol=1e-5, atol=1e-3)
def check_forward(output_tensor, rank, logger):
dist.barrier()
if gpc.is_first_rank(ParallelMode.PIPELINE):
tensor = output_tensor.clone()
else:
tensor = recv_forward(output_tensor.shape)
logger.info('Rank {} received forward. Correct tensor: {}'.format(rank, check_equal(tensor, output_tensor)))
if not gpc.is_last_rank(ParallelMode.PIPELINE):
send_forward(tensor)
logger.info('Rank {} sent forward.'.format(rank))
def check_backward(output_grad, rank, logger):
dist.barrier()
if gpc.is_last_rank(ParallelMode.PIPELINE):
grad = output_grad.clone()
else:
grad = recv_backward(output_grad.shape)
logger.info('Rank {} received backward. Correct grad: {}'.format(rank, check_equal(grad, output_grad)))
if not gpc.is_first_rank(ParallelMode.PIPELINE):
send_backward(grad)
logger.info('Rank {} sent backward.'.format(rank))
def check_forward_backward(output_tensor, output_grad, rank, logger):
dist.barrier()
if not gpc.is_first_rank(ParallelMode.PIPELINE):
tensor = send_backward_recv_forward(output_grad, output_tensor.shape)
logger.info('Rank {} sent backward received forward. Correct tensor: {}'.format(
rank, check_equal(tensor, output_tensor)))
if not gpc.is_last_rank(ParallelMode.PIPELINE):
grad = send_forward_recv_backward(output_tensor, output_grad.shape)
logger.info('Rank {} sent forward received backward. Correct grad: {}'.format(
rank, check_equal(grad, output_grad)))
def check_comm(size, rank, prev_rank, next_rank, logger):
dtype = torch.float32
device = get_current_device()
tensor_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE)
grad_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE)
tensor = torch.randn(tensor_shape, dtype=dtype, device=device)
dist.all_reduce(tensor)
grad = torch.randn(grad_shape, dtype=dtype, device=device)
dist.all_reduce(grad)
check_forward(tensor, rank, logger)
check_backward(grad, rank, logger)
check_forward_backward(tensor, grad, rank, logger)
def run_check(rank, world_size, port):
launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
logger = get_dist_logger()
rank = gpc.get_global_rank()
prev_rank = gpc.get_prev_global_rank(ParallelMode.PIPELINE)
next_rank = gpc.get_next_global_rank(ParallelMode.PIPELINE)
logger.info('Rank {0}: prev rank {1}, next rank {2}'.format(rank, prev_rank, next_rank))
    logger.info('Distributed environment is initialized.')
check_comm(world_size, rank, prev_rank, next_rank, logger)
gpc.destroy()
torch.cuda.empty_cache()
@pytest.mark.dist
def test_p2p():
world_size = 4
run_func = partial(run_check, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_p2p()
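# Editorial note: CONFIG declares a pipeline-parallel size of 4 and run_check
# is spawned with world_size = 4, so this test assumes a host with 4 devices.
# It is marked with ``@pytest.mark.dist`` and can be selected with, e.g.
# (command is an assumption about the local test layout):
#
#   pytest -m dist test_p2p.py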
|
the-stack_106_22333 | from setuptools import setup, find_packages
import os
import re
import sys
v = open(os.path.join(os.path.dirname(__file__), 'mako', '__init__.py'))
VERSION = re.compile(r".*__version__ = '(.*?)'", re.S).match(v.read()).group(1)
v.close()
readme = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
markupsafe_installs = (
sys.version_info >= (2, 6) and sys.version_info < (3, 0)
) or sys.version_info >= (3, 3)
if markupsafe_installs:
install_requires = ['MarkupSafe>=0.9.2']
else:
install_requires = []
setup(name='Mako',
version=VERSION,
description="A super-fast templating language that borrows the \
best ideas from the existing templating languages.",
long_description=readme,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
keywords='templates',
author='Mike Bayer',
author_email='[email protected]',
url='http://www.makotemplates.org/',
license='MIT',
packages=find_packages('.', exclude=['examples*', 'test*']),
scripts=['scripts/mako-render'],
tests_require=['nose >= 0.11'],
test_suite="nose.collector",
zip_safe=False,
install_requires=install_requires,
extras_require={'beaker': ['Beaker>=1.1']},
entry_points="""
[python.templating.engines]
mako = mako.ext.turbogears:TGPlugin
[pygments.lexers]
mako = mako.ext.pygmentplugin:MakoLexer
html+mako = mako.ext.pygmentplugin:MakoHtmlLexer
xml+mako = mako.ext.pygmentplugin:MakoXmlLexer
js+mako = mako.ext.pygmentplugin:MakoJavascriptLexer
css+mako = mako.ext.pygmentplugin:MakoCssLexer
[babel.extractors]
mako = mako.ext.babelplugin:extract
"""
)
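# Editorial note: with this setup module, a local editable install would
# typically be done with (environment-dependent, shown for illustration):
#
#   pip install -e .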
|
the-stack_106_22334 | import json
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import requests
import datetime
ekb = (56.688468, 56.988468, 60.45337, 60.75337)
today = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
data = requests.get('https://www.gorses.na4u.ru/data/COVID.json').json()
coords = []
for i in data['features']:
coords.append(i['geometry']['coordinates'])
coords = pd.DataFrame(coords)
coords=coords[coords[0]>ekb[0]]
coords=coords[coords[0]<ekb[1]]
coords=coords[coords[1]>ekb[2]]
coords=coords[coords[1]<ekb[3]]
norm_coords = (coords-[ekb[0], ekb[2]]).round(2).mul(100)
stats = np.zeros(shape=(32,32))
for i in norm_coords.iterrows():
stats[int(i[1][0]),int(i[1][1])] +=1
fig = plt.figure(frameon=False)
fig.set_size_inches(50,50)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
plt.imshow(stats/stats.max(), aspect='auto', cmap='gist_earth')
plt.savefig(f'{today}.png')
print(f'generated {today}, white is: {stats.max()}')
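# Editorial note: the binning above maps each coordinate into a 0.01-degree
# grid cell: (lat - 56.688468) and (lon - 60.45337) are rounded to two
# decimals and multiplied by 100, giving indices 0..30, which fit in the
# 32x32 ``stats`` array; e.g. a point at (56.70, 60.46) lands in cell (1, 1).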
|
the-stack_106_22335 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import unittest
import sys
sys.path.append("..")
from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestTopk(OpTest):
def setUp(self):
self.set_npu()
self.place = paddle.NPUPlace(0)
self.op_type = "top_k"
self.init_dtype()
x = np.array([[0.78104149, 0.88745828, 0.32362268],
[0.82196718, 0.48763277, 0.42826136],
[0.96527182, 0.34851612, 0.12959783]]).astype(self.dtype)
self.inputs = {'X': x}
np_out = np.array(
[[0.88745828], [0.82196718], [0.96527182]]).astype(self.dtype)
np_indices = np.array([[1], [0], [0]])
self.attrs = {'k': 1, "axis": -1}
self.outputs = {'Out': np_out, 'Indices': np_indices}
def set_npu(self):
self.__class__.use_npu = True
self.__class__.no_need_check_grad = True
def init_dtype(self):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestTopkV2(OpTest):
def setUp(self):
self.set_npu()
self.place = paddle.NPUPlace(0)
self.op_type = "top_k"
self.init_dtype()
x = np.array([[0.78104149, 0.88745828, 0.32362268],
[0.82196718, 0.48763277, 0.42826136],
[0.96527182, 0.34851612, 0.12959783]]).astype(self.dtype)
self.inputs = {'X': x}
np_out = np.array([[0.88745828, 0.78104149], [0.82196718, 0.48763277],
[0.96527182, 0.34851612]]).astype(self.dtype)
np_indices = np.array([[1, 0], [0, 1], [0, 1]])
self.attrs = {'k': 2, "axis": -1}
self.outputs = {'Out': np_out, 'Indices': np_indices}
def set_npu(self):
self.__class__.use_npu = True
self.__class__.no_need_check_grad = True
def init_dtype(self):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
if __name__ == '__main__':
unittest.main()
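# Editorial note: the hard-coded expectations above follow the usual top-k
# semantics along the last axis; an illustrative NumPy cross-check (not used
# by the test) for k=2 would be:
#
#   idx = np.argsort(-x, axis=-1)[:, :2]
#   out = np.take_along_axis(x, idx, axis=-1)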
|
the-stack_106_22336 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import urllib2
import json
from gaiatest.mocks.mock_user import MockUser
class PersonaTestUser:
"""
A base test class that can be extended by other tests to include utility methods.
API docs: https://github.com/mozilla/personatestuser.org#api
Usage:
verified = bool:
Verified refers to the user's account and password already approved and set up
env = str:
Strings "dev", "stage" or "prod" will return users for the respective environments
If "None" a production Persona user will be returned.
env = dict:
For custom browserid databases and verifiers
self.user = PersonaTestUser().create_user(verified=True,
env={"browserid":"firefoxos.persona.org", "verifier":"firefoxos.123done.org"})
"""
def create_user(self, verified=False, env=None):
if verified:
url = "http://personatestuser.org/email/"
else:
url = "http://personatestuser.org/unverified_email/"
if type(env) is str:
url += env
elif type(env) is dict:
url += "custom?"
for index, i in enumerate(env):
if index > 0:
url += "&"
url += "%s=%s" % (i, env[i])
try:
# ptu.org will fail with a 400 if the parameters are invalid
response = urllib2.urlopen(url)
except urllib2.URLError as e:
raise Exception("Could not get Persona user from personatestuser.org: %s" % e.reason)
decode = json.loads(response.read())
return MockUser(email=decode['email'], password=decode['pass'], name=decode['email'].split('@')[0])
|
the-stack_106_22337 | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
import io
import os
import sys
import numpy as np # type: ignore
from onnx import defs, FunctionProto, helper, OperatorStatus
from onnx.defs import OpSchema, ONNX_DOMAIN, ONNX_ML_DOMAIN
from onnx.backend.test.case import collect_snippets
from onnx.backend.sample.ops import collect_sample_implementations
from typing import Any, Text, Sequence, Dict, List, Type, Set, Tuple
#controls on ONNF code gen
#specify attr default value
special_attr_defaults = dict([
# ("AveragePool "+"kernel_shape", ('ints', '{}')),
# ("MaxPool "+"kernel_shape", ('ints', '{}')),
# ("Cast "+"to", ('int', '0')),
# ("Concat "+"axis", ('int', '0')),
# ("Conv "+"group", ('int', '1')),
# ("Unsqueeze "+"axes", ('ints', '{}')),
# ("RNN "+"activation_alpha", ('floats', '{}')),
# ("RNN "+"activation_beta", ('floats', '{}')),
])
#specify the function name in src/builder/frontend_dialect_transformer.cpp
#the reason for Conv and MaxPool is to handle optional arguments
special_op_handler = dict([
("Conv", "ImportNodeConv"),
("MaxPool", "ImportNodeMaxPool"),
("Gemm", "ImportNodeGemm"),
("Pad", "ImportNodePad"),
#("Transpose", "ImportNodeTranspose")
])
#add an Op in this list if ShapeInference is defined for this Op
ShapeInferenceList=['Exp', 'Tanh', 'Sinh', 'Cosh', 'Sigmoid', 'Relu',
'Add', 'Mul', 'Div', 'Sub', 'And', 'Or', 'Xor',
'Sum', 'Max', 'Min', 'MatMul', 'Gemm', 'LeakyRelu',
'Elu', 'Selu', 'HardSigmoid', 'Reshape', 'Reciprocal',
'Identity', 'Cos', 'Log', 'Transpose', 'Softmax',
'ReduceMax', 'ReduceMin', 'ReduceProd', 'ReduceSum',
'Softplus', 'Softsign', 'Sqrt', 'Unsqueeze', 'Sign']
CanonicalList=['Add', 'Identity', 'ReduceL1', 'ReduceL2', 'ReduceLogSum',
'ReduceLogSumExp', 'ReduceSumSquare']
manual_code_in_op_def = dict([
('DummyExample', ' let extraClassDeclaration = [{ \n'+
' static StringRef getPermAttrName() { return "perm"; }\n'+
' }];\n')
])
SNIPPETS = collect_snippets()
SAMPLE_IMPLEMENTATIONS = collect_sample_implementations()
ONNX_ML = not bool(os.getenv('ONNX_ML') == '0')
ONNX_ML = False
print("ONNX_ML", ONNX_ML)
if ONNX_ML:
ext = '-ml.md'
else:
ext = '.md'
def display_number(v): # type: (int) -> Text
if defs.OpSchema.is_infinite(v):
return '∞'
return Text(v)
def should_render_domain(domain): # type: (Text) -> bool
if domain == ONNX_ML_DOMAIN and not ONNX_ML:
return False
elif ONNX_ML and domain != ONNX_ML_DOMAIN:
return False
return True
def format_name_with_domain(domain, schema_name): # type: (Text, Text) -> Text
if domain:
return '{}.{}'.format(domain, schema_name)
else:
return schema_name
def display_attr_type(v): # type: (OpSchema.AttrType) -> Text
assert isinstance(v, OpSchema.AttrType)
s = Text(v)
s = s[s.rfind('.') + 1:].lower()
if s[-1] == 's':
s = 'list of ' + s
return s
def display_domain(domain): # type: (Text) -> Text
if domain:
return "the '{}' operator set".format(domain)
else:
return "the default ONNX operator set"
def display_domain_short(domain): # type: (Text) -> Text
if domain:
return domain
else:
return 'ai.onnx (default)'
def display_version_link(name, version): # type: (Text, int) -> Text
changelog_md = 'Changelog' + ext
name_with_ver = '{}-{}'.format(name, version)
return '<a href="{}#{}">{}</a>'.format(changelog_md, name_with_ver, name_with_ver)
def get_unique_output_name(schema, name):
for input in schema.inputs :
if input.name == name :
return 'out_'+name
return name
def display_schema(schema, versions): # type: (OpSchema, Sequence[OpSchema]) -> Text
s = ''
# doc
if schema.doc:
s += '\n'
s += '\n'.join(' ' + line
for line in schema.doc.lstrip().splitlines())
s += '\n'
# since version
s += '\n#### Version\n'
if schema.support_level == OpSchema.SupportType.EXPERIMENTAL:
s += '\nNo versioning maintained for experimental ops.'
else:
s += '\nThis version of the operator has been ' + ('deprecated' if schema.deprecated else 'available') + ' since version {}'.format(schema.since_version)
s += ' of {}.\n'.format(display_domain(schema.domain))
if len(versions) > 1:
# TODO: link to the Changelog.md
s += '\nOther versions of this operator: {}\n'.format(
', '.join(display_version_link(format_name_with_domain(v.domain, v.name),
v.since_version) for v in versions[:-1]))
# If this schema is deprecated, don't display any of the following sections
if schema.deprecated:
return s
# attributes
if schema.attributes:
s += '\n#### Attributes\n\n'
s += '<dl>\n'
for _, attr in sorted(schema.attributes.items()):
# option holds either required or default value
opt = ''
if attr.required:
opt = 'required'
elif attr.default_value.name:
default_value = helper.get_attribute_value(attr.default_value)
def format_value(value): # type: (Any) -> Text
if isinstance(value, float):
formatted = str(np.round(value, 5))
# use default formatting, unless too long.
if (len(formatted) > 10):
formatted = str("({:e})".format(value))
return formatted
elif isinstance(value, (bytes, bytearray)) and sys.version_info[0] == 3:
return str(value.decode('utf-8'))
return str(value)
if isinstance(default_value, list):
default_value = [format_value(val) for val in default_value]
else:
default_value = format_value(default_value)
opt = 'default is {}'.format(default_value)
s += '<dt><tt>{}</tt> : {}{}</dt>\n'.format(
attr.name,
display_attr_type(attr.type),
' ({})'.format(opt) if opt else '')
s += '<dd>{}</dd>\n'.format(attr.description)
s += '</dl>\n'
# inputs
s += '\n#### Inputs'
if schema.min_input != schema.max_input:
s += ' ({} - {})'.format(display_number(schema.min_input),
display_number(schema.max_input))
s += '\n\n'
if schema.inputs:
s += '<dl>\n'
for input in schema.inputs:
option_str = ""
if OpSchema.FormalParameterOption.Optional == input.option:
option_str = " (optional)"
elif OpSchema.FormalParameterOption.Variadic == input.option:
if input.isHomogeneous:
option_str = " (variadic)"
else:
option_str = " (variadic, heterogeneous)"
s += '<dt><tt>{}</tt>{} : {}</dt>\n'.format(input.name, option_str, input.typeStr)
s += '<dd>{}</dd>\n'.format(input.description)
s += '</dl>\n'
# outputs
s += '\n#### Outputs'
if schema.min_output != schema.max_output:
s += ' ({} - {})'.format(display_number(schema.min_output),
display_number(schema.max_output))
s += '\n\n'
if schema.outputs:
s += '<dl>\n'
for output in schema.outputs:
option_str = ""
if OpSchema.FormalParameterOption.Optional == output.option:
option_str = " (optional)"
elif OpSchema.FormalParameterOption.Variadic == output.option:
if output.isHomogeneous:
option_str = " (variadic)"
else:
option_str = " (variadic, heterogeneous)"
s += '<dt><tt>{}</tt>{} : {}</dt>\n'.format(get_unique_output_name(schema, output.name), option_str, output.typeStr)
s += '<dd>{}</dd>\n'.format(output.description)
s += '</dl>\n'
# type constraints
s += '\n#### Type Constraints'
s += '\n\n'
if schema.type_constraints:
s += '<dl>\n'
for type_constraint in schema.type_constraints:
allowedTypes = type_constraint.allowed_type_strs
if (len(allowedTypes) > 0):
allowedTypeStr = allowedTypes[0]
for allowedType in allowedTypes[1:]:
allowedTypeStr += ', ' + allowedType
s += '<dt><tt>{}</tt> : {}</dt>\n'.format(
type_constraint.type_param_str, allowedTypeStr)
s += '<dd>{}</dd>\n'.format(type_constraint.description)
s += '</dl>\n'
# Function Body
if schema.has_function: # type: ignore
s += '\n#### Function\n'
s += '\nThe Function can be represented as a function.\n'
return s
def support_level_str(level): # type: (OpSchema.SupportType) -> Text
return \
"<sub>experimental</sub> " if level == OpSchema.SupportType.EXPERIMENTAL else ""
def convert_type(tstr) :
tfrom = np.array(['bool', 'int8', 'int16', 'int32', 'int64',
'unkown', 'float16', 'float', 'double'])
tto =np.array(['I1', 'I8', 'I16', 'I32', 'I64',
'BF16', 'F16', 'F32', 'F64'])
index = -1
for i in range(len(tfrom)) :
if tfrom[i] in tstr :
index = i
break
if index == -1 :
print("error", tstr)
return ''
else :
        return tto[index]
def collect_types(schema, input) :
allowedTypeStr=''
#first step just ignore the type constraints
return allowedTypeStr
if input.typeStr :
tstr = input.typeStr
else :
        return allowedTypeStr
if schema.type_constraints:
for type_constraint in schema.type_constraints:
if type_constraint.type_param_str != tstr :
continue
allowedTypes = type_constraint.allowed_type_strs
allowedTypeStr=''
if (len(allowedTypes) > 0):
t = convert_type(allowedTypes[0])
if t == '' :
return ''
allowedTypeStr += t
for allowedType in allowedTypes[1:]:
t = convert_type(allowedType)
if t == '' :
return ''
if not t in allowedTypeStr :
allowedTypeStr += ', '+t
return allowedTypeStr
return allowedTypeStr
def gen_schema(schema) :
line_indent = ' '
#s = 'def ONNX'+schema.name+str(schema.since_version)+'Op:ONNX_Op<"'+schema.name+'", \n'
s = 'def ONNX'+schema.name+'Op:ONNX_Op<"'+schema.name+'", \n'
s += line_indent+' [NoSideEffect'
if schema.name in ShapeInferenceList :
s+= ', DeclareOpInterfaceMethods<ShapeInferenceOpInterface>'
s += ']> {'
if schema.name in CanonicalList:
s += '\n'+line_indent+'let hasCanonicalizer = 1;'
#summary
s += '\n'+line_indent
s += 'let summary = "ONNX '+schema.name+' operation";'
#description
s += '\n'+line_indent
s += 'let description = [{'
if schema.doc:
"""
s += '\n'.join(line_indent + line
for line in schema.doc.lstrip().splitlines())
"""
for line in schema.doc.lstrip().splitlines():
            line = line.replace('}]', r'\}\]')
s += '\n'+line_indent+' '+'"'+line+'"'
else :
s += '\n'+line_indent*2 +'no doc for this op from onnx'
s += '\n'+line_indent+'}];'
#input
s+= '\n'+line_indent+'let arguments = (ins '
isfirst = True
if schema.inputs:
isfirst = False
for input in schema.inputs:
if input != schema.inputs[0] :
s+= ',\n '
etypes=collect_types(schema, input)
if OpSchema.FormalParameterOption.Optional == input.option:
#TODO: handle optional
print("warning: optional input for"+schema.name+' '+input.name)
elif OpSchema.FormalParameterOption.Variadic == input.option:
if input.isHomogeneous:
s+= 'Variadic<'
else:
#TODO handle (variadic, heterogeneous)"
print("warning: (variadic, heterogeneous) for"+schema.name+' '+input.name)
if etypes == '':
s+= 'AnyTypeOf<[AnyMemRef, AnyTensor]>'
else:
s+= 'TensorOf<['+etypes+']>'
if OpSchema.FormalParameterOption.Optional == input.option:
#TODO: handle optional
t=''
elif OpSchema.FormalParameterOption.Variadic == input.option:
if input.isHomogeneous:
s+= '>'
else:
#TODO handle (variadic, heterogeneous)"
t=''
s+=':$'+input.name
s += gen_attr_ins(schema, isfirst)
s+= ');'
#output
s+= '\n'+line_indent+'let results = (outs '
if schema.outputs:
for output in schema.outputs:
if output != schema.outputs[0] :
s+= ',\n '
#need to interpret output.typeStr
etypes=collect_types(schema, output)
if etypes == '':
s+= 'AnyTypeOf<[AnyMemRef, AnyTensor]>'
else:
s+= 'TensorOf<['+etypes+']>'
s += ':$'+get_unique_output_name(schema, output.name)
s+= ');\n'
#s+= 'let hasCanonicalizer = 1;'
#add special code
if schema.name in manual_code_in_op_def :
s += manual_code_in_op_def[schema.name]
s += '}\n\n'
return s
"""
special cases:
* Split: attr split default value: sizeof(output1) namely 1
* Conv: attr dilations default value is {num_dim of first input - 2, 1}
* Conv: attr kernel_shape type is ints
* Transpose: attr perm default value is {} empty int list
"""
def gen_code(schema,fefile) :
handle_variadic = False
line_indent = ' '
fefile.write(' '+'}else if (OpName == "'+schema.name+'") {\n')
op_type_str='mlir::ONNX'+schema.name+'Op'
if schema.name in special_op_handler :
fefile.write(' '+special_op_handler[schema.name]+'(node, '
+str(len(schema.inputs))
+', ' +str(len(schema.outputs)))
elif len(schema.outputs) > 1 :
fefile.write(' '+'ImportNodeMultipleOuts<'+op_type_str+'>(node, '
+str(len(schema.inputs))
+', ' +str(len(schema.outputs)))
else :
fefile.write(' '+'ImportNodeOneOut<'+op_type_str+'>(node, '
+str(len(schema.inputs))
+', ' +str(len(schema.outputs)))
variadicIn = 'false'
variadicOut = 'false'
for input in schema.inputs:
if OpSchema.FormalParameterOption.Variadic == input.option:
if input.isHomogeneous:
variadicIn = 'true'
handle_variadic = True
for output in schema.outputs:
if OpSchema.FormalParameterOption.Variadic == output.option:
if output.isHomogeneous:
variadicOut = 'true'
if not handle_variadic:
fefile.write(');\n')
else:
fefile.write(', '+variadicIn+', '+variadicOut+');\n')
def gen_attr_ins(schema, isfirst) :
def get_attr_type_basic(attr_type) :
if attr_type == 'int' :
mytype = 'I64Attr'
elif attr_type == 'float' :
mytype = 'F32Attr'
elif attr_type == 'ints' :
mytype = 'I64ArrayAttr'
elif attr_type == 'floats' :
mytype = 'F32ArrayAttr'
elif attr_type == "string" :
mytype = 'StrAttr'
elif attr_type == "strings" :
mytype = 'StrArrayAttr'
else :
mytype ='AnyAttr'
#TODO: tensor and sparse tensor
return mytype
def get_attr_type_optional(attr_type) :
mytype = 'OptionalAttr<'
mytype += get_attr_type_basic(attr_type)
mytype += '>'
return mytype
def get_attr_type_with_default(attr_type, attr_default) :
mytype = 'DefaultValuedAttr<'
mytype += get_attr_type_basic(attr_type)
mytype += ', "'+attr_default+'">'
return mytype
attr_line = ''
if schema.attributes:
for _, attr in sorted(schema.attributes.items()):
#attr_line = line_indent+line_indent+line_indent+line_indent
if not isfirst:
attr_line += ',\n '
else :
isfirst = False
if schema.name+' '+attr.name in special_attr_defaults:
(attr_type_str, attr_default_str) = special_attr_defaults[schema.name+' '+attr.name]
attr_line += get_attr_type_with_default(attr_type_str, attr_default_str)
attr_line += ':$'+attr.name
elif attr.required:
s = Text(attr.type)
attr_type_str = s[s.rfind('.') + 1:].lower()
attr_line += get_attr_type_basic(attr_type_str)
attr_line += ':$'+attr.name
# option holds either required or default value
elif attr.default_value.name:
s = Text(attr.type)
attr_type_str = s[s.rfind('.') + 1:].lower()
default_value = helper.get_attribute_value(attr.default_value)
def format_value(value): # type: (Any) -> Text
if isinstance(value, float):
formatted = str(np.round(value, 5))
# use default formatting, unless too long.
if (len(formatted) > 10):
formatted = str("({:e})".format(value))
return formatted
elif isinstance(value, (bytes, bytearray)) and sys.version_info[0] == 3:
return str(value.decode('utf-8'))
return str(value)
if isinstance(default_value, list):
default_value = [format_value(val) for val in default_value]
attr_option_str = '{}'.format(default_value)
attr_option_str = attr_option_str.replace('[', '{', 1)
attr_option_str = attr_option_str.replace(']', '}', 1)
if attr_type_str == 'strings' :
attr_option_str = attr_option_str.replace("'", '\\"')
else :
attr_option_str = attr_option_str.replace("'", '')
else:
default_value = format_value(default_value)
attr_option_str = default_value
attr_line += get_attr_type_with_default(attr_type_str, attr_option_str)
attr_line += ':$'+attr.name
else:
s = Text(attr.type)
attr_type_str = s[s.rfind('.') + 1:].lower()
attr_line += get_attr_type_optional(attr_type_str)
attr_line += ':$'+attr.name
return attr_line
def main(args): # type: (Type[Args]) -> None
with io.open(args.changelog, 'w', newline='') as fout:
fout.write('## Operator Changelog\n')
fout.write(
"*This file is automatically generated from the\n"
" [def files](/onnx/defs) via [this script](/onnx/defs/gen_doc.py).\n"
" Do not modify directly and instead edit operator definitions.*\n")
# domain -> version -> [schema]
dv_index = defaultdict(lambda: defaultdict(list)) # type: Dict[Text, Dict[int, List[OpSchema]]]
for schema in defs.get_all_schemas_with_history():
dv_index[schema.domain][schema.since_version].append(schema)
fout.write('\n')
for domain, versionmap in sorted(dv_index.items()):
if not should_render_domain(domain):
continue
s = '# {}\n'.format(display_domain_short(domain))
for version, unsorted_schemas in sorted(versionmap.items()):
s += '## Version {} of {}\n'.format(version, display_domain(domain))
for schema in sorted(unsorted_schemas, key=lambda s: s.name):
name_with_ver = '{}-{}'.format(format_name_with_domain(domain, schema.name),
schema.since_version)
s += ('### <a name="{}"></a>**{}**' + (' (deprecated)' if schema.deprecated else '') + '</a>\n').format(name_with_ver, name_with_ver)
s += display_schema(schema, [schema])
s += '\n'
fout.write(s)
with io.open(args.output, 'w', newline='', encoding="utf-8") as fout:
fout.write('## Operator Schemas\n')
fout.write(
"*This file is automatically generated from the\n"
" [def files](/onnx/defs) via [this script](/onnx/defs/gen_doc.py).\n"
" Do not modify directly and instead edit operator definitions.*\n")
# domain -> support level -> name -> [schema]
index = defaultdict(lambda: defaultdict(lambda: defaultdict(list))) # type: Dict[Text, Dict[int, Dict[Text, List[OpSchema]]]]
for schema in defs.get_all_schemas_with_history():
#print("check point 0", schema.name, schema.domain, schema.support_level)
#gen_schema(schema)
index[schema.domain][int(schema.support_level)][schema.name].append(schema)
fout.write('\n')
# Preprocess the Operator Schemas
# [(domain, [(support_level, [(schema name, current schema, all versions schemas)])])]
operator_schemas = list() # type: List[Tuple[Text, List[Tuple[int, List[Tuple[Text, OpSchema, List[OpSchema]]]]]]]
exsting_ops = set() # type: Set[Text]
for domain, _supportmap in sorted(index.items()):
if not should_render_domain(domain):
continue
processed_supportmap = list()
for _support, _namemap in sorted(_supportmap.items()):
processed_namemap = list()
for n, unsorted_versions in sorted(_namemap.items()):
versions = sorted(unsorted_versions, key=lambda s: s.since_version)
schema = versions[-1]
#print("check point 2", schema)
if schema.name in exsting_ops:
continue
exsting_ops.add(schema.name)
processed_namemap.append((n, schema, versions))
processed_supportmap.append((_support, processed_namemap))
operator_schemas.append((domain, processed_supportmap))
# Table of contents
for domain, supportmap in operator_schemas:
s = '* {}\n'.format(display_domain_short(domain))
fout.write(s)
function_ops = list()
for _, namemap in supportmap:
for n, schema, versions in namemap:
if schema.has_function: # type: ignore
function_ops.append((n, schema, versions))
continue
s = ' * {}<a href="#{}">{}</a>\n'.format(
support_level_str(schema.support_level),
format_name_with_domain(domain, n),
format_name_with_domain(domain, n))
fout.write(s)
if len(function_ops):
fout.write('\n')
fout.write(' **Operators with function registered:**\n')
for n, schema, versions in function_ops:
s = ' * {}<a href="#{}">{}</a>\n'.format(
support_level_str(schema.support_level),
format_name_with_domain(domain, n),
format_name_with_domain(domain, n))
fout.write(s)
fout.write('\n')
tdfile= io.open(args.tdfile, 'w', newline='')
tdfile.write('//********************************************************\n'+
'// Warning: Do not modify this file directly\n'+
'// This file is automatically generated via script\n'+
'// Details can be found in doc/readonnxdefs.md\n'+
'//********************************************************\n\n'
)
fefile=io.open('op_build_table.inc', 'w', newline='')
firstfunc = True
fefile.write('//********************************************************\n'+
'// Warning: Do not modify this file directly\n'+
'// This file is automatically generated via script\n'+
'// Details can be found in doc/readonnxdefs.md\n'+
'//********************************************************\n\n'
)
fefile.write(' '+'if (OpName == "DUMMY") {\n')
for domain, supportmap in operator_schemas:
s = '## {}\n'.format(display_domain_short(domain))
fout.write(s)
for _, namemap in supportmap:
for op_type, schema, versions in namemap:
# op_type
#print("check point 1", schema.name, len(schema.inputs), len(schema.outputs))
gen_code(schema, fefile)
r = gen_schema(schema)
tdfile.write(r)
s = ('### {}<a name="{}"></a><a name="{}">**{}**' + (' (deprecated)' if schema.deprecated else '') + '</a>\n').format(
support_level_str(schema.support_level),
format_name_with_domain(domain, op_type),
format_name_with_domain(domain, op_type.lower()),
format_name_with_domain(domain, op_type))
s += display_schema(schema, versions)
s += '\n\n'
if op_type in SNIPPETS:
s += '#### Examples\n\n'
for summary, code in sorted(SNIPPETS[op_type]):
s += '<details>\n'
s += '<summary>{}</summary>\n\n'.format(summary)
s += '```python\n{}\n```\n\n'.format(code)
s += '</details>\n'
s += '\n\n'
if op_type.lower() in SAMPLE_IMPLEMENTATIONS:
s += '#### Sample Implementation\n\n'
s += '<details>\n'
s += '<summary>{}</summary>\n\n'.format(op_type)
s += '```python\n{}\n```\n\n'.format(SAMPLE_IMPLEMENTATIONS[op_type.lower()])
s += '</details>\n'
s += '\n\n'
fout.write(s)
fefile.write(' }')
fefile.close()
if __name__ == '__main__':
base_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
docs_dir = os.path.join(base_dir, 'docs')
print(docs_dir)
class Args(object):
output = os.path.join(docs_dir, 'Operators' + ext)
changelog = os.path.join(docs_dir, 'Changelog' + ext)
tdfile = os.path.join(base_dir, 'onnxop.inc')
print(Args)
main(Args)
|
the-stack_106_22340 | """
Network analysis module.
"""
import os
import ipaddress
import collections
import geoip2.database
import logging.config
import disspcap
from lisa.core.base import AbstractSubAnalyzer
from lisa.analysis.anomaly import Anomaly
from lisa.config import lisa_path, logging_config
logging.config.dictConfig(logging_config)
log = logging.getLogger()
with open(f'{lisa_path}/data/ipblacklist') as f:
ipblacklist = []
for line in f:
iprange = line.rstrip('\n').split('-')
iprange = (int(ipaddress.ip_address(iprange[0])),
int(ipaddress.ip_address(iprange[1])))
ipblacklist.append(iprange)
def is_ip_blacklisted(ipaddr):
"""Binary searches whether ip address is in a
blacklist (reported malicious ip).
:param ipaddr: String IP address.
:returns: True x False.
"""
ip = int(ipaddress.ip_address(ipaddr))
low = 0
high = len(ipblacklist) - 1
while low <= high:
mid = low + (high - low) // 2
ipmid = ipblacklist[mid]
if ip >= ipmid[0] and ip <= ipmid[1]:
return True
elif ip < ipmid[0]:
high = mid - 1
elif ip > ipmid[1]:
low = mid + 1
return False
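# Minimal usage sketch, assuming the module-level blacklist has been loaded as
# above; the addresses below are arbitrary RFC 5737 documentation addresses,
# not known blacklist entries.
def _example_is_ip_blacklisted():
    for addr in ('198.51.100.7', '203.0.113.42'):
        print(addr, 'blacklisted:', is_ip_blacklisted(addr))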
def is_ip_local(ipaddr):
"""Returns True if ip address is in local range.
:param ipaddr: String IP address.
:returns: True x False.
"""
ip = int(ipaddress.ip_address(ipaddr))
# 10.x.x.x
if ip >= 167772160 and ip < 184549376:
return True
# 172.16.0.0 – 172.31.255.255
if ip >= 2886729728 and ip < 2887778304:
return True
# 192.168.x.x
if ip >= 3232235520 and ip < 3232301056:
return True
return False
class NetworkAnalyzer(AbstractSubAnalyzer):
"""Provides networking analysis.
:param file: AnalyzedFile's object.
:param pcap_path: Path of pre-captured pcap.
:param ip_address: Local IP of capturing interface.
"""
def __init__(self, file, pcap_path=None, ip_address=None):
super().__init__(file)
self._anomalies = []
self._syn_count = 0
self._fin_count = 0
self._syn_count_local = 0
self._fin_count_local = 0
self._endpoints = []
self._dns_questions = set()
self._irc_messages = []
self._http_requests = []
self._telnet_data = []
self._port_statistics = {
'TCP': collections.Counter(),
'UDP': collections.Counter()
}
# set up maxmind geoip2 databases
self._maxmind = False
city = f'{lisa_path}/data/geolite2databases/GeoLite2-City.mmdb'
asn = f'{lisa_path}/data/geolite2databases/GeoLite2-ASN.mmdb'
if os.path.isfile(city) and os.path.isfile(asn):
self._reader_city = geoip2.database.Reader(city)
self._reader_asn = geoip2.database.Reader(asn)
self._maxmind = True
if pcap_path:
self._pcap_path = os.path.abspath(pcap_path)
else:
self._pcap_path = f'{self._file.data_dir}/capture.pcap'
self._local_ip = None
if file is not None:
self._local_ip = '10.0.2.15'
@property
def pcap_path(self):
"""Path of analyzed pcap."""
return self._pcap_path
@pcap_path.setter
def pcap_path(self, pcap_path):
"""Path of analyzed pcap."""
self._pcap_path = pcap_path
def run_analysis(self):
"""Main analysis method.
:returns: Dictionary containing analysis results.
"""
log.debug('NetworkAnalyzer started.')
# pcap analysis
self.analyze_pcap()
log.debug('NetworkAnalyzer finished.')
return self._output
def _analyze_endpoint(self, ip):
"""Returns information about endpoint (IP address)
:param ip: String representation of IP address.
:returns: Dictionary with endpoint information.
"""
if self._maxmind:
# get maxmind geolite2 info
try:
rc = self._reader_city.city(ip)
ra = self._reader_asn.asn(ip)
endpoint = {
'ip': ip,
'ports': [],
'country': rc.country.name,
'city': rc.city.name,
'asn': ra.autonomous_system_number,
'organization': ra.autonomous_system_organization,
'blacklisted': is_ip_blacklisted(ip),
'data_in': 0,
'data_out': 0
}
except geoip2.errors.AddressNotFoundError as e:
endpoint = {
'ip': ip,
'ports': [],
'blacklisted': is_ip_blacklisted(ip),
'data_in': 0,
'data_out': 0
}
else:
endpoint = {
'ip': ip,
'ports': [],
'blacklisted': is_ip_blacklisted(ip),
'data_in': 0,
'data_out': 0
}
# report anomaly
if endpoint['blacklisted']:
name = 'blacklisted_ip_access'
description = ('Sample contacted endpoint with IP '
'address present on blacklist.')
data = {
'ip_address': ip
}
anomaly = Anomaly(name, description, data)
self._anomalies.append(anomaly.to_dict())
return endpoint
def _l7_analysis(self, packet):
"""Analyzes application level of pcap.
:param packet: Single packet.
"""
if packet.dns:
if packet.dns.qr == 0:
# dns question
for question in packet.dns.questions:
self._dns_questions.add(question)
# report anomaly
if (
packet.dns.question_count > 499
or packet.dns.answer_count > 499
or packet.dns.authority_count > 499
or packet.dns.additional_count > 499
):
name = 'dns_header_many_records'
                description = ('Sample sends a DNS header with an unusually '
                               'large number of questions, answers, '
                               'authorities or additionals.')
data = {
'question_count': packet.dns.question_count,
'answer_count': packet.dns.answer_count,
'authority_count': packet.dns.authority_count,
'additional_count': packet.dns.additional_count
}
anomaly = Anomaly(name, description, data)
self._anomalies.append(anomaly.to_dict())
if packet.http:
if packet.http.is_request:
# http request
request = {
'method': packet.http.request_method,
'uri': packet.http.request_uri,
'version': packet.http.version,
'headers': {}
}
# headers
for key in packet.http.headers:
request['headers'][key] = packet.http.headers[key]
self._http_requests.append(request)
if packet.irc:
for message in packet.irc.messages:
# irc messages
str_message = message.command
for param in message.params:
str_message += ' ' + param
str_message += ' ' + message.trailing
str_message = str_message.strip()
if str_message != '':
self._irc_messages.append(str_message)
if packet.telnet:
if len(packet.telnet.data.strip()) != 0:
self._telnet_data.append(packet.telnet.data)
def analyze_pcap(self):
"""Analyzes captured pcap file. Fills self._endpoints,
self._port_statistics, self._syn_count, self._fin_count and others.
"""
endpoints = {}
if self._local_ip is None:
self._local_ip = disspcap.most_common_ip(self._pcap_path)
pcap = disspcap.Pcap(self._pcap_path)
while True:
packet = pcap.next_packet()
if packet is None:
break
if packet.ipv4:
packet_ip = packet.ipv4
elif packet.ipv6:
packet_ip = packet.ipv6
else:
continue
# TCP communication
if packet.tcp:
if packet_ip.source == self._local_ip:
# outgoing packet
ip = packet_ip.destination
port = str(packet.tcp.destination_port)
length = packet.tcp.payload_length
if packet.tcp.syn:
# search for syn scan
self._syn_count += 1
if is_ip_local(ip):
self._syn_count_local += 1
elif packet.tcp.fin and not packet.tcp.ack:
# search for fin scan
self._fin_count += 1
if is_ip_local(ip):
self._fin_count_local += 1
if length != 0:
# analyze endpoint
if ip not in endpoints:
endpoint = self._analyze_endpoint(ip)
endpoints[ip] = endpoint
endpoints[ip]['data_out'] += length
if port not in endpoints[ip]['ports']:
endpoints[ip]['ports'].append(port)
self._port_statistics['TCP'][port] += 1
else:
                    # incoming packet
ip = packet_ip.source
port = str(packet.tcp.source_port)
length = packet.tcp.payload_length
if length != 0:
if ip not in endpoints:
endpoint = self._analyze_endpoint(ip)
endpoints[ip] = endpoint
endpoints[ip]['data_in'] += length
if port not in endpoints[ip]['ports']:
endpoints[ip]['ports'].append(port)
self._port_statistics['TCP'][port] += 1
# UDP communication
if packet.udp:
if packet_ip.source == self._local_ip:
# outgoing packet
ip = packet_ip.destination
port = str(packet.udp.destination_port)
length = packet.udp.payload_length
if length != 0:
# analyze endpoints
if ip not in endpoints:
endpoint = self._analyze_endpoint(ip)
endpoints[ip] = endpoint
endpoints[ip]['data_out'] += length
if port not in endpoints[ip]['ports']:
endpoints[ip]['ports'].append(port)
self._port_statistics['UDP'][port] += 1
else:
                    # incoming packet
ip = packet_ip.source
port = str(packet.udp.source_port)
length = packet.udp.payload_length
if length != 0:
# analyze endpoint
if ip not in endpoints:
endpoint = self._analyze_endpoint(ip)
endpoints[ip] = endpoint
endpoints[ip]['data_in'] += length
if port not in endpoints[ip]['ports']:
endpoints[ip]['ports'].append(port)
self._port_statistics['UDP'][port] += 1
self._l7_analysis(packet)
self._endpoints = list(endpoints.values())
# report anomaly
if self._syn_count > 100:
name = 'syn_scan'
            description = 'Sample sent more than 100 TCP SYN packets.'
data = {
'syn_count': {
'total': self._syn_count,
'local': self._syn_count_local,
'internet': self._syn_count - self._syn_count_local
}
}
anomaly = Anomaly(name, description, data)
self._anomalies.append(anomaly.to_dict())
# report anomaly
if self._fin_count > 100:
name = 'fin_scan'
            description = 'Sample sent more than 100 TCP FIN packets.'
data = {
'fin_count': {
'total': self._fin_count,
'local': self._fin_count_local,
'internet': self._fin_count - self._fin_count_local
}
}
anomaly = Anomaly(name, description, data)
self._anomalies.append(anomaly.to_dict())
# save pcap analysis output
self._output['anomalies'] = self._anomalies
self._output['irc_messages'] = self._irc_messages
self._output['dns_questions'] = []
# question structure
for question in self._dns_questions:
qname, qtype = question.split()
self._output['dns_questions'].append(
{
'name': qname,
'type': qtype
}
)
self._output['http_requests'] = self._http_requests
self._output['telnet_data'] = self._telnet_data
most_common_tcp = self._port_statistics['TCP'].most_common()
most_common_udp = self._port_statistics['UDP'].most_common()
ports_tcp_count = len(most_common_tcp)
ports_udp_count = len(most_common_udp)
# report anomaly
if ports_tcp_count + ports_udp_count > 100:
name = 'port_scan'
description = 'Sample communicated on more than 100 ports.'
data = {
'tcp_ports_count': ports_tcp_count,
'udp_ports_count': ports_udp_count
}
anomaly = Anomaly(name, description, data)
self._anomalies.append(anomaly.to_dict())
self._output['port_statistics'] = {
'TCP': collections.OrderedDict(most_common_tcp),
'UDP': collections.OrderedDict(most_common_udp)
}
self._output['endpoints'] = self._endpoints
|
the-stack_106_22341 | from os import getenv
from typing import Dict
from psycopg2 import OperationalError
from psycopg2.pool import SimpleConnectionPool
class Database:
pg_config: Dict = None
CONNECTION_NAME: str = None
def __init__(self):
self.CONNECTION_NAME = getenv('INSTANCE_CONNECTION_NAME')
self.pg_config = {
'user': getenv('POSTGRES_USER', '<YOUR DB USER>'),
'password': getenv('POSTGRES_PASSWORD', '<YOUR DB PASSWORD>'),
'dbname': getenv('POSTGRES_DATABASE', '<YOUR DB NAME>')
}
pg_pool = None
def __connect(self, host):
"""
Helper function to connect to Postgres
"""
self.pg_config['host'] = host
self.pg_pool = SimpleConnectionPool(1, 1, **self.pg_config)
def connect(self):
try:
self.__connect(f'/cloudsql/{self.CONNECTION_NAME}')
except OperationalError:
# If production settings fail, use local development ones
self.__connect('localhost')
def get_pool(self):
if not self.pg_pool:
self.connect()
with self.pg_pool.getconn() as conn:
return conn
def return_pool(self, pool):
self.pg_pool.putconn(pool)
def postgres_demo(self):
# Initialize the pool lazily, in case SQL access isn't needed for this
# GCF instance. Doing so minimizes the number of active SQL connections,
# which helps keep your GCF instances under SQL connection limits.
if not self.pg_pool:
self.connect()
# Remember to close SQL resources declared while running this function.
# Keep any declared in global scope (e.g. pg_pool) for later reuse.
with self.pg_pool.getconn() as conn:
cursor = conn.cursor()
cursor.execute('SELECT NOW() as now')
results = cursor.fetchone()
self.pg_pool.putconn(conn)
return str(results[0])
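# Minimal usage sketch, assuming the POSTGRES_* and INSTANCE_CONNECTION_NAME
# environment variables point at a reachable Cloud SQL / Postgres instance.
if __name__ == '__main__':
    db = Database()
    print(db.postgres_demo())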
|
the-stack_106_22343 | import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import math
class Wall():
def __init__(self, width, thick, center=(0, 0)):
self.w = width
self.t = thick
self.cx, self.cy = center
self.x_min = self.cx - self.w
self.x_max = self.cx + self.w
self.y_min = self.cy - self.t
self.y_max = self.cy + self.t
self.orientation = 1
if self.w > self.t:
self.p1 = (self.x_min, self.cy)
self.p2 = (self.x_max, self.cy)
self.orientation = 1 # Vertical
else:
self.p1 = (self.cx, self.y_max)
self.p2 = (self.cx, self.y_min)
self.orientation = 0 # Horizontal
def potential_field_1d(x, d_0=1, alpha=0.2, d_1=1.8, d_2=0.01):
d_0 = d_0
alpha = alpha
d_1 = d_1
d_2 = d_2
if x < 0.4:
V = 3
else:
V = 0
return V
def potential_field_1d_force(x, d_0=1, alpha=10, d_1=10, d_2=0.01):
d_0 = d_0
alpha = alpha
d_1 = d_1
d_2 = d_2
u = alpha*(1/x - d_0/math.pow(x, 2))
u = np.max([u, -100])
return u
def rejection_field_1d_force(x, d_0=1, alpha=10, d_1=10, d_2=0.01):
d_0 = d_0
alpha = alpha
d_1 = d_1
d_2 = d_2
u = alpha*(1/x - d_0/math.pow(x, 2))
u = np.max([u, -100])
u = np.min([u, 0])
return u
def positivity(x):
if x >= 0:
return 1
else:
return -1
def potential_field_wall(x, y, Wall, alpha=0):
t_num = (Wall.p1[0] - x)*(Wall.p2[0] - Wall.p1[0]) + \
(Wall.p1[1] - y)*(Wall.p2[1] - Wall.p1[1])
t_den = math.pow(Wall.p2[0] - Wall.p1[0], 2) + \
math.pow(Wall.p2[1] - Wall.p1[1], 2)
t = -t_num/t_den
if 0 <= t and t <= 1:
d_num = (Wall.p2[0] - Wall.p1[0])*(Wall.p1[1] - y) - \
(Wall.p2[1] - Wall.p1[1])*(Wall.p1[0] - x)
d_num = math.fabs(d_num)
d_den = math.sqrt(t_den)
d = d_num/d_den
        if Wall.orientation == 1: # Vertical Wall
dx = alpha
dy = potential_field_1d(d) * positivity(y - Wall.p1[1])
elif Wall.orientation == 0: # Horizontal Wall
dx = potential_field_1d(d) * positivity(x - Wall.p1[0])
dy = alpha
else:
d1 = math.pow((Wall.p2[0] - x), 2) + math.pow(Wall.p2[1] - y, 2)
d2 = math.pow(Wall.p1[0] - x, 2) + math.pow(Wall.p1[1] - y, 2)
if d1 < d2:
d = math.sqrt(d1)
dx = potential_field_1d(d) * math.fabs((Wall.p2[0] - x)/d) * positivity(x - Wall.p2[0])
dy = potential_field_1d(d) * math.fabs((Wall.p2[1] - y)/d) * positivity(y - Wall.p2[1])
else:
d = math.sqrt(d2)
dx = potential_field_1d(d) * math.fabs((Wall.p1[0] - x)/d) * positivity(x - Wall.p1[0])
dy = potential_field_1d(d) * math.fabs((Wall.p1[1] - y)/d) * positivity(y - Wall.p1[1])
return potential_field_1d(d), dx, dy, d
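# Minimal usage sketch: probing the field of a single wall at one sample point.
# The wall dimensions and probe coordinates are arbitrary values chosen only
# for illustration.
def _example_potential_field_wall():
    wall = Wall(0.5, 2, (0, 0))
    field, dx, dy, d = potential_field_wall(1.0, 0.0, wall)
    print('field:', field, 'dx:', dx, 'dy:', dy, 'distance:', d)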
def potential_room(x, y, alpha=0):
wall1 = Wall(0.5, 2, (0, 0))
# print('wall1: ', wall1.orientation)
wall2 = Wall(0.5, 2, (3, 0))
# print('wall2: ', wall2.orientation)
wall3 = Wall(1, 0.5, (2, -2))
# print('wall3: ', wall3.orientation)
wall4 = Wall(1, 0.5, (1, 2))
# print('wall4: ', wall4.orientation)
walls = [wall1, wall2, wall3, wall4]
field = 0
dx = 0
dy = 0
for w in walls:
_field, _dx, _dy, _d = potential_field_wall(x, y, w, alpha)
field += _field
dx += _dx
dy += _dy
if field > 10:
field = 10
return field, dx, dy
def virtual_leader(x, y, l_x, l_y, alpha=10, d_0=0.4, room_alpha=0):
    # negative field values push the agent away and positive field values
    # attract the agent
dis_x = l_x - x
dis_y = l_y - y
dis = math.sqrt(math.pow(dis_x, 2) + math.pow(dis_y, 2))
field = potential_field_1d_force(dis, alpha=alpha, d_0=d_0)
dx = field * math.fabs(dis_x/dis) * positivity(l_x - x)
dy = field * math.fabs(dis_y/dis) * positivity(l_y - y)
_, _dx, _dy = potential_room(x, y, room_alpha)
return field, dx+8*_dx, dy+8*_dy
def virtual_obstacle(x, y, l_x, l_y):
dis_x = l_x - x
dis_y = l_y - y
dis = math.sqrt(math.pow(dis_x, 2) + math.pow(dis_y, 2))
field = rejection_field_1d_force(dis)
dx = field * math.fabs(dis_x/dis) * positivity(l_x - x)
dy = field * math.fabs(dis_y/dis) * positivity(l_y - y)
_, _dx, _dy = potential_room(x, y)
dx += _dx
    dy += _dy
return field, dx, dy
def main():
# Room
fig = plt.figure()
ax = fig.gca(projection='3d')
X = np.arange(-5, 5, 0.25)
Y = np.arange(-5, 5, 0.25)
X, Y = np.meshgrid(X, Y)
Z = np.zeros_like(X)
for i in range(X.shape[1]):
for j in range(X.shape[0]):
_, Z[j][i], _ = potential_room(X[i][j], Y[i][j])
# Plot the surface.
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
# Customize the z axis.
ax.set_zlim(0, 10)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
if __name__ == "__main__":
main()
|
the-stack_106_22345 | import unittest
import simplerestler
class ElementsTestCase(unittest.TestCase):
"""Tests for `elements.py`."""
def test_document_lists(self):
"""Lists Tests"""
d = simplerestler.Document()
ul = d.ul("One", "Two", "Three")
result = """
* One
* Two
* Three
"""
self.assertEqual(str(d), result)
d = simplerestler.Document()
ul = d.ol("One", "Two", "Three")
result = """
1. One
2. Two
3. Three
"""
self.assertEqual(str(d), result)
def test_document_comments(self):
"""Comments Tests"""
d = simplerestler.Document()
comment = d.comment("Comment text")
result = """
.. Comment text
"""
self.assertEqual(str(comment), result)
def test_document_p(self):
"""Paragraph Tests"""
d = simplerestler.Document()
p = d.p("This paragraph is on the first line This is the same line")
result = """
This paragraph is on the first line This is the same line
"""
self.assertEqual(str(p), result)
p = d.p("This paragraph is on the first line\nThis is a new line")
result = """
This paragraph is on the first line
This is a new line
"""
self.assertEqual(str(p), result)
p = d.p("This <b>paragraph</b> is on the first line<br/>This is a new line")
result = """
This **paragraph** is on the first line
This is a new line
"""
self.assertEqual(str(p), result)
    def test_document_pre(self):
"""Pre Tests"""
d = simplerestler.Document()
pre = d.pre("Literal - * and ** and ``")
result = """
::
Literal - * and ** and ``
"""
self.assertEqual(str(pre), result)
if __name__ == '__main__':
unittest.main() |
the-stack_106_22346 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Utilities for managing paths in Spack.
TODO: this is really part of spack.config. Consolidate it.
"""
import contextlib
import getpass
import os
import re
import subprocess
import tempfile
import llnl.util.tty as tty
from llnl.util.lang import memoized
import spack.paths
import spack.util.spack_yaml as syaml
__all__ = [
'substitute_config_variables',
'substitute_path_variables',
'canonicalize_path']
# Substitutions to perform
replacements = {
'spack': spack.paths.prefix,
'user': getpass.getuser(),
'tempdir': tempfile.gettempdir(),
}
# This is intended to be longer than the part of the install path
# spack generates from the root path we give it. Included in the
# estimate:
#
# os-arch -> 30
# compiler -> 30
# package name -> 50 (longest is currently 47 characters)
# version -> 20
# hash -> 32
# buffer -> 138
# ---------------------
# total -> 300
SPACK_MAX_INSTALL_PATH_LENGTH = 300
#: Padded paths comprise directories with this name (or some prefix of it).
#: It starts with two underscores to make it unlikely that prefix matches would
#: include some other component of the installation path.
SPACK_PATH_PADDING_CHARS = '__spack_path_placeholder__'
@memoized
def get_system_path_max():
# Choose a conservative default
sys_max_path_length = 256
try:
path_max_proc = subprocess.Popen(['getconf', 'PATH_MAX', '/'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
proc_output = str(path_max_proc.communicate()[0].decode())
sys_max_path_length = int(proc_output)
except (ValueError, subprocess.CalledProcessError, OSError):
tty.msg('Unable to find system max path length, using: {0}'.format(
sys_max_path_length))
return sys_max_path_length
def substitute_config_variables(path):
"""Substitute placeholders into paths.
Spack allows paths in configs to have some placeholders, as follows:
- $spack The Spack instance's prefix
- $user The current user's username
- $tempdir Default temporary directory returned by tempfile.gettempdir()
- $env The active Spack environment.
These are substituted case-insensitively into the path, and users can
use either ``$var`` or ``${var}`` syntax for the variables. $env is only
replaced if there is an active environment, and should only be used in
environment yaml files.
"""
import spack.environment as ev # break circular
env = ev.get_env({}, '')
if env:
replacements.update({'env': env.path})
else:
# If a previous invocation added env, remove it
replacements.pop('env', None)
# Look up replacements
def repl(match):
m = match.group(0).strip('${}')
return replacements.get(m.lower(), match.group(0))
# Replace $var or ${var}.
return re.sub(r'(\$\w+\b|\$\{\w+\})', repl, path)
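# Minimal usage sketch, assuming a stock configuration; the exact result
# depends on the local temp directory and the current username.
def _example_substitute_config_variables():
    return substitute_config_variables('$tempdir/$user/stage')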
def substitute_path_variables(path):
"""Substitute config vars, expand environment vars, expand user home."""
path = substitute_config_variables(path)
path = os.path.expandvars(path)
path = os.path.expanduser(path)
return path
def _get_padding_string(length):
spack_path_padding_size = len(SPACK_PATH_PADDING_CHARS)
num_reps = int(length / (spack_path_padding_size + 1))
extra_chars = length % (spack_path_padding_size + 1)
reps_list = [SPACK_PATH_PADDING_CHARS for i in range(num_reps)]
reps_list.append(SPACK_PATH_PADDING_CHARS[:extra_chars])
return os.path.sep.join(reps_list)
def add_padding(path, length):
"""Add padding subdirectories to path until total is length characters
Returns the padded path. If path is length - 1 or more characters long,
returns path. If path is length - 1 characters, warns that it is not
padding to length
Assumes path does not have a trailing path separator"""
padding_length = length - len(path)
if padding_length == 1:
# The only 1 character addition we can make to a path is `/`
# Spack internally runs normpath, so `foo/` will be reduced to `foo`
# Even if we removed this behavior from Spack, the user could normalize
# the path, removing the additional `/`.
# Because we can't expect one character of padding to show up in the
# resulting binaries, we warn the user and do not pad by a single char
tty.warn("Cannot pad path by exactly one character.")
if padding_length <= 0:
return path
# we subtract 1 from the padding_length to account for the path separator
# coming from os.path.join below
padding = _get_padding_string(padding_length - 1)
return os.path.join(path, padding)
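# Minimal usage sketch; the prefix and target length below are arbitrary
# sample values, not Spack defaults.
def _example_add_padding():
    padded = add_padding('/opt/spack/install', 64)
    return padded, len(padded)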
def canonicalize_path(path):
"""Same as substitute_path_variables, but also take absolute path."""
# Get file in which path was written in case we need to make it absolute
# relative to that path.
filename = None
if isinstance(path, syaml.syaml_str):
filename = os.path.dirname(path._start_mark.name)
assert path._start_mark.name == path._end_mark.name
path = substitute_path_variables(path)
if not os.path.isabs(path):
if filename:
path = os.path.join(filename, path)
else:
path = os.path.abspath(path)
tty.debug("Using current working directory as base for abspath")
return os.path.normpath(path)
def longest_prefix_re(string, capture=True):
"""Return a regular expression that matches a the longest possible prefix of string.
i.e., if the input string is ``the_quick_brown_fox``, then::
m = re.compile(longest_prefix('the_quick_brown_fox'))
m.match('the_').group(1) == 'the_'
m.match('the_quick').group(1) == 'the_quick'
m.match('the_quick_brown_fox').group(1) == 'the_quick_brown_fox'
m.match('the_xquick_brown_fox').group(1) == 'the_'
m.match('the_quickx_brown_fox').group(1) == 'the_quick'
"""
if len(string) < 2:
return string
return "(%s%s%s?)" % (
"" if capture else "?:",
string[0],
longest_prefix_re(string[1:], capture=False)
)
#: regex cache for padding_filter function
_filter_re = None
def padding_filter(string):
"""Filter used to reduce output from path padding in log output.
This turns paths like this:
/foo/bar/__spack_path_placeholder__/__spack_path_placeholder__/...
Into paths like this:
/foo/bar/[padded-to-512-chars]/...
Where ``padded-to-512-chars`` indicates that the prefix was padded with
placeholders until it hit 512 characters. The actual value of this number
depends on what the `install_tree``'s ``padded_length`` is configured to.
For a path to match and be filtered, the placeholder must appear in its
entirety at least one time. e.g., "/spack/" would not be filtered, but
"/__spack_path_placeholder__/spack/" would be.
"""
global _filter_re
pad = spack.util.path.SPACK_PATH_PADDING_CHARS
if not _filter_re:
longest_prefix = longest_prefix_re(pad)
regex = (
r"((?:/[^/\s]*)*?)" # zero or more leading non-whitespace path components
r"(/{pad})+" # the padding string repeated one or more times
r"(/{longest_prefix})?(?=/)" # trailing prefix of padding as path component
)
regex = regex.replace("/", os.sep)
regex = regex.format(pad=pad, longest_prefix=longest_prefix)
_filter_re = re.compile(regex)
def replacer(match):
return "%s%s[padded-to-%d-chars]" % (
match.group(1),
os.sep,
len(match.group(0))
)
return _filter_re.sub(replacer, string)
@contextlib.contextmanager
def filter_padding():
"""Context manager to safely disable path padding in all Spack output.
This is needed because Spack's debug output gets extremely long when we use a
long padded installation path.
"""
padding = spack.config.get("config:install_tree:padded_length", None)
if padding:
        # filter out all padding from the install command output
with tty.output_filter(padding_filter):
yield
else:
yield # no-op: don't filter unless padding is actually enabled
|
the-stack_106_22348 | #!/usr/bin/env python2.3
"""
Notification engine for periodic reports on branches and CRs.
"""
import sys
import os
import time
import cStringIO
import pprint
import optparse
import mail
from sfConstant import *
from sfMagma import SFMagmaTool
from sfReportCR import CRReport
from sfUtil import getTestMailAddr
dateTimeFmt = "%Y-%m-%dT%H:%M:%S.000Z"
debugEmailAddr = getTestMailAddr()
notifyBccList = None
fromAddr = '[email protected]'
myTZ = 'US/Pacific'
version=2.0
class PeriodicReport(SFMagmaTool):
# the list of contacts who have asked for a report.
contactList = []
def __init__(self, debug=0, logname='sf.preport'):
self.reportTime = time.time() # UTC time struct)
self.startTime = self.reportTime
SFMagmaTool.__init__(self, debug=debug, logname=logname)
self.debug=False
return
## END __init__
def queryContactList(self):
"""
Get the list of contacts who are requesting reports.
"""
q1 = ['PR_Frequency__c','!=','']
q2 = ['ContactStatus__c','!=','Inactive']
q3=['RecordTypeId','=','0123000000000sjAAA']
q4=['RecordTypeId','=','0123000000001trAAA']
q5=['RecordTypeId','=','012300000000RwfAAE']
q3a = ['PR_CR_Detail_Level__c','!=','']
q3b = ['PR_Branch_Detail_Level__c','!=','']
where = [q1, 'and', q2, 'and', '(', q3a, 'or', q3b, ')','and', '(', q3, 'or', q4, ')']
contactQryRes = self.query(CONTACT_OBJ, where=where, sc='all')
if contactQryRes not in BAD_INFO_LIST:
self.contactList = contactQryRes
msg = 'Found %s active Magma contacts who have requested a report.' \
%len(self.contactList)
self.setLog(msg, 'info')
#print "CONATCT Object length is: %s"%len(self.contactList)
return
## END queryContactList
def queryContactsById(self, contactIdList):
contactQryRes = self.retrieve(contactIdList, CONTACT_OBJ)
if contactQryRes not in BAD_INFO_LIST:
self.contactList = contactQryRes
pass
return
## END queryContactByID
def parseLastReportDate(self, contact):
"""
from the contact map provided, takes value of the
lastReportDate field if it exists, or the default last report
time if it does not, and returns a time struct and epoch time
"""
global myTZ
if os.environ.get('TZ','') != myTZ:
os.environ['TZ'] = myTZ
time.tzset()
if contact.has_key('PR_Last_Report_Date_Time__c'):
# parse last report time into epoch time
UTCTimeStr = contact['PR_Last_Report_Date_Time__c']
os.environ['TZ'] = "UTC"
time.tzset()
else:
# use a default value
#UTCTimeStr = self.props.get('notify', 'defaultLastReportTime')
UTCTimeStr = "2004-11-01T00:30:00.000Z"
lastReportUTCStruct = time.strptime(UTCTimeStr, dateTimeFmt)
lastReportEpoch = time.mktime(lastReportUTCStruct)
# Back to our time
if os.environ['TZ'] != myTZ:
os.environ['TZ'] = myTZ
time.tzset()
lastReportTimeStruct = time.localtime(lastReportEpoch)
return lastReportTimeStruct, lastReportEpoch, UTCTimeStr
## END parseLastReportDate(self, contact)
def determineEligibility(self):
"""
Method scans through the list of contacts to receive reports and
sees if it's time for them to get another report.
"""
eligibleContactList = []
FUDGE = 5 * 60
DAILY = (24 * 60 * 60) - FUDGE
WEEKLY = (7 * DAILY) - FUDGE
EVERY30 = (30 * DAILY) - FUDGE
for contact in self.contactList:
#pprint.pprint(contact)
discard, lastReportEpoch, discard = self.parseLastReportDate(contact)
secsSinceLastReport = self.reportTime - lastReportEpoch
if contact.get('Email', None) is None:
self.setLog("Contact has no email address - skipping: %s" \
%contact, 'warn')
continue
if self.outofband is True:
# we don't care about the last report date
eligibleContactList.append(contact)
elif contact.get('PR_Frequency__c','') == 'Daily' and \
secsSinceLastReport > DAILY:
# Eligible for daily report
eligibleContactList.append(contact)
elif contact.get('PR_Frequency__c','') == 'Weekly' and \
secsSinceLastReport > WEEKLY:
# Eligible for weekly report
eligibleContactList.append(contact)
elif contact.get('PR_Frequency__c','') == 'Every 30 Days' and \
secsSinceLastReport > EVERY30:
# Eligible for monthly report
eligibleContactList.append(contact)
else:
# Not eligible for any reports
pass
self.contactList = eligibleContactList
#print "Eligible Contact List: %s"%contactList
self.setLog("%d contact(s) eligible for a report this run" \
%len(self.contactList), 'info')
return
## END determineEligibility(self)
def determineSections(self, detailLvl):
"""
Move to the CR report class
"""
lowSections = ['TestFixes',
'Unassigned',
'RecentClose',]
medSections = []
verboseSections = ['Dev',
'PendDev',
'SCM',
'EngMgr',
'PE',
'Team',
'Untouched',]
if detailLvl == 'Low Detail':
sectionList = lowSections
elif detailLvl == 'Medium Detail':
sectionList = lowSections + medSections
elif detailLvl == 'Verbose':
sectionList = lowSections + medSections + verboseSections
else:
sectionList = []
msg = 'Invalid detail level "%s"' %detailLvl
self.setLog(msg, 'error')
return sectionList
## END determineSections
def generateReports(self):
"""
Pare down the list of contacts to those eligible, then
build a report for each contact found.
"""
self.determineEligibility()
for contact in self.contactList:
msg = "Generating report for %s (%s)" %(contact.get('Email'),
contact.get('Id'))
self.setLog(msg, 'info')
userId = contact.get('Contact_User_Id__c', None)
if userId is None:
msg = 'No userID for contact %s' %contact.get('Email')
self.setLog(msg, 'error')
continue
# figure out which CRreport sections to send to this person
detailLvl = contact.get('PR_CR_Detail_Level__c','')
crSectionList = self.determineSections(detailLvl)
if len(crSectionList) > 0:
crReport = CRReport(self, userId, contact, crSectionList)
crReport.generateReport()
if crReport.hasContent() or \
contact.get('PR_No_Empty__c', 'false') == 'false':
self.sendReport(contact, crReport)
# Mark the contact record with last report date if we generated any
# report
if len(crSectionList) > 0 and self.outofband is False:
self.updateContactLastRptDate(contact)
## END generateReports
def updateContactLastRptDate(self, contact):
"""
Update contact's Last Report field with notifier's reportTime
(as a UTC time struct).
"""
# check that reportTime isn't getting shifted upon insert...
data = {'Id': contact['Id'],
'PR_Last_Report_Date_Time__c': self.reportTime}
res = self.update(CONTACT_OBJ, data)
if res in BAD_INFO_LIST:
msg = 'Update of last report time for contact %s (%s) FAILED' \
%(contact['Email'], contact['Id'])
self.setLog(msg, 'error')
## END updateContactLastRptDate(self, contact)
def sendReport(self, contact, report):
"""
Email the generated report to the contact
"""
global fromAddr
toAddr = contact['Email']
if self.debug is True:
toAddr = debugEmailAddr
bccAddr = None
else:
bccAddr = notifyBccList
self.setLog("About to email report for %s" %contact.get('Email'),
'info')
mailserver = mail.MailServer(logger=self.log)
message = mail.Message(fromAddr, toAddr, report.getBody(),
subject=report.getSubject(),
bccAdds=bccAddr)
mailserver.sendEmail(message)
return
## END sendReport(self, contact, report)
def do(self, contactIdList=None, opts=None):
"""
Main flow for notifier
"""
global myTZ
if opts is not None:
self.debug = opts.debug
self.outofband = opts.oob
else:
self.debug=False
self.outofband=False
os.environ['TZ'] = myTZ
time.tzset()
global startTime
self.getConnectInfo(version, startTime=self.startTime)
if contactIdList is None:
self.queryContactList()
else:
self.queryContactsById(contactIdList)
self.generateReports()
self.setLog('Finished. Total Runtime is %s secs.' \
%(time.time() - self.startTime), 'info')
return
## END do(self)
def main():
op = optparse.OptionParser()
op.add_option('-d','--debug', action='store_true', dest='debug',
default=False, help='run in debug mode')
op.add_option('-o','--outofband', action='store_true', dest='oob',
default=False, help='Send report regardless of last sent.'
                  ' Also, don\'t update last sent date/time')
opts, args = op.parse_args()
if len(args) > 0:
contactIdList = args
else:
contactIdList = None
notifier = PeriodicReport()
notifier.do(contactIdList, opts)
## END main()
if __name__ == "__main__":
main()
|
the-stack_106_22350 | import numpy as np
import os
from pycocotools.coco import COCO
from torch.utils.data import Dataset
import cv2
import skimage
import skimage.io
class CocoDataset(Dataset):
'''Coco data Style'''
def __init__(self, img_dir, annot_dir, set_name='val', transform=None):
self.img_dir = img_dir
self.annot_dir = annot_dir
self.set_name = set_name
self.transfrom = transform
self.coco = COCO(os.path.join(annot_dir, f'instances_{set_name}.json'))
self.image_ids = self.coco.getImgIds()
self.load_classes()
def __len__(self):
return len(self.image_ids)
def __getitem__(self, idx):
_img = self.load_image(idx)
_annot = self.load_annotations(idx)
sample = {'img': _img, 'annot': _annot}
if self.transfrom is not None:
sample = self.transfrom(sample)
return sample
def load_image(self, image_index):
image_info = self.coco.loadImgs(self.image_ids[image_index])[0]
path = os.path.join(self.img_dir, image_info['file_name'])
img = cv2.imread(path, cv2.IMREAD_COLOR)
# if len(img.shape) == 2:
# print(f'{path} ======> Grayscale Img')
# img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
# else:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32)/255.0
return img
def load_annotations(self, image_index):
# get ground truth annotations
annotations_ids = self.coco.getAnnIds(imgIds=self.image_ids[image_index], iscrowd=False)
annotations = np.zeros((0, 5))
# some images appear to miss annotations (like image with id 257034)
if len(annotations_ids) == 0:
return annotations
# parse annotations
coco_annotations = self.coco.loadAnns(annotations_ids)
for idx, a in enumerate(coco_annotations):
# some annotations have basically no width / height, skip them
if a['bbox'][2] < 1 or a['bbox'][3] < 1:
continue
annotation = np.zeros((1, 5))
annotation[0, :4] = a['bbox']
annotation[0, 4] = self.coco_label_to_label(a['category_id'])
annotations = np.append(annotations, annotation, axis=0)
# transform from [x, y, w, h] to [x1, y1, x2, y2]
annotations[:, 2] = annotations[:, 0] + annotations[:, 2]
annotations[:, 3] = annotations[:, 1] + annotations[:, 3]
return annotations
# # Get Ground Truth
# ann_idx = self.coco.getAnnIds(imgIds=self.image_ids[idx], iscrowd=False)
# coco_ann = self.coco.loadAnns(ann_idx)
#
# annotations = np.zeros((0, 5))
#
# # Bounding Box do not have annotation info
# if len(coco_ann) == 0:
# return annotations
#
# # Parse Annotation info
# for idx, ann in enumerate(coco_ann):
# annotation = np.zeros((1, 5))
# annotation[0, :4] = ann['bbox']
# annotation[0, 4] = self.coco_label_to_label(ann['category_id'])
# annotations = np.append(annotations, annotation, axis=0)
# # transform [xmin, ymin, w, h] to [xmin, ymin, xmax, ymax]
# annotations[:, 2] = annotations[:, 0] + annotations[:, 2]
# annotations[:, 3] = annotations[:, 1] + annotations[:, 3]
# return annotations
def image_aspect_ratio(self, image_index):
image = self.coco.loadImgs(self.image_ids[image_index])[0]
return float(image['width']) / float(image['height'])
def load_classes(self):
# load class names (name -> label)
categories = self.coco.loadCats(self.coco.getCatIds())
categories.sort(key=lambda x: x['id'])
self.classes = {}
self.coco_labels = {}
self.coco_labels_inverse = {}
for c in categories:
self.coco_labels[len(self.classes)] = c['id']
self.coco_labels_inverse[c['id']] = len(self.classes)
self.classes[c['name']] = len(self.classes)
# also load the reverse (label -> name)
self.labels = {}
for key, value in self.classes.items():
self.labels[value] = key
return self.labels
def coco_label_to_label(self, coco_label):
return self.coco_labels_inverse[coco_label]
def label_to_coco_label(self, label):
return self.coco_labels[label]
if __name__ == '__main__':
root_dir = '/media/jsk/data/namwon/defect/all_data/crop'
    dataset = CocoDataset(root_dir, root_dir)  # annot_dir assumed same as img_dir (illustrative)
dataset.__getitem__(0)
|
the-stack_106_22356 | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from imblearn.over_sampling import SVMSMOTE as OrigModel
import lale.docstrings
import lale.operators
from lale.lib.imblearn.base_resampler import (
BaseResamplerImpl,
_input_decision_function_schema,
_input_fit_schema,
_input_predict_proba_schema,
_input_predict_schema,
_input_transform_schema,
_output_decision_function_schema,
_output_predict_proba_schema,
_output_predict_schema,
_output_transform_schema,
)
class SVMSMOTEImpl(BaseResamplerImpl):
def __init__(
self,
operator=None,
sampling_strategy="auto",
random_state=None,
k_neighbors=5,
n_jobs=1,
m_neighbors=10,
svm_estimator=None,
out_step=0.5,
):
if operator is None:
raise ValueError("Operator is a required argument.")
self._hyperparams = {
"sampling_strategy": sampling_strategy,
"random_state": random_state,
"k_neighbors": k_neighbors,
"n_jobs": n_jobs,
"m_neighbors": m_neighbors,
"svm_estimator": svm_estimator,
"out_step": out_step,
}
resampler_instance = OrigModel(**self._hyperparams)
super(SVMSMOTEImpl, self).__init__(
operator=operator, resampler=resampler_instance
)
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": ["operator"],
"relevantToOptimizer": ["operator"],
"additionalProperties": False,
"properties": {
"operator": {
"description": """Trainable Lale pipeline that is trained using the data obtained from the current imbalance corrector.
Predict, transform, predict_proba or decision_function would just be forwarded to the trained pipeline.
If operator is a Planned pipeline, the current imbalance corrector can't be trained without using an optimizer to
choose a trainable operator first. Please refer to lale/examples for more examples.""",
"anyOf": [{"laleType": "operator"}],
},
"sampling_strategy": {
"description": """sampling_strategy : float, str, dict or callable, default='auto'.
Sampling information to resample the data set.
""",
"anyOf": [
{
"description": """When ``float``,
it corresponds to the desired ratio of the number of
samples in the minority class over the number of samples in the
majority class after resampling. Therefore, the ratio is expressed as
:math:`\\alpha_{os} = N_{rm} / N_{M}` where :math:`N_{rm}` is the
number of samples in the minority class after resampling and
:math:`N_{M}` is the number of samples in the majority class.
.. warning::
``float`` is only available for **binary** classification. An
error is raised for multi-class classification.""",
"type": "number",
},
{
"description": """When ``str``, specify the class targeted by the resampling.
The number of samples in the different classes will be equalized.
Possible choices are:
``'minority'``: resample only the minority class;
``'not minority'``: resample all classes but the minority class;
``'not majority'``: resample all classes but the majority class;
``'all'``: resample all classes;
``'auto'``: equivalent to ``'not majority'``.""",
"enum": [
"minority",
"not minority",
"not majority",
"all",
"auto",
],
},
{
"description": """- When ``dict``, the keys correspond to the targeted classes.
The values correspond to the desired number of samples for each targeted
class.""",
"type": "object",
},
{
"description": """When callable, function taking ``y`` and returns a ``dict``.
The keys correspond to the targeted classes. The values correspond to the
desired number of samples for each class.""",
"laleType": "callable",
},
],
"default": "auto",
},
"random_state": {
"description": "Control the randomization of the algorithm.",
"anyOf": [
{
"description": "RandomState used by np.random",
"enum": [None],
},
{
"description": "The seed used by the random number generator",
"type": "integer",
},
{
"description": "Random number generator instance.",
"laleType": "numpy.random.RandomState",
},
],
"default": None,
},
"k_neighbors": {
"description": """If ``int``, number of nearest neighbours to used to construct synthetic samples.
If object, an estimator that inherits from
:class:`sklearn.neighbors.base.KNeighborsMixin` that will be used to
find the k_neighbors.""",
"anyOf": [{"laleType": "Any"}, {"type": "integer"}],
"default": 5,
},
"n_jobs": {
"description": "The number of threads to open if possible.",
"type": "integer",
"default": 1,
},
"m_neighbors": {
"description": """If ``int``, number of nearest neighbours to use to determine if a minority sample is in danger.
If object, an estimator that inherits from
:class:`sklearn.neighbors.base.KNeighborsMixin` that will be used to find the m_neighbors.""",
"anyOf": [{"laleType": "Any"}, {"type": "integer"}],
"default": 10,
},
"svm_estimator": {
"description": "A parametrized sklearn.svm.SVC classifier can be passed.",
"anyOf": [{"laleType": "Any"}, {"enum": [None]}],
"default": None,
},
"out_step": {
"description": "Step size when extrapolating.",
"type": "number",
"default": 0.5,
},
},
}
]
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Over-sampling using SVM-SMOTE,
Variant of the SMOTE algorithm which uses an SVM algorithm to detect samples to use for generating new synthetic samples.""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.imblearn.svm_smote.html",
"import_from": "imblearn.over_sampling",
"type": "object",
"tags": {
"pre": [],
"op": [
"transformer",
"estimator",
"resampler",
], # transformer and estimator both as a higher-order operator
"post": [],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_decision_function_schema,
"output_decision_function": _output_decision_function_schema,
},
}
lale.docstrings.set_docstrings(SVMSMOTEImpl, _combined_schemas)
SVMSMOTE = lale.operators.make_operator(SVMSMOTEImpl, _combined_schemas)
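# --- Hedged usage sketch (not part of the original module) ---
# The resampler is a higher-order operator that wraps a trainable pipeline; the
# synthetic dataset, classifier choice, and split below are illustrative assumptions.
if __name__ == "__main__":
    from lale.lib.sklearn import LogisticRegression
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split

    X, y = make_classification(n_samples=500, weights=[0.9, 0.1], random_state=42)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

    # SVM-SMOTE oversamples the minority class of the training data, then fits
    # the wrapped LogisticRegression on the resampled data.
    pipeline = SVMSMOTE(operator=LogisticRegression(), random_state=42)
    trained = pipeline.fit(X_train, y_train)
    print(trained.predict(X_test)[:10])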
|
the-stack_106_22357 | import json
from datetime import datetime, timedelta
from typing import Optional, List, Tuple, Dict
# Core
import pandas as pd
import plotly.express as px
import requests
import streamlit as st
import streamlit.components.v1 as components
import numpy as np
import ftx
########################################################################################
# Config
########################################################################################
COINS = ["RUNE","BNB","BTC","ETH",]
########################################################################################
# Data
########################################################################################
def get_market_price() -> float:
"""
Grabs Rune/USD market price from FTX.
"""
ftx_client = ftx.FtxClient()
result = ftx_client.get_market('RUNE/USD')
market_price = result['price']
return market_price
@st.cache(suppress_st_warning=True, allow_output_mutation=True)
def get_rune_stats() -> Dict[str, float]:
    """
    Gathers ThorChain network data from MCCN and SCCN and derives deterministic
    value, baseline price, and speculation metrics.
    """
market_price = get_market_price()
# MCCN
mccn = requests.get('https://midgard.thorchain.info/v2/network')
mccn_dict = mccn.json()
mccn_total_pooled_rune = float(mccn_dict['totalPooledRune']) / 1e8
mccn_total_active_bond = float(mccn_dict['bondMetrics']['totalActiveBond']) / 1e8
# ---
# SCCN
sccn = requests.get('http://thorb.sccn.nexain.com:8080/v1/network')
sccn_dict = sccn.json()
sccn_total_staked_rune = float(sccn_dict['totalStaked']) / 1e8
sccn_total_active_bond = float(sccn_dict['bondMetrics']['totalActiveBond']) / 1e8
# calculations
rune_in_lp_count = mccn_total_pooled_rune + sccn_total_staked_rune
rune_bonded_count = mccn_total_active_bond + sccn_total_active_bond
total_in_network_count = rune_in_lp_count + rune_bonded_count
deterministic_value = rune_in_lp_count * market_price * 3 # In USD
determined_price = deterministic_value / total_in_network_count # In USD
speculation = market_price - determined_price # USD
speculation_pct = speculation / market_price
# Collect Results
result_dict = {
'Rune_in_LP_count': rune_in_lp_count,
'Rune_bonded_count': rune_bonded_count,
'total_in_network_count': total_in_network_count,
'deterministic_value_usd': deterministic_value,
'determined_price': determined_price,
'market_price_usd': market_price,
'speculation_premium_usd': speculation,
'speculation_pct_of_market': speculation_pct,
}
return result_dict
########################################################################################
# Helpers
########################################################################################
########################################################################################
# App
########################################################################################
# ------------------------------ Config ------------------------------
st.set_page_config(
page_title="ThorViz", page_icon="⚡", layout="wide",
#initial_sidebar_state="expanded"
)
# ------------------------------ Sidebar ------------------------------
#st.sidebar.title("Config")
#days = st.sidebar.slider("Days:", value=60, min_value=0, max_value=60)
primary = "RUNE"#st.sidebar.selectbox("Primary:", COINS)
#compare = st.sidebar.multiselect("Compare: ", COINS)
# ------------------------------ Header ------------------------------
# This is a hack to align center :-(
col1, col2, col3 = st.beta_columns([1,6,1])
with col1:
st.write("")
with col2:
st.title("ThorViz - Thorchain Tokenomics Dashboard")
st.title(' #RAISETHECAPS ⚡⚡⚡')
with col3:
st.markdown("[Github](https://github.com/JormThor/ThorViz)")
st.markdown("[Twitter](https://twitter.com/JormungandrThor)")
# ------------------------------ Sections ------------------------------
st.error("This dashboard is in beta, there may be bugs. 🐞")
if st.button("➡️ I understand there could be bugs, let me in!"):
with st.beta_expander("Rune Baseline Price ⚖️"):
st.write('RUNE TO MOON!')
rune_dict = get_rune_stats()
# Note: `:,` formats values with comma for easier reading.
# TODO can format all of these with a helper function later
# Total Pooled Rune
st.markdown(f'**Total Pooled Rune (MCCN + SCCN):** **ᚱ** {np.round(rune_dict["Rune_in_LP_count"], 2):,}')
# DUPLICATE
# st.button(f' TEST - Total Pooled Rune (MCCN + SCCN): ᚱ{np.round(rune_dict["Rune_in_LP_count"], 2):,}', help='tooltip_test')
# Total Active Bonded Rune
st.markdown(f'**Total Active Bonded Rune:** **ᚱ** {np.round(rune_dict.get("Rune_bonded_count"), 2):,}')
# In-Network Rune
st.markdown(f'**Total In-Network Rune:** **ᚱ** {np.round(rune_dict.get("total_in_network_count"), 2):,}')
# Deterministic Value
st.markdown(f'**Total Deterministic Value:** **$** {np.round(rune_dict.get("deterministic_value_usd"), 2):,}')
# Market Price
st.markdown(f'**Market Price (USD):** **$** {np.round(rune_dict.get("market_price_usd"), 2):,} (source: FTX)')
# Calculate Baseline Price
st.markdown(f'**Baseline Price (USD):** **$** {np.round(rune_dict.get("determined_price"), 2):,}')
st.markdown(f'**Speculation Premium (USD):** **$** {np.round(rune_dict.get("speculation_premium_usd"), 2) :,}')
st.markdown(f'**Speculation percentage of Market Price:** {np.round(rune_dict.get("speculation_pct_of_market") * 100 ,2)}%')
with st.beta_expander("Baseline Price - Show me the math! 📐🤔"):
st.latex("TotalRuneInNetwork = TotalPooledRune + TotalActiveBondedRune")
st.latex("DeterministicValue \, (USD) = TotalPooledRune * MarketPrice * 3")
st.latex(r"BaselinePrice \, (USD) = \frac{DeterministicValue}{TotalRuneInNetwork}")
st.latex("SpeculationPremium \, (USD) = MarketPrice - BaselinePrice")
st.latex(r"SpeculationPercentOfMarket \, (\%) = \frac{SpeculationPremium}{MarketPrice} * 100")
# ------------------------------ Trading View Chart ------------------------------
with st.beta_expander("Market (Binance) 📈📊"):
components.html(
f"""
<div class="tradingview-widget-container">
<div id="tradingview_49e5b"></div>
<script type="text/javascript" src="https://s3.tradingview.com/tv.js"></script>
<script type="text/javascript">
new TradingView.widget(
{{
"symbol": "BINANCE:{primary}USDT",
"interval": "4H",
"timezone": "Etc/UTC",
"style": "1",
"locale": "en",
"toolbar_bg": "#f1f3f6",
"enable_publishing": false,
"allow_symbol_change": true,
"container_id": "tradingview_49e5b"
}}
);
</script>
</div>
""",
height=550,
width=900,
)
with st.beta_expander("ThorChain Data Resources 💾🗄️"):
st.write(f'Data Source for MCCN: {"https://midgard.thorchain.info/v2/network"}')
st.write(f'Data Source for SCCN: {"http://thorb.sccn.nexain.com:8080/v1/network"}')
# TODO add additional tools
with st.beta_expander("Support Development 🛠️ 🙏"):
st.write('If this dashboard is helpful consider supporting development.')
st.write("Were a distributed team of TradFi -> ThorFi Data Folks")
st.write('Project Roadmap will come soon.')
st.markdown('**BTC Address:** `bc1qrf0vtudhdr4acfg6m9qdedekcmw5w2lghurahz` ')
st.markdown('**ETH Address:** `0x2368bf7b77319b43532087ebceab79546b980758` ')
st.markdown('**BNB Address:** `bnb1t6nwpm5scau65gkm9ys8wceutz5p2a3mjhmjuc` ')
|
the-stack_106_22360 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM16.IEC61970.Core.IdentifiedObject import IdentifiedObject
class OutageStep(IdentifiedObject):
"""Holds an outage start and end time for each supply point of an outage record. The supply point for a given step is the associated PowerSystemResource instance.Holds an outage start and end time for each supply point of an outage record. The supply point for a given step is the associated PowerSystemResource instance.
"""
def __init__(self, averageCml=0.0, damage=False, specialCustomerCount=0, criticalCustomerCount=0, estimatedRestoreDateTime='', shockReported=False, callerCount=0, fatality=False, jobPriority='', totalCustomerCount=0, injury=False, totalCml=0.0, Crews=None, noPowerInterval=None, ConductingEquipmentRoles=None, status=None, OutageRecord=None, OutageCodes=None, *args, **kw_args):
"""Initialises a new 'OutageStep' instance.
@param averageCml: Average Customer Minutes Lost (CML) for this supply point for this outage.
@param damage: True if damage reported by caller or engineer.
@param specialCustomerCount: Number of customers with high reliability required.
@param criticalCustomerCount: Number of customers with critical needs, e.g., with a dialysis machine.
@param estimatedRestoreDateTime: Estimated time of restoration.
@param shockReported: True if shocks reported by caller or engineer.
@param callerCount: Number of customers phoning in.
@param fatality: True if fatalities reported by caller or engineer.
@param jobPriority:
@param totalCustomerCount: Number of customers connected to the PowerSystemResource.
@param injury: True if injuries reported by caller or engineer.
@param totalCml: Total Customer Minutes Lost (CML) for this supply point for this outage.
@param Crews:
@param noPowerInterval: Date and time interval between loss and restoration of power.
@param ConductingEquipmentRoles:
@param status:
@param OutageRecord:
@param OutageCodes: Multiple outage codes may apply to an outage step.
"""
#: Average Customer Minutes Lost (CML) for this supply point for this outage.
self.averageCml = averageCml
#: True if damage reported by caller or engineer.
self.damage = damage
#: Number of customers with high reliability required.
self.specialCustomerCount = specialCustomerCount
#: Number of customers with critical needs, e.g., with a dialysis machine.
self.criticalCustomerCount = criticalCustomerCount
#: Estimated time of restoration.
self.estimatedRestoreDateTime = estimatedRestoreDateTime
#: True if shocks reported by caller or engineer.
self.shockReported = shockReported
#: Number of customers phoning in.
self.callerCount = callerCount
#: True if fatalities reported by caller or engineer.
self.fatality = fatality
self.jobPriority = jobPriority
#: Number of customers connected to the PowerSystemResource.
self.totalCustomerCount = totalCustomerCount
#: True if injuries reported by caller or engineer.
self.injury = injury
#: Total Customer Minutes Lost (CML) for this supply point for this outage.
self.totalCml = totalCml
self._Crews = []
self.Crews = [] if Crews is None else Crews
self.noPowerInterval = noPowerInterval
self._ConductingEquipmentRoles = []
self.ConductingEquipmentRoles = [] if ConductingEquipmentRoles is None else ConductingEquipmentRoles
self.status = status
self._OutageRecord = None
self.OutageRecord = OutageRecord
self._OutageCodes = []
self.OutageCodes = [] if OutageCodes is None else OutageCodes
super(OutageStep, self).__init__(*args, **kw_args)
_attrs = ["averageCml", "damage", "specialCustomerCount", "criticalCustomerCount", "estimatedRestoreDateTime", "shockReported", "callerCount", "fatality", "jobPriority", "totalCustomerCount", "injury", "totalCml"]
_attr_types = {"averageCml": float, "damage": bool, "specialCustomerCount": int, "criticalCustomerCount": int, "estimatedRestoreDateTime": str, "shockReported": bool, "callerCount": int, "fatality": bool, "jobPriority": str, "totalCustomerCount": int, "injury": bool, "totalCml": float}
_defaults = {"averageCml": 0.0, "damage": False, "specialCustomerCount": 0, "criticalCustomerCount": 0, "estimatedRestoreDateTime": '', "shockReported": False, "callerCount": 0, "fatality": False, "jobPriority": '', "totalCustomerCount": 0, "injury": False, "totalCml": 0.0}
_enums = {}
_refs = ["Crews", "noPowerInterval", "ConductingEquipmentRoles", "status", "OutageRecord", "OutageCodes"]
_many_refs = ["Crews", "ConductingEquipmentRoles", "OutageCodes"]
def getCrews(self):
return self._Crews
def setCrews(self, value):
for p in self._Crews:
filtered = [q for q in p.OutageSteps if q != self]
            # The back-reference lives on each crew object, not on the list container.
            p._OutageSteps = filtered
for r in value:
if self not in r._OutageSteps:
r._OutageSteps.append(self)
self._Crews = value
Crews = property(getCrews, setCrews)
def addCrews(self, *Crews):
for obj in Crews:
if self not in obj._OutageSteps:
obj._OutageSteps.append(self)
self._Crews.append(obj)
def removeCrews(self, *Crews):
for obj in Crews:
if self in obj._OutageSteps:
obj._OutageSteps.remove(self)
self._Crews.remove(obj)
# Date and time interval between loss and restoration of power.
noPowerInterval = None
def getConductingEquipmentRoles(self):
return self._ConductingEquipmentRoles
def setConductingEquipmentRoles(self, value):
for x in self._ConductingEquipmentRoles:
x.OutageStep = None
for y in value:
y._OutageStep = self
self._ConductingEquipmentRoles = value
ConductingEquipmentRoles = property(getConductingEquipmentRoles, setConductingEquipmentRoles)
def addConductingEquipmentRoles(self, *ConductingEquipmentRoles):
for obj in ConductingEquipmentRoles:
obj.OutageStep = self
def removeConductingEquipmentRoles(self, *ConductingEquipmentRoles):
for obj in ConductingEquipmentRoles:
obj.OutageStep = None
status = None
def getOutageRecord(self):
return self._OutageRecord
def setOutageRecord(self, value):
if self._OutageRecord is not None:
filtered = [x for x in self.OutageRecord.OutageSteps if x != self]
self._OutageRecord._OutageSteps = filtered
self._OutageRecord = value
if self._OutageRecord is not None:
if self not in self._OutageRecord._OutageSteps:
self._OutageRecord._OutageSteps.append(self)
OutageRecord = property(getOutageRecord, setOutageRecord)
def getOutageCodes(self):
"""Multiple outage codes may apply to an outage step.
"""
return self._OutageCodes
def setOutageCodes(self, value):
for p in self._OutageCodes:
filtered = [q for q in p.OutageSteps if q != self]
            # The back-reference lives on each outage code object, not on the list container.
            p._OutageSteps = filtered
for r in value:
if self not in r._OutageSteps:
r._OutageSteps.append(self)
self._OutageCodes = value
OutageCodes = property(getOutageCodes, setOutageCodes)
def addOutageCodes(self, *OutageCodes):
for obj in OutageCodes:
if self not in obj._OutageSteps:
obj._OutageSteps.append(self)
self._OutageCodes.append(obj)
def removeOutageCodes(self, *OutageCodes):
for obj in OutageCodes:
if self in obj._OutageSteps:
obj._OutageSteps.remove(self)
self._OutageCodes.remove(obj)
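# --- Hedged usage sketch (not part of the generated CIM module) ---
# Constructing a step directly; the keyword arguments follow the __init__
# signature above, the timestamp string format is an assumption, and this
# assumes IdentifiedObject's constructor requires no positional arguments
# (typical of this generated code).
if __name__ == "__main__":
    step = OutageStep(callerCount=12, totalCustomerCount=340,
                      estimatedRestoreDateTime="2011-06-01T14:30:00Z")
    print(step.callerCount, step.totalCustomerCount, step.estimatedRestoreDateTime)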
|
the-stack_106_22364 | # Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACCOUNT_SID"
auth_token = "your_auth_token"
client = Client(account_sid, auth_token)
# Request carrier data explicitly; without type="carrier" the Lookup API leaves number.carrier empty.
number = client.lookups.phone_numbers("+15108675310").fetch(type="carrier", country_code="US")
print(number.carrier['type'])
print(number.carrier['name'])
|