188765
|
class ImageGroupData:
def __init__(self, start_y, start_x, y_gear_offset, x_gear_offset):
self.start_y = start_y
self.start_x = start_x
self.y_gear_offset = y_gear_offset
self.x_gear_offset = x_gear_offset
class ImageTypeData:
def __init__(self, size, rel_start_offset, rows=None, columns=None, pass_fn=None, next_offset=None):
self.size = size
self.rel_start_offset = rel_start_offset
self.rows = rows
self.columns = columns
self.pass_fn = pass_fn
self.next_offset = next_offset
class ImageSplitter:
STANDARD_GROUP_DATA = ImageGroupData(375, 390, 177, 174)
BLUEPRINT_GROUP_DATA = ImageGroupData(293, 423, 129, 126)
CARD_DATA = ImageTypeData((430,350), (-112,-10))
SET_DATA = ImageTypeData((20,140), (-100,100))
STAT_DATA = ImageTypeData((56,56), (0,0), 3, 6, lambda col, row: col >= 4 and row != 1, (87,60))
LEVEL_DATA = ImageTypeData((30,70), (268,180), 2, 3, lambda col, row: row == 0 and col == 2, (-88,60))
STANDARD_PAGE_DATA = ImageTypeData((160,160), (41,-157), 3, 5, lambda _, __: False, (177, 174))
BLUEPRINT_PAGE_DATA = ImageTypeData((90, 90), (74, -100), 6, 4, lambda _, __: False, (129, 126))
def extract_stat_card(self, img, gear_coord, is_blueprint=False):
group_data = self.get_group_data(is_blueprint)
return self.get_single_image_split(img, gear_coord, ImageSplitter.CARD_DATA, group_data)
def extract_set_image(self, img, gear_coord, is_blueprint=False):
group_data = self.get_group_data(is_blueprint)
return self.get_single_image_split(img, gear_coord, ImageSplitter.SET_DATA, group_data)
def extract_stat_images(self, img, gear_coord, is_blueprint=False):
group_data = self.get_group_data(is_blueprint)
return self.get_group_image_split(img, gear_coord, ImageSplitter.STAT_DATA, group_data)
def extract_level_images(self, img, gear_coord, is_blueprint=False):
group_data = self.get_group_data(is_blueprint)
return self.get_group_image_split(img, gear_coord, ImageSplitter.LEVEL_DATA, group_data)
def extract_page_images(self, img, is_blueprint=False):
group_data = self.get_group_data(is_blueprint)
page_data = ImageSplitter.BLUEPRINT_PAGE_DATA if is_blueprint else ImageSplitter.STANDARD_PAGE_DATA
return self.get_group_image_split(img, (1,1), page_data, group_data)
def get_single_image_split(self, img, gear_coord, image_type_data, group_data):
start_coord = self.get_start_coord(gear_coord, image_type_data.rel_start_offset, group_data)
return self.get_image_from_start(img, start_coord, image_type_data.size)
def get_group_image_split(self, img, gear_coord, image_type_data, group_data):
images = []
abs_start_coord = self.get_start_coord(gear_coord, image_type_data.rel_start_offset, group_data)
for row in range(image_type_data.rows):
for col in range(image_type_data.columns):
if image_type_data.pass_fn(col, row):
continue
start_y = abs_start_coord[0] + (image_type_data.next_offset[0] * row)
start_x = abs_start_coord[1] + (image_type_data.next_offset[1] * col)
single_img = self.get_image_from_start(img, (start_y, start_x), image_type_data.size)
images.append(single_img)
return images
def get_start_coord(self, gear_coord, rel_start_offset, group_data):
y_gear_pos, x_gear_pos = gear_coord
y_rel_offset, x_rel_offset = rel_start_offset
y_point = group_data.start_y + (y_gear_pos-1) * group_data.y_gear_offset + y_rel_offset
x_point = group_data.start_x + (x_gear_pos-1) * group_data.x_gear_offset + x_rel_offset
return (y_point, x_point)
def get_image_from_start(self, img, abs_start_coord, size):
low_y, low_x = abs_start_coord
height, width = size
high_y = low_y + height
high_x = low_x + width
return img[low_y:high_y, low_x:high_x]
def get_group_data(self, is_blueprint):
return ImageSplitter.BLUEPRINT_GROUP_DATA if is_blueprint else ImageSplitter.STANDARD_GROUP_DATA
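# A minimal usage sketch (an illustration, not part of the original module). It
# assumes the screenshot is a numpy array indexed as img[y, x] and that gear
# slots are addressed with 1-based (row, column) coordinates, as implied by
# get_start_coord above.
import numpy as np

_demo_img = np.zeros((1080, 1920, 3), dtype=np.uint8)  # stand-in for a screenshot
_splitter = ImageSplitter()
_card = _splitter.extract_stat_card(_demo_img, (1, 1))     # full stat-card crop
_stats = _splitter.extract_stat_images(_demo_img, (1, 1))  # list of individual stat crops
_pages = _splitter.extract_page_images(_demo_img)          # one crop per slot on the page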
|
StarcoderdataPython
|
3396626
|
""" A pipeline that transfers data from sqlite to postgreSQL """
import sqlite3
import psycopg2
import queries as q
DBNAME = "czlilzkt"
USER = "czlilzkt"
PASSWORD = "<PASSWORD>"
HOST = "ziggy.db.elephantsql.com"
sqlite_rpg_db = "rpg_db.sqlite3"
# Make connection_______
# sqlite connector
def sqlite_connect(sqlite_db):
""" returns sqlite connections """
sqlite_conn = sqlite3.connect(sqlite_db)
return sqlite_conn
#postgres connector
def pg_connect(dbname, user, password, host):
"""Returns a psycopg2 connection object."""
pg_conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host)
return pg_conn
# Make Cursor______
def create_cursor(conn):
""" returns cursor """
curs = conn.cursor()
return curs
# Making our query
def execute_query(curs, query, reading=True):
""" executes query """
curs.execute(query)
if reading:
results = curs.fetchall()
return results
# Insert each character fetched from sqlite into the postgres table, one row per character.
def add_characters(pg_curs, character_list):
"""Grabbing characters from sqlite"""
insert_character_statement = """
INSERT INTO charactercreator_character
(character_id, name, level, exp, hp, strength, intelligence, dexterity, wisdom)
VALUES {};
"""
for character in character_list:
pg_curs.execute(insert_character_statement.format(character))
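# A parameterized variant of add_characters, sketched as an alternative. It assumes
# each sqlite row has the nine columns named above, in that order. Using placeholders
# with executemany avoids the quoting problems that str.format() can introduce.
def add_characters_parameterized(pg_curs, character_list):
    """Insert characters using psycopg2 placeholders instead of string formatting."""
    insert_sql = """
        INSERT INTO charactercreator_character
        (character_id, name, level, exp, hp, strength, intelligence, dexterity, wisdom)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);
    """
    pg_curs.executemany(insert_sql, character_list)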
# THE IF NAME MAIN SECTION ________
if __name__ == "__main__":
pg_conn = pg_connect(DBNAME, USER, PASSWORD, HOST)
pg_curs = create_cursor(pg_conn)
sl_conn = sqlite_connect(sqlite_rpg_db)
sl_curs = create_cursor(sl_conn)
# all_data = execute_query(pg_curs, q.SELECT_ALL.format("TEST_TABLE"))
results = execute_query(pg_curs, q.create_character_table, reading=False)
character_list = execute_query(sl_curs, q.SELECT_ALL.format("charactercreator_character"))
add_characters(pg_curs, character_list)
pg_conn.commit()
sl_conn.commit()
print(character_list)
|
StarcoderdataPython
|
3251721
|
# Repository: nghia-tran/f5-common-python
# File: f5/bigip/tm/asm/signatures.py
# coding=utf-8
#
# Copyright 2015-2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from f5.bigip.resource import AsmResource
from f5.bigip.resource import Collection
from icontrol.exceptions import iControlUnexpectedHTTPError
class Signatures_s(Collection):
"""BIG-IP® ASM Signatures collection."""
def __init__(self, asm):
super(Signatures_s, self).__init__(asm)
self._meta_data['object_has_stats'] = False
self._meta_data['allowed_lazy_attributes'] = [Signature]
self._meta_data['attribute_registry'] = {
'tm:asm:signatures:signaturestate': Signature
}
class Signature(AsmResource):
"""BIG-IP® ASM Signature resource.
.. note:: Only user-created signatures can be modified or deleted;
default signatures are read-only.
"""
def __init__(self, signatures_s):
super(Signature, self).__init__(signatures_s)
self._meta_data['required_json_kind'] = 'tm:asm:signatures:signaturestate'
self._meta_data['required_creation_parameters'].update(
('attackTypeReference', 'rule')
)
def create(self, **kwargs):
"""Custom creation logic to handle edge cases
This shouldn't be needed, but ASM has a tendency to raise various errors that
are painful to handle from a customer point-of-view. These errors are especially
pronounced when doing things concurrently with asm.
The errors themselves are described in _check_exception below.
To address these failures, we catch the known transient exceptions
and retry until the call succeeds or the retry budget is exhausted.
:param kwargs:
:return:
"""
ex = iControlUnexpectedHTTPError(
"Failed to delete the signature"
)
for _ in range(0, 30):
try:
return self._create(**kwargs)
except iControlUnexpectedHTTPError as ex:
if self._check_exception(ex):
continue
else:
raise
raise ex
def delete(self, **kwargs):
"""Custom deletion logic to handle edge cases
This shouldn't be needed, but ASM has a tendency to raise various errors that
are painful to handle from a customer point-of-view. These errors are especially
pronounced when doing things concurrently with asm.
The errors themselves are described in _check_exception below.
To address these failures, we catch the known transient exceptions
and retry until the call succeeds or the retry budget is exhausted.
:param kwargs:
:return:
"""
ex = iControlUnexpectedHTTPError(
"Failed to delete the signature"
)
for _ in range(0, 30):
try:
return self._delete(**kwargs)
except iControlUnexpectedHTTPError as ex:
if self._check_exception(ex):
continue
else:
raise
raise ex
def modify(self, **kwargs):
ex = iControlUnexpectedHTTPError(
"Failed to modify the signature"
)
for _ in range(0, 30):
try:
return self._modify(**kwargs)
except iControlUnexpectedHTTPError as ex:
if self._check_exception(ex):
continue
else:
raise
raise ex
def update(self, **kwargs):
ex = iControlUnexpectedHTTPError(
"Failed to delete the signature"
)
for _ in range(0, 30):
try:
return self._update(**kwargs)
except iControlUnexpectedHTTPError as ex:
if self._check_exception(ex):
continue
else:
raise
raise ex
def _check_exception(self, ex):
"""Check for exceptions in action responses
In versions of ASM < v12, the REST API is quite unstable and therefore
needs some additional supporting retries to ensure that actions function
as expected. In particular versions 11.5.4 and 11.6.0 are affected.
This method handles checking for various exceptions and allowing the
given command to retry itself.
:param ex:
:return:
"""
retryable = [
# iControlUnexpectedHTTPError: 500 Unexpected Error: Internal Server Error ...
# {
# "code": 500,
# "message": "Could not add_signature the Attack Signature. "
# "Failed on insert to PLC.NEGSIG_SET_SIGNATURES "
# "(DBD::mysql::db do failed: Lock wait timeout exceeded; "
# "try restarting transaction)
#
'Lock wait timeout exceeded',
# {
# "code": 500,
# "message": "DBD::mysql::db do failed: Deadlock found when "
# "trying to get lock; try restarting transaction"
#
'Deadlock found when',
# {
# "code": 404,
# "message": "Could not add_signature the Attack Signature, "
# "internal data inconsistency was detected.",
'internal data inconsistency',
]
if any(x in str(ex) for x in retryable):
time.sleep(3)
return True
elif 'errorStack' in ex:
stack = ' '.join(ex['errorStack'])
if any(x in stack for x in retryable):
time.sleep(3)
return True
else:
return False
else:
return False
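# The create/delete/modify/update methods above repeat the same retry loop. The
# helper below is only a sketch of how that pattern could be factored out; it is
# not part of the published f5-common-python API.
def _retry_asm_action(resource, action, failure_msg, attempts=30, **kwargs):
    """Call `action(**kwargs)`, retrying on known-transient ASM errors."""
    last_error = iControlUnexpectedHTTPError(failure_msg)
    for _ in range(attempts):
        try:
            return action(**kwargs)
        except iControlUnexpectedHTTPError as error:
            last_error = error
            if resource._check_exception(error):
                continue
            raise
    raise last_error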
|
StarcoderdataPython
|
1780432
|
# Turns on debugging features in Flask
DEBUG = True
SERVER_NAME = "127.0.0.1:8080"
# Create a dummy secret key so we can use sessions
SECRET_KEY = '123456790'
# Database connection (MySQL 'inventory' database on localhost)
SQLALCHEMY_DATABASE_URI = 'mysql://:@localhost/inventory'
SQLALCHEMY_ECHO = True
# Flask-mail
MAIL_SERVER = ''
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USERNAME = ''
MAIL_PASSWORD = ''
# Flask-Security config
SECURITY_URL_PREFIX = "/admin"
SECURITY_PASSWORD_HASH = "<PASSWORD>"
SECURITY_PASSWORD_SALT = "<PASSWORD>"
# Flask-Security URLs, overridden because they don't put a / at the end
SECURITY_LOGIN_URL = "/login/"
SECURITY_LOGOUT_URL = "/logout/"
SECURITY_REGISTER_URL = "/register/"
SECURITY_RESET_URL = "/reset/"
SECURITY_POST_LOGIN_VIEW = "/admin/"
SECURITY_POST_LOGOUT_VIEW = "/admin/"
SECURITY_POST_REGISTER_VIEW = "/admin/"
SECURITY_POST_RESET_VIEW = "/admin/"
# Flask-Security features
SECURITY_REGISTERABLE = True
SECURITY_RECOVERABLE = True
SECURITY_SEND_REGISTER_EMAIL = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
CSRF_ENABLED = True
# Flask-Login
# SESSION_PROTECTION = None
# Flask-APScheduler
SCHEDULER_API_ENABLED = True
JOBS = [
{
'id': 'monitoring_ping_job',
'func': 'inventory.app:ping_job',
'args': (),
'trigger': 'interval',
'seconds': 30
}
]
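# Sketch of how Flask typically consumes a settings module like this one:
# from_object() copies every UPPERCASE name into app.config. Creating an app here
# is purely illustrative; the real application object presumably lives elsewhere.
from flask import Flask

_demo_app = Flask(__name__)
_demo_app.config.from_object(__name__)
assert _demo_app.config["SECURITY_URL_PREFIX"] == "/admin"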
|
StarcoderdataPython
|
30965
|
import requests
from teste_app import settigns
def google(q: str):
"""Faz uma pesquisa no google"""
return requests.get(settigns.GOOGLE, params={"q": q})
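# Minimal usage sketch; it assumes `settigns.GOOGLE` holds a reachable search URL
# (e.g. "https://www.google.com/search") and that network access is available.
if __name__ == "__main__":
    response = google("python requests")
    print(response.status_code, response.url)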
|
StarcoderdataPython
|
1624569
|
""" Utility functions operating on operation matrices """
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
import scipy.linalg as _spl
import scipy.sparse as _sps
import scipy.sparse.linalg as _spsl
import warnings as _warnings
import collections as _collections
from . import jamiolkowski as _jam
from . import matrixtools as _mt
from . import lindbladtools as _lt
from . import basistools as _bt
from ..objects.basis import Basis as _Basis, ExplicitBasis as _ExplicitBasis, DirectSumBasis as _DirectSumBasis
from ..objects.label import Label as _Label
IMAG_TOL = 1e-7 # tolerance for imaginary part being considered zero
def _flat_mut_blks(i, j, blockDims):
# like _mut(i,j,dim).flatten() but works with basis *blocks*
N = sum(blockDims)
mx = _np.zeros((N, N), 'd'); mx[i, j] = 1.0
ret = _np.zeros(sum([d**2 for d in blockDims]), 'd')
i = 0; off = 0
for d in blockDims:
ret[i:i + d**2] = mx[off:off + d, off:off + d].flatten()
i += d**2; off += d
return ret
def _hack_sqrtm(A):
sqrt, _ = _spl.sqrtm(A, disp=False) # Travis found this scipy function
# to be incorrect in certain cases (we need a workaround)
if _np.any(_np.isnan(sqrt)): # this is sometimes a good fallback when sqrtm doesn't work.
ev, U = _np.linalg.eig(A)
sqrt = _np.dot(U, _np.dot(_np.diag(_np.sqrt(ev)), _np.linalg.inv(U)))
return sqrt
def fidelity(A, B):
"""
Returns the quantum state fidelity between density
matrices A and B given by :
F = Tr( sqrt{ sqrt(A) * B * sqrt(A) } )^2
To compute process fidelity, pass this function the
Choi matrices of the two processes, or just call
:function:`entanglement_fidelity` with the operation matrices.
Parameters
----------
A : numpy array
First density matrix.
B : numpy array
Second density matrix.
Returns
-------
float
The resulting fidelity.
"""
evals, U = _np.linalg.eig(A)
if len([ev for ev in evals if abs(ev) > 1e-8]) == 1:
# special case when A is rank 1, A = vec * vec^T and sqrt(A) = A
ivec = _np.argmax(evals)
vec = U[:, ivec:(ivec + 1)]
F = evals[ivec].real * _np.dot(_np.conjugate(_np.transpose(vec)), _np.dot(B, vec)).real # vec^T * B * vec
return float(F)
evals, U = _np.linalg.eig(B)
if len([ev for ev in evals if abs(ev) > 1e-8]) == 1:
# special case when B is rank 1 (recall fidelity is symmetric in its arguments)
ivec = _np.argmax(evals)
vec = U[:, ivec:(ivec + 1)]
F = evals[ivec].real * _np.dot(_np.conjugate(_np.transpose(vec)), _np.dot(A, vec)).real # vec^T * A * vec
return float(F)
#if _np.array_equal(A, B): return 1.0 # HACK - some cases when A and B are perfecty equal sqrtm(A) fails...
sqrtA = _hack_sqrtm(A) # _spl.sqrtm(A)
# test the scipy sqrtm function - sometimes fails when rank deficient
#assert(_np.linalg.norm(_np.dot(sqrtA, sqrtA) - A) < 1e-8)
if _np.linalg.norm(_np.dot(sqrtA, sqrtA) - A) > 1e-8:
evals = _np.linalg.eigvals(A)
_warnings.warn(("sqrtm(A) failure when computing fidelity - beware result. "
"Maybe due to rank defficiency - eigenvalues of A are: %s") % evals)
F = (_mt.trace(_hack_sqrtm(_np.dot(sqrtA, _np.dot(B, sqrtA)))).real)**2 # Tr( sqrt{ sqrt(A) * B * sqrt(A) } )^2
return float(F)
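# Illustrative check (a sketch, not part of the library): for the pure states
# rho = |0><0| and sigma = |+><+|, the fidelity is |<0|+>|^2 = 0.5.
def _fidelity_pure_state_example():
    ket0 = _np.array([[1.0], [0.0]])
    ketp = _np.array([[1.0], [1.0]]) / _np.sqrt(2)
    rho = _np.dot(ket0, ket0.conj().T)
    sigma = _np.dot(ketp, ketp.conj().T)
    assert _np.isclose(fidelity(rho, sigma), 0.5)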
def frobeniusdist(A, B):
"""
Returns the frobenius distance between gate
or density matrices A and B given by :
sqrt( sum( (A_ij-B_ij)^2 ) )
Parameters
----------
A : numpy array
First matrix.
B : numpy array
Second matrix.
Returns
-------
float
The resulting frobenius distance.
"""
return _mt.frobeniusnorm(A - B)
def frobeniusdist2(A, B):
"""
Returns the square of the frobenius distance between gate
or density matrices A and B given by :
sum( (A_ij-B_ij)^2 )
Parameters
----------
A : numpy array
First matrix.
B : numpy array
Second matrix.
Returns
-------
float
The resulting frobenius distance.
"""
return _mt.frobeniusnorm2(A - B)
def residuals(A, B):
"""
Calculate residuals between the elements of two matrices
Parameters
----------
A : numpy array
First matrix.
B : numpy array
Second matrix.
Returns
-------
np.array
residuals
"""
return (A - B).flatten()
def tracenorm(A):
"""
Compute the trace norm of matrix A given by:
Tr( sqrt{ A^dagger * A } )
Parameters
----------
A : numpy array
The matrix to compute the trace norm of.
"""
if _np.linalg.norm(A - _np.conjugate(A.T)) < 1e-8:
#Hermitian, so just sum eigenvalue magnitudes
return _np.sum(_np.abs(_np.linalg.eigvals(A)))
else:
#Sum of singular values (positive by construction)
return _np.sum(_np.linalg.svd(A, compute_uv=False))
def tracedist(A, B):
"""
Compute the trace distance between matrices A and B,
given by:
D = 0.5 * Tr( sqrt{ (A-B)^dagger * (A-B) } )
Parameters
----------
A, B : numpy array
The matrices to compute the distance between.
"""
return 0.5 * tracenorm(A - B)
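# Illustrative check (a sketch): orthogonal pure states are perfectly
# distinguishable, so their trace distance is 1.
def _tracedist_example():
    rho0 = _np.diag([1.0, 0.0])
    rho1 = _np.diag([0.0, 1.0])
    assert _np.isclose(tracedist(rho0, rho1), 1.0)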
def diamonddist(A, B, mxBasis='pp', return_x=False):
"""
Returns the approximate diamond norm describing the difference between gate
matrices A and B given by :
D = ||A - B ||_diamond = sup_rho || AxI(rho) - BxI(rho) ||_1
Parameters
----------
A, B : numpy array
The *gate* matrices to use when computing the diamond norm.
mxBasis : Basis object
The source and destination basis, respectively. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
return_x : bool, optional
Whether to return a numpy array encoding the state (rho) at
which the maximal trace distance occurs.
Returns
-------
dm : float
Diamond norm
W : numpy array
Only returned if `return_x = True`. Encodes the state rho, such that
`dm = trace( |(J(A)-J(B)).T * W| )`.
"""
mxBasis = _bt.build_basis_for_matrix(A, mxBasis)
#currently cvxpy is only needed for this function, so don't import until here
import cvxpy as _cvxpy
#Check if using version < 1.0
old_cvxpy = bool(tuple(map(int, _cvxpy.__version__.split('.'))) < (1, 0))
# This SDP implementation is a modified version of Kevin's code
#Compute the diamond norm
#Uses the primal SDP from arXiv:1207.5726v2, Sec 3.2
#Maximize 1/2 ( < J(phi), X > + < J(phi).dag, X.dag > )
#Subject to [[ I otimes rho0, X],
# [X.dag, I otimes rho1]] >> 0
# rho0, rho1 are density matrices
# X is linear operator
#Jamiolkowski representation of the process
# J(phi) = sum_ij Phi(Eij) otimes Eij
#< A, B > = Tr(A.dag B)
#def vec(matrix_in):
# # Stack the columns of a matrix to return a vector
# return _np.transpose(matrix_in).flatten()
#
#def unvec(vector_in):
# # Slice a vector into columns of a matrix
# d = int(_np.sqrt(vector_in.size))
# return _np.transpose(vector_in.reshape( (d,d) ))
#Code below assumes *un-normalized* Jamiol-isomorphism, so multiply by
# density mx dimension (`smallDim`) below
JAstd = _jam.fast_jamiolkowski_iso_std(A, mxBasis)
JBstd = _jam.fast_jamiolkowski_iso_std(B, mxBasis)
#Do this *after* the fast_jamiolkowski_iso calls above because these will convert
# A & B to a "single-block" basis representation when mxBasis has multiple blocks.
dim = JAstd.shape[0]
smallDim = int(_np.sqrt(dim))
JAstd *= smallDim # see above comment
JBstd *= smallDim # see above comment
assert(dim == JAstd.shape[1] == JBstd.shape[0] == JBstd.shape[1])
#CHECK: Kevin's jamiolowski, which implements the un-normalized isomorphism:
# smallDim * _jam.jamiolkowski_iso(M, "std", "std")
#def kevins_jamiolkowski(process, representation = 'superoperator'):
# # Return the Choi-Jamiolkowski representation of a quantum process
# # Add methods as necessary to accept different representations
# process = _np.array(process)
# if representation == 'superoperator':
# # Superoperator is the linear operator acting on vec(rho)
# dimension = int(_np.sqrt(process.shape[0]))
# print "dim = ",dimension
# jamiolkowski_matrix = _np.zeros([dimension**2, dimension**2], dtype='complex')
# for i in range(dimension**2):
# Ei_vec= _np.zeros(dimension**2)
# Ei_vec[i] = 1
# output = unvec(_np.dot(process,Ei_vec))
# tmp = _np.kron(output, unvec(Ei_vec))
# print "E%d = \n" % i,unvec(Ei_vec)
# #print "contrib =",_np.kron(output, unvec(Ei_vec))
# jamiolkowski_matrix += tmp
# return jamiolkowski_matrix
#JAstd_kev = jamiolkowski(A)
#JBstd_kev = jamiolkowski(B)
#print "diff A = ",_np.linalg.norm(JAstd_kev/2.0-JAstd)
#print "diff B = ",_np.linalg.norm(JBstd_kev/2.0-JBstd)
#Kevin's function: def diamondnorm( jamiolkowski_matrix ):
jamiolkowski_matrix = JBstd - JAstd
# Here we define a bunch of auxiliary matrices because CVXPY doesn't use complex numbers
K = jamiolkowski_matrix.real # J.real
L = jamiolkowski_matrix.imag # J.imag
if old_cvxpy:
Y = _cvxpy.Variable(dim, dim) # X.real
Z = _cvxpy.Variable(dim, dim) # X.imag
sig0 = _cvxpy.Variable(smallDim, smallDim) # rho0.real
sig1 = _cvxpy.Variable(smallDim, smallDim) # rho1.real
tau0 = _cvxpy.Variable(smallDim, smallDim) # rho1.imag
tau1 = _cvxpy.Variable(smallDim, smallDim) # rho1.imag
else:
Y = _cvxpy.Variable(shape=(dim, dim)) # X.real
Z = _cvxpy.Variable(shape=(dim, dim)) # X.imag
sig0 = _cvxpy.Variable(shape=(smallDim, smallDim)) # rho0.real
sig1 = _cvxpy.Variable(shape=(smallDim, smallDim)) # rho1.real
tau0 = _cvxpy.Variable(shape=(smallDim, smallDim)) # rho1.imag
tau1 = _cvxpy.Variable(shape=(smallDim, smallDim)) # rho1.imag
ident = _np.identity(smallDim, 'd')
objective = _cvxpy.Maximize(_cvxpy.trace(K.T * Y + L.T * Z))
constraints = [_cvxpy.bmat([
[_cvxpy.kron(ident, sig0), Y, -_cvxpy.kron(ident, tau0), -Z],
[Y.T, _cvxpy.kron(ident, sig1), Z.T, -_cvxpy.kron(ident, tau1)],
[_cvxpy.kron(ident, tau0), Z, _cvxpy.kron(ident, sig0), Y],
[-Z.T, _cvxpy.kron(ident, tau1), Y.T, _cvxpy.kron(ident, sig1)]]) >> 0,
_cvxpy.bmat([[sig0, -tau0],
[tau0, sig0]]) >> 0,
_cvxpy.bmat([[sig1, -tau1],
[tau1, sig1]]) >> 0,
sig0 == sig0.T,
sig1 == sig1.T,
tau0 == -tau0.T,
tau1 == -tau1.T,
_cvxpy.trace(sig0) == 1.,
_cvxpy.trace(sig1) == 1.]
prob = _cvxpy.Problem(objective, constraints)
try:
prob.solve(solver="CVXOPT")
# prob.solve(solver="ECOS")
# prob.solve(solver="SCS")#This always fails
except _cvxpy.error.SolverError as e:
_warnings.warn("CVXPY failed: %s - diamonddist returning -2!" % str(e))
return (-2, _np.zeros((dim, dim))) if return_x else -2
except:
_warnings.warn("CVXOPT failed (uknown err) - diamonddist returning -2!")
return (-2, _np.zeros((dim, dim))) if return_x else -2
#Validate result
#assert( abs(_np.trace(_np.dot(K.T,Y.value) + _np.dot(L.T,Z.value))-prob.value) < 1e-6 ), \
# "Diamondnorm mismatch"
if return_x:
X = Y.value + 1j * Z.value # encodes state at which maximum trace-distance occurs
return prob.value, X
else:
return prob.value
def jtracedist(A, B, mxBasis='pp'): # Jamiolkowski trace distance: Tr(|J(A)-J(B)|)
"""
Compute the Jamiolkowski trace distance between operation matrices A and B,
given by:
D = 0.5 * Tr( sqrt{ (J(A)-J(B))^2 } )
where J(.) is the Jamiolkowski isomorphism map that maps an operation matrix
to its corresponding Choi matrix.
Parameters
----------
A, B : numpy array
The matrices to compute the distance between.
mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
The source and destination basis, respectively. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
"""
JA = _jam.fast_jamiolkowski_iso_std(A, mxBasis)
JB = _jam.fast_jamiolkowski_iso_std(B, mxBasis)
return tracedist(JA, JB)
def entanglement_fidelity(A, B, mxBasis='pp'):
"""
Returns the "entanglement" process fidelity between gate
matrices A and B given by :
F = Tr( sqrt{ sqrt(J(A)) * J(B) * sqrt(J(A)) } )^2
where J(.) is the Jamiolkowski isomorphism map that maps an operation matrix
to its corresponding Choi matrix.
Parameters
----------
A, B : numpy array
The matrices to compute the fidelity between.
mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
The basis of the matrices. Allowed values are Matrix-unit (std),
Gell-Mann (gm), Pauli-product (pp), and Qutrit (qt)
(or a custom basis object).
"""
d2 = A.shape[0]
def isTP(x): return _np.isclose(x[0, 0], 1.0) and all(
[_np.isclose(x[0, i], 0) for i in range(1, d2)])
def isUnitary(x): return _np.allclose(_np.identity(d2, 'd'), _np.dot(x, x.conjugate().T))
if isTP(A) and isTP(B) and isUnitary(B): # then assume TP-like gates & use simpler formula
TrLambda = _np.trace(_np.dot(A, B.conjugate().T)) # same as using _np.linalg.inv(B)
d2 = A.shape[0]
return TrLambda / d2
JA = _jam.jamiolkowski_iso(A, mxBasis, mxBasis)
JB = _jam.jamiolkowski_iso(B, mxBasis, mxBasis)
return fidelity(JA, JB)
def average_gate_fidelity(A, B, mxBasis='pp'):
"""
Computes the average gate fidelity (AGF) between two gates.
Average gate fidelity (F_g) is related to entanglement fidelity
(F_p), via:
F_g = (d * F_p + 1)/(1 + d),
where d is the Hilbert space dimension. This formula, and the
definition of AGF, can be found in Phys. Lett. A 303 249-252 (2002).
Parameters
----------
A : array or gate
The gate to compute the AGI to B of. E.g., an imperfect
implementation of B.
B : array or gate
The gate to compute the AGI to A of. E.g., the target gate
corresponding to A.
mxBasis : {"std","gm","pp"} or Basis object, optional
The basis of the matrices.
Returns
-------
AGI : float
The AGI of A to B.
"""
d = int(round(_np.sqrt(A.shape[0])))
PF = entanglement_fidelity(A, B, mxBasis=mxBasis)
AGF = (d * PF + 1) / (1 + d)
return float(AGF)
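# Numeric illustration of the relation F_g = (d * F_p + 1) / (1 + d) quoted above
# (a sketch): for a single qubit (d = 2), an entanglement fidelity of 0.97
# corresponds to an average gate fidelity of (2 * 0.97 + 1) / 3 = 0.98.
def _agf_from_entanglement_fidelity(entanglement_fid=0.97, d=2):
    return (d * entanglement_fid + 1.0) / (1.0 + d)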
def average_gate_infidelity(A, B, mxBasis="gm"):
"""
Computes the average gate infidelity (AGI) between two gates.
Average gate infidelity is related to entanglement infidelity
(EI) via:
AGI = (d * (1-EI) + 1)/(1 + d),
where d is the Hilbert space dimension. This formula, and the
definition of AGI, can be found in Phys. Lett. A 303 249-252 (2002).
Parameters
----------
A : array or gate
The gate to compute the AGI to B of. E.g., an imperfect
implementation of B.
B : array or gate
The gate to compute the AGI to A of. E.g., the target gate
corresponding to A.
mxBasis : {"std","gm","pp"} or Basis object, optional
The basis of the matrices.
Returns
----------
AGI : float
The AGI of A to B.
"""
return 1 - average_gate_fidelity(A, B, mxBasis)
def entanglement_infidelity(A, B, mxBasis='pp'):
"""
Returns the entanglement infidelity (EI) between gate
matrices A and B given by :
EI = 1 - Tr( sqrt{ sqrt(J(A)) * J(B) * sqrt(J(A)) } )^2
where J(.) is the Jamiolkowski isomorphism map that maps an operation matrix
to its corresponding Choi matrix.
Parameters
----------
A, B : numpy array
The matrices to compute the fidelity between.
mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
The basis of the matrices. Allowed values are Matrix-unit (std),
Gell-Mann (gm), Pauli-product (pp), and Qutrit (qt)
(or a custom basis object).
Returns
-------
EI : float
The EI of A to B.
"""
return 1 - float(entanglement_fidelity(A, B, mxBasis))
def gateset_infidelity(mdl, target_model, itype='EI',
weights=None, mxBasis=None):
"""
Computes the average-over-gates of the infidelity between gates in `mdl`
and the gates in `target_model`. If `itype` is 'EI' then the "infidelity"
is the entanglement infidelity; if `itype` is 'AGI' then the "infidelity"
is the average gate infidelity (AGI and EI are related by a dimension
dependent constant).
This is the quantity that RB error rates are sometimes claimed to be
directly related to.
Parameters
----------
mdl : Model
The model to calculate the average infidelity, to `target_model`, of.
target_model : Model
The model to calculate the average infidelity, to `mdl`, of.
itype : str, optional
The infidelity type. Either 'EI', corresponding to entanglement
infidelity, or 'AGI', corresponding to average gate infidelity.
weights : dict, optional
If not None, a dictionary of floats, whereby the keys are the gates
in `mdl` and the values are, possibly unnormalized, probabilities.
These probabilities correspond to the weighting in the average,
so if the model contains gates A and B and weights[A] = 2 and
weights[B] = 1 then the output is Inf(A)*2/3 + Inf(B)/3 where
Inf(X) is the infidelity (to the corresponding element in the other
model) of X. If None, a uniform-average is taken, equivalent to
setting all the weights to 1.
mxBasis : {"std","gm","pp"} or Basis object, optional
The basis of the models. If None, the basis is obtained from
the model.
Returns
-------
float
The weighted average-over-gates infidelity between the two models.
"""
assert(itype == 'AGI' or itype == 'EI'), \
"The infidelity type must be `AGI` (average gate infidelity) or `EI` (entanglement infidelity)"
if mxBasis is None: mxBasis = mdl.basis
sum_of_weights = 0
I_list = []
for gate in list(target_model.operations.keys()):
if itype == 'AGI':
I = average_gate_infidelity(mdl.operations[gate], target_model.operations[gate], mxBasis=mxBasis)
if itype == 'EI':
I = entanglement_infidelity(mdl.operations[gate], target_model.operations[gate], mxBasis=mxBasis)
if weights is None:
w = 1
else:
w = weights[gate]
I_list.append(w * I)
sum_of_weights += w
assert(sum_of_weights > 0), "The sum of the weights should be positive!"
AI = _np.sum(I_list) / sum_of_weights
return AI
def unitarity(A, mxBasis="gm"):
"""
Returns the "unitarity" of a channel, as defined in Wallman et al,
``Estimating the Coherence of noise'' NJP 17 113020 (2015). The
unitarity is given by (Prop 1 in Wallman et al):
u(A) = Tr( A_u^{\dagger} A_u ) / (d^2 - 1),
where A_u is the unital submatrix of A, and d is the dimension of
the Hilbert space. When A is written in any basis for which the
first element is the normalized identity (e.g., the pp or gm
bases), The unital submatrix of A is the matrix obtained when the
top row and left hand column is removed from A.
Parameters
----------
A : array or gate
The gate for which the unitarity is to be computed.
mxBasis : {"std","gm","pp"} or a Basis object, optional
The basis of the matrix.
Returns
----------
u : float
The unitarity of the gate A.
"""
d = int(round(_np.sqrt(A.shape[0])))
basisMxs = _bt.basis_matrices(mxBasis, A.shape[0])
if _np.allclose(basisMxs[0], _np.identity(d, 'd')):
B = A
else:
B = _bt.change_basis(A, mxBasis, "gm") # everything should be able to be put in the "gm" basis
unital = B[1:d**2, 1:d**2]
u = _np.trace(_np.dot(_np.conj(_np.transpose(unital)), unital)) / (d**2 - 1)
return u
def fidelity_upper_bound(operationMx):
"""
Get an upper bound on the fidelity of the given
operation matrix with any unitary operation matrix.
The closeness of the result to one tells
how "unitary" the action of operationMx is.
Parameters
----------
operationMx : numpy array
The operation matrix to act on.
Returns
-------
float
The resulting upper bound on fidelity(operationMx, anyUnitaryGateMx)
"""
choi = _jam.jamiolkowski_iso(operationMx, choiMxBasis="std")
choi_evals, choi_evecs = _np.linalg.eig(choi)
maxF_direct = max([_np.sqrt(max(ev.real, 0.0)) for ev in choi_evals]) ** 2
iMax = _np.argmax([ev.real for ev in choi_evals]) # index of maximum eigenval
closestVec = choi_evecs[:, iMax:(iMax + 1)]
# #print "DEBUG: closest evec = ", closestUnitaryVec
# new_evals = _np.zeros( len(closestUnitaryVec) ); new_evals[iClosestU] = 1.0
# # gives same result:
# closestUnitaryJmx = _np.dot(choi_evecs, _np.dot( _np.diag(new_evals), _np.linalg.inv(choi_evecs) ) )
closestJmx = _np.kron(closestVec, _np.transpose(_np.conjugate(closestVec))) # closest rank-1 Jmx
closestJmx /= _mt.trace(closestJmx) # normalize so trace of Jmx == 1.0
maxF = fidelity(choi, closestJmx)
if not _np.isnan(maxF):
#Uncomment for debugging
#if abs(maxF - maxF_direct) >= 1e-6:
# print "DEBUG: operationMx:\n",operationMx
# print "DEBUG: choiMx:\n",choi
# print "DEBUG choi_evals = ",choi_evals, " iMax = ",iMax
# #print "DEBUG: J = \n", closestUnitaryJmx
# print "DEBUG: eigvals(J) = ", _np.linalg.eigvals(closestJmx)
# print "DEBUG: trace(J) = ", _mt.trace(closestJmx)
# print "DEBUG: maxF = %f, maxF_direct = %f" % (maxF, maxF_direct)
# raise ValueError("ERROR: maxF - maxF_direct = %f" % (maxF -maxF_direct))
assert(abs(maxF - maxF_direct) < 1e-6)
else:
maxF = maxF_direct # case when maxF is nan, due to scipy sqrtm function being buggy - just use direct F
closestOpMx = _jam.jamiolkowski_iso_inv(closestJmx, choiMxBasis="std")
return maxF, closestOpMx
#closestU_evals, closestU_evecs = _np.linalg.eig(closestUnitaryGateMx)
#print "DEBUG: U = \n", closestUnitaryGateMx
#print "DEBUG: closest U evals = ",closestU_evals
#print "DEBUG: evecs = \n",closestU_evecs
def get_povm_map(model, povmlbl):
"""
Constructs a gate-like quantity for the POVM within `model`.
This is done by embedding the `k`-outcome classical output space of the POVM
in the Hilbert-Schmidt space of `k` by `k` density matrices by placing the
classical probability distribution along the diagonal of the density matrix.
Currently, this is only implemented for the case when `k` equals `d`, the
dimension of the POVM's Hilbert space.
Parameters
----------
model : Model
The model supplying the POVM effect vectors and the basis those
vectors are in.
povmlbl : str
The POVM label
Returns
-------
numpy.ndarray
The matrix of the "POVM map" in the `model.basis` basis.
"""
povmVectors = [v.todense()[:, None] for v in model.povms[povmlbl].values()]
if isinstance(model.basis, _DirectSumBasis): # HACK - need to get this to work with general bases
blkDims = [int(_np.sqrt(comp.dim)) for comp in model.basis.component_bases]
else:
blkDims = [int(round(_np.sqrt(model.dim)))] # [d] where density matrix is dxd
nV = len(povmVectors)
#assert(d**2 == model.dim), "Model dimension (%d) is not a perfect square!" % model.dim
#assert( nV**2 == d ), "Can only compute POVM metrics when num of effects == H space dimension"
# I don't think above assert is needed - should work in general (Robin?)
povm_mx = _np.concatenate(povmVectors, axis=1).T # "povm map" ( B(H) -> S_k ) (shape= nV,model.dim)
Sk_embedding_in_std = _np.zeros((model.dim, nV))
for i in range(nV):
Sk_embedding_in_std[:, i] = _flat_mut_blks(i, i, blkDims)
std_to_basis = model.basis.reverse_transform_matrix("std") # _bt.transform_matrix("std", model.basis, blkDims)
assert(std_to_basis.shape == (model.dim, model.dim))
return _np.dot(std_to_basis, _np.dot(Sk_embedding_in_std, povm_mx))
def povm_fidelity(model, targetModel, povmlbl):
"""
Computes the process (entanglement) fidelity between POVM maps.
Parameters
----------
model, targetModel : Model
Models containing the two POVMs to compare.
povmlbl : str
The POVM label
Returns
-------
float
"""
povm_mx = get_povm_map(model, povmlbl)
target_povm_mx = get_povm_map(targetModel, povmlbl)
return entanglement_fidelity(povm_mx, target_povm_mx, targetModel.basis)
def povm_jtracedist(model, targetModel, povmlbl):
"""
Computes the Jamiolkowski trace distance between POVM maps using :func:`jtracedist`.
Parameters
----------
model, targetModel : Model
Models containing the two POVMs to compare.
povmlbl : str
The POVM label
Returns
-------
float
"""
povm_mx = get_povm_map(model, povmlbl)
target_povm_mx = get_povm_map(targetModel, povmlbl)
return jtracedist(povm_mx, target_povm_mx, targetModel.basis)
def povm_diamonddist(model, targetModel, povmlbl):
"""
Computes the diamond distance between POVM maps using :func:`diamonddist`.
Parameters
----------
model, targetModel : Model
Models containing the two POVMs to compare.
povmlbl : str
The POVM label
Returns
-------
float
"""
povm_mx = get_povm_map(model, povmlbl)
target_povm_mx = get_povm_map(targetModel, povmlbl)
return diamonddist(povm_mx, target_povm_mx, targetModel.basis)
#decompose operation matrix into axis of rotation, etc
def decompose_gate_matrix(operationMx):
"""
Compute how the action of an operation matrix can be
decomposed into fixed points, axes of rotation,
angles of rotation, and decays. Also determines
whether a gate appears to be valid and/or unitary.
Parameters
----------
operationMx : numpy array
The operation matrix to act on.
Returns
-------
dict
A dictionary describing the decomposed action. Keys are:
'isValid' : bool
whether decomposition succeeded
'isUnitary' : bool
whether operationMx describes unitary action
'fixed point' : numpy array
the fixed point of the action
'axis of rotation' : numpy array or nan
the axis of rotation
'decay of diagonal rotation terms' : float
decay of diagonal terms
'rotating axis 1' : numpy array or nan
1st axis orthogonal to axis of rotation
'rotating axis 2' : numpy array or nan
2nd axis orthogonal to axis of rotation
'decay of off diagonal rotation terms' : float
decay of off-diagonal terms
'pi rotations' : float
angle of rotation in units of pi radians
"""
op_evals, op_evecs = _np.linalg.eig(_np.asarray(operationMx))
# fp_eigenvec = None
# aor_eval = None; aor_eigenvec = None
# ra_eval = None; ra1_eigenvec = None; ra2_eigenvec = None
TOL = 1e-4 # 1e-7
unit_eval_indices = [i for (i, ev) in enumerate(op_evals) if abs(ev - 1.0) < TOL]
#unit_eval_indices = [ i for (i,ev) in enumerate(op_evals) if ev > (1.0-TOL) ]
conjpair_eval_indices = []
for (i, ev) in enumerate(op_evals):
if i in unit_eval_indices: continue # don't include the unit eigenvalues in the conjugate pair count
# don't include existing conjugate pairs
if any([(i in conjpair) for conjpair in conjpair_eval_indices]): continue
for (j, ev2) in enumerate(op_evals[i + 1:]):
if abs(ev - _np.conjugate(ev2)) < TOL:
conjpair_eval_indices.append((i, j + (i + 1)))
break # don't pair i-th eigenvalue with any other (pairs should be disjoint)
real_eval_indices = [] # indices of real eigenvalues that are not units or a part of any conjugate pair
complex_eval_indices = [] # indices of complex eigenvalues that are not units or a part of any conjugate pair
for (i, ev) in enumerate(op_evals):
if i in unit_eval_indices: continue # don't include the unit eigenvalues
if any([(i in conjpair) for conjpair in conjpair_eval_indices]): continue # don't include the conjugate pairs
if abs(ev.imag) < TOL: real_eval_indices.append(i)
else: complex_eval_indices.append(i)
#if len(real_eval_indices + unit_eval_indices) > 0:
# max_real_eval = max([ op_evals[i] for i in real_eval_indices + unit_eval_indices])
# min_real_eval = min([ op_evals[i] for i in real_eval_indices + unit_eval_indices])
#else:
# max_real_eval = _np.nan
# min_real_eval = _np.nan
#
#fixed_points = [ op_evecs[:,i] for i in unit_eval_indices ]
#real_eval_axes = [ op_evecs[:,i] for i in real_eval_indices ]
#conjpair_eval_axes = [ (op_evecs[:,i],op_evecs[:,j]) for (i,j) in conjpair_eval_indices ]
#
#ret = { }
nQubits = _np.log2(operationMx.shape[0]) / 2
if nQubits == 1:
#print "DEBUG: 1 qubit decomp --------------------------"
#print " --> evals = ", op_evals
#print " --> unit eval indices = ", unit_eval_indices
#print " --> conj eval indices = ", conjpair_eval_indices
#print " --> unpaired real eval indices = ", real_eval_indices
#Special case: if have two conjugate pairs, check if one (or both) are real
# and break the one with the largest (real) value into two unpaired real evals.
if len(conjpair_eval_indices) == 2:
iToBreak = None
if abs(_np.imag(op_evals[conjpair_eval_indices[0][0]])) < TOL and \
abs(_np.imag(op_evals[conjpair_eval_indices[1][0]])) < TOL:
iToBreak = _np.argmax([_np.real(conjpair_eval_indices[0][0]), _np.real(conjpair_eval_indices[1][0])])
elif abs(_np.imag(op_evals[conjpair_eval_indices[0][0]])) < TOL: iToBreak = 0
elif abs(_np.imag(op_evals[conjpair_eval_indices[1][0]])) < TOL: iToBreak = 1
if iToBreak is not None:
real_eval_indices.append(conjpair_eval_indices[iToBreak][0])
real_eval_indices.append(conjpair_eval_indices[iToBreak][1])
del conjpair_eval_indices[iToBreak]
#Find eigenvector corresponding to fixed point (or closest we can get). This
# should be a unit eigenvalue with identity eigenvector.
if len(unit_eval_indices) > 0:
#Find linear least squares solution within possibly degenerate unit-eigenvalue eigenspace
# of eigenvector closest to identity density mx (the desired fixed point), then orthogonalize
# the remaining eigenvectors w.r.t this one.
A = _np.take(op_evecs, unit_eval_indices, axis=1)
b = _np.array([[1], [0], [0], [0]], 'd') # identity density mx
x = _np.dot(_np.linalg.pinv(_np.dot(A.T, A)), _np.dot(A.T, b))
fixedPtVec = _np.dot(A, x) # fixedPtVec / _np.linalg.norm(fixedPtVec)
fixedPtVec = fixedPtVec[:, 0]
iLargestContrib = _np.argmax(_np.abs(x)) # index of gate eigenvector which contributed the most
for ii, i in enumerate(unit_eval_indices):
if ii == iLargestContrib:
op_evecs[:, i] = fixedPtVec
iFixedPt = i
else:
op_evecs[:, i] = op_evecs[:, i] - _np.vdot(fixedPtVec, op_evecs[:, i]) * fixedPtVec
for jj, j in enumerate(unit_eval_indices[:ii]):
if jj == iLargestContrib: continue
op_evecs[:, i] = op_evecs[:, i] - _np.vdot(op_evecs[:, j], op_evecs[:, i]) * op_evecs[:, j]
op_evecs[:, i] /= _np.linalg.norm(op_evecs[:, i])
elif len(real_eval_indices) > 0:
# just take eigenvector corresponding to the largest real eigenvalue?
#iFixedPt = real_eval_indices[ _np.argmax( [ op_evals[i] for i in real_eval_indices ] ) ]
# ...OR take eigenvector corresponding to a real unpaired eigenvalue closest to identity:
idmx = _np.array([[1], [0], [0], [0]], 'd') # identity density mx
iFixedPt = real_eval_indices[_np.argmin([_np.linalg.norm(op_evecs[i] - idmx) for i in real_eval_indices])]
else:
#No unit or real eigenvalues => two complex conjugate pairs or unpaired complex evals --> bail out
return {'isValid': False, 'isUnitary': False, 'msg': "All evals are complex."}
#Find eigenvector corresponding to axis of rotation: find the *largest* unpaired real/unit eval
indsToConsider = (unit_eval_indices + real_eval_indices)[:]
del indsToConsider[indsToConsider.index(iFixedPt)] # don't consider fixed pt evec
if len(indsToConsider) > 0:
iRotAxis = indsToConsider[_np.argmax([op_evals[i] for i in indsToConsider])]
else:
#No unit or real eigenvalues => an unpaired complex eval --> bail out
return {'isValid': False, 'isUnitary': False, 'msg': "Unpaired complex eval."}
#There are only 2 eigenvalues left -- hopefully a conjugate pair giving rotation
inds = list(range(4))
del inds[inds.index(iFixedPt)]
del inds[inds.index(iRotAxis)]
if abs(op_evals[inds[0]] - _np.conjugate(op_evals[inds[1]])) < TOL:
iConjPair1, iConjPair2 = inds
else:
return {'isValid': False, 'isUnitary': False, 'msg': "No conjugate pair for rotn."}
return {'isValid': True,
'isUnitary': bool(len(unit_eval_indices) >= 2),
'fixed point': op_evecs[:, iFixedPt],
'axis of rotation': op_evecs[:, iRotAxis],
'rotating axis 1': op_evecs[:, iConjPair1],
'rotating axis 2': op_evecs[:, iConjPair2],
'decay of diagonal rotation terms': 1.0 - abs(op_evals[iRotAxis]),
'decay of off diagonal rotation terms': 1.0 - abs(op_evals[iConjPair1]),
'pi rotations': _np.angle(op_evals[iConjPair1]) / _np.pi,
'msg': "Success"}
else:
return {'isValid': False,
'isUnitary': False,
'msg': "Unsupported number of qubits: %d" % nQubits}
def state_to_dmvec(psi):
"""
Compute the vectorized density matrix which acts as the state `psi`.
This is just the outer product map |psi> => |psi><psi| with the
output flattened, i.e. `dot(psi, conjugate(psi).T)`.
Parameters
----------
psi : numpy array
The state vector.
Returns
-------
numpy array
The vectorized density matrix.
"""
psi = psi.reshape((psi.size, 1)) # convert to (N,1) shape if necessary
dm = _np.dot(psi, _np.conjugate(psi.T))
return dm.flatten()
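# Illustrative check (a sketch): |0> maps to the flattened |0><0|, i.e. (1, 0, 0, 0).
def _state_to_dmvec_example():
    ket0 = _np.array([1.0, 0.0])
    assert _np.allclose(state_to_dmvec(ket0), [1.0, 0.0, 0.0, 0.0])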
def dmvec_to_state(dmvec, tol=1e-6):
"""
Compute the pure state describing the action of density matrix vector `dmvec`.
If `dmvec` represents a mixed state, ValueError is raised.
Parameters
----------
dmvec : numpy array
The vectorized density matrix, assumed to be in the standard (matrix
unit) basis.
tol : float, optional
tolerance for determining whether an eigenvalue is zero.
Returns
-------
numpy array
The pure state, as a column vector of shape = (N,1)
"""
d2 = dmvec.size; d = int(round(_np.sqrt(d2)))
dm = dmvec.reshape((d, d))
evals, evecs = _np.linalg.eig(dm)
k = None
for i, ev in enumerate(evals):
if abs(ev) > tol:
if k is None: k = i
else: raise ValueError("Cannot convert mixed dmvec to pure state!")
if k is None: raise ValueError("Cannot convert zero dmvec to pure state!")
psi = evecs[:, k] * _np.sqrt(evals[k])
psi.shape = (d, 1)
return psi
def unitary_to_process_mx(U):
"""
Compute the super-operator which acts on (row)-vectorized
density matrices from a unitary operator (matrix) U which
acts on state vectors. This super-operator is given by
the tensor product of U and conjugate(U), i.e. kron(U,U.conj).
Parameters
----------
U : numpy array
The unitary matrix which acts on state vectors.
Returns
-------
numpy array
The super-operator process matrix.
"""
# U -> kron(U,Uc) since U rho U_dag -> kron(U,Uc)
# since AXB --row-vectorize--> kron(A,B.T)*vec(X)
return _np.kron(U, _np.conjugate(U))
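# Illustrative check (a sketch): applying kron(U, U.conj) to the row-vectorized
# |0><0| reproduces the row-vectorized U |0><0| U^dag (here U is Pauli-X).
def _unitary_to_process_mx_example():
    U = _np.array([[0.0, 1.0], [1.0, 0.0]], 'complex')    # Pauli-X
    rho = _np.array([[1.0, 0.0], [0.0, 0.0]], 'complex')  # |0><0|
    superop = unitary_to_process_mx(U)
    evolved = _np.dot(superop, rho.flatten()).reshape(2, 2)
    assert _np.allclose(evolved, _np.dot(U, _np.dot(rho, U.conj().T)))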
def process_mx_to_unitary(superop):
"""
Compute the unitary corresponding to the (unitary-action!)
super-operator `superop` which acts on (row)-vectorized
density matrices. The super-operator must be of the form
`kron(U,U.conj)` or an error will be thrown.
Parameters
----------
superop : numpy array
The superoperator matrix which acts on vectorized
density matrices (in the 'std' matrix-unit basis).
Returns
-------
numpy array
The unitary matrix which acts on state vectors.
"""
d2 = superop.shape[0]; d = int(round(_np.sqrt(d2)))
U = _np.empty((d, d), 'complex')
for i in range(d):
densitymx_i = _np.zeros((d, d), 'd'); densitymx_i[i, i] = 1.0 # |i><i|
UiiU = _np.dot(superop, densitymx_i.flat).reshape((d, d)) # U|i><i|U^dag
if i > 0:
j = 0
densitymx_ij = _np.zeros((d, d), 'd'); densitymx_ij[i, j] = 1.0 # |i><j|
UijU = _np.dot(superop, densitymx_ij.flat).reshape((d, d)) # U|i><j|U^dag
Uj = U[:, j]
Ui = _np.dot(UijU, Uj)
else:
##method1: use random state projection
#rand_state = _np.random.rand(d)
#projected_rand_state = _np.dot(UiiU, rand_state)
#assert(_np.linalg.norm(projected_rand_state) > 1e-8)
#projected_rand_state /= _np.linalg.norm(projected_rand_state)
#Ui = projected_rand_state
#method2: get eigenvector corresponding to largest eigenvalue (more robust)
evals, evecs = _np.linalg.eig(UiiU)
imaxeval = _np.argmax(_np.abs(evals))
#TODO: assert other eigenvalues are much smaller?
Ui = evecs[:, imaxeval]
Ui /= _np.linalg.norm(Ui)
U[:, i] = Ui
return U
def spam_error_generator(spamvec, target_spamvec, mxBasis, typ="logGTi"):
"""
Construct an error generator from a SPAM vector and its target.
Computes the value of the error generator given by
`errgen = log( diag(spamvec / target_spamvec) )`, where division is
element-wise. This results in a (non-unique) error generator matrix
`E` such that `spamvec = exp(E) * target_spamvec`.
Note: This is currently of very limited use, as the above algorithm fails
whenever `target_spamvec` has zero elements where `spamvec` doesn't.
Parameters
----------
spamvec : ndarray
The SPAM vector.
target_spamvec : ndarray
The target SPAM vector.
mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
The source and destination basis, respectively. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
typ : {"logGTi"}
The type of error generator to compute. Allowed values are:
- "logGTi" : errgen = log( diag(spamvec / target_spamvec) )
Returns
-------
errgen : ndarray
The error generator.
"""
# Compute error generator for rho: rho = exp(E)rho0 => rho = A*rho0 => A = diag(rho/rho0)
assert(typ == "logGTi"), "Only logGTi type is supported so far"
d2 = len(spamvec)
errgen = _np.zeros((d2, d2), 'd') # type assumes this is density-mx evolution
diags = []
for a, b in zip(spamvec, target_spamvec):
if _np.isclose(b, 0.0):
if _np.isclose(a, b): d = 1
else: raise ValueError("Cannot take spam_error_generator")
else:
d = a / b
diags.append(d)
errgen[_np.diag_indices(d2)] = diags
return _spl.logm(errgen)
def error_generator(gate, target_op, mxBasis, typ="logG-logT"):
"""
Construct the error generator from a gate and its target.
Computes the value of the error generator given by
errgen = log( inv(target_op) * gate ), so that
gate = target_op * exp(errgen).
Parameters
----------
gate : ndarray
The operation matrix
target_op : ndarray
The target operation matrix
mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
The source and destination basis, respectively. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
typ : {"logG-logT", "logTiG", "logGTi"}
The type of error generator to compute. Allowed values are:
- "logG-logT" : errgen = log(gate) - log(target_op)
- "logTiG" : errgen = log( dot(inv(target_op), gate) )
- "logGTi" : errgen = log( dot(gate,inv(target_op)) )
Returns
-------
errgen : ndarray
The error generator.
"""
TOL = 1e-8
if typ == "logG-logT":
try:
logT = _mt.unitary_superoperator_matrix_log(target_op, mxBasis)
except AssertionError: # if not unitary, fall back to just taking the real log
logT = _mt.real_matrix_log(target_op, "raise", TOL) # make a fuss if this can't be done
logG = _mt.approximate_matrix_log(gate, logT)
# Both logG and logT *should* be real, so we just take the difference.
if _np.linalg.norm(_np.imag(logG)) < TOL and \
_np.linalg.norm(_np.imag(logT)) < TOL:
return _np.real(logG - logT)
#Otherwise, there could be branch cut issues or worse, so just
# raise an error for now (maybe return a dummy if needed elsewhere?)
raise ValueError("Could not construct a real logarithms for the "
"'logG-logT' generator. Perhaps you should use "
"the 'logTiG' or 'logGTi' generator instead?")
elif typ == "logTiG":
target_op_inv = _spl.inv(target_op)
try:
errgen = _mt.near_identity_matrix_log(_np.dot(target_op_inv, gate), TOL)
except AssertionError: # not near the identity, fall back to the real log
_warnings.warn(("Near-identity matrix log failed; falling back "
"to approximate log for logTiG error generator"))
errgen = _mt.real_matrix_log(_np.dot(target_op_inv, gate), "warn", TOL)
if _np.linalg.norm(errgen.imag) > TOL:
_warnings.warn("Falling back to approximate log for logTiG error generator")
errgen = _mt.approximate_matrix_log(_np.dot(target_op_inv, gate),
_np.zeros(gate.shape, 'd'), TOL=TOL)
elif typ == "logGTi":
target_op_inv = _spl.inv(target_op)
try:
errgen = _mt.near_identity_matrix_log(_np.dot(gate, target_op_inv), TOL)
except AssertionError as e: # not near the identity, fall back to the real log
_warnings.warn(("Near-identity matrix log failed; falling back "
"to approximate log for logGTi error generator:\n%s") % str(e))
errgen = _mt.real_matrix_log(_np.dot(gate, target_op_inv), "warn", TOL)
if _np.linalg.norm(errgen.imag) > TOL:
_warnings.warn("Falling back to approximate log for logGTi error generator")
errgen = _mt.approximate_matrix_log(_np.dot(gate, target_op_inv),
_np.zeros(gate.shape, 'd'), TOL=TOL)
else:
raise ValueError("Invalid error-generator type: %s" % typ)
if _np.linalg.norm(_np.imag(errgen)) > TOL:
raise ValueError("Could not construct a real generator!")
#maybe this is actually ok, but a complex error generator will
# need to be plotted differently, etc -- TODO
return _np.real(errgen)
def operation_from_error_generator(error_gen, target_op, typ="logG-logT"):
"""
Construct a gate from an error generator and a target gate.
Inverts the computation done in :func:`error_generator` and
returns the value of the gate given by
gate = target_op * exp(error_gen).
Parameters
----------
error_gen : ndarray
The error generator matrix
target_op : ndarray
The target operation matrix
typ : {"logG-logT", "logTiG"}
The type of error generator to compute. Allowed values are:
- "logG-logT" : errgen = log(gate) - log(target_op)
- "logTiG" : errgen = log( dot(inv(target_op), gate) )
Returns
-------
ndarray
The operation matrix.
"""
if typ == "logG-logT":
return _spl.expm(error_gen + _spl.logm(target_op))
elif typ == "logTiG":
return _np.dot(target_op, _spl.expm(error_gen))
elif typ == "logGTi":
return _np.dot(_spl.expm(error_gen), target_op)
else:
raise ValueError("Invalid error-generator type: %s" % typ)
def std_scale_factor(dim, projection_type):
"""
Returns the multiplicative scaling that should be applied to the output of
:func"`std_error_generators`, before using them as projectors, in order to
compute the "standard" reported projection onto that type of error (i.e.
the coefficient of the standard generator terms built un-normalized-Paulis).
Parameters
----------
dim : int
The dimension of the error generators; also the associated gate
dimension. This must be a perfect square, as `sqrt(dim)`
is the dimension of density matrices. For a single qubit, dim == 4.
projection_type : {"hamiltonian", "stochastic", "affine"}
The type/class of error generators to get the scaling for.
Returns
-------
float
"""
d2 = dim
d = int(_np.sqrt(d2))
if projection_type == "hamiltonian":
scaleFctr = 1.0 / (d * _np.sqrt(2))
# so projection is coefficient of Hamiltonian term (w/un-normalized Paulis)
elif projection_type == "stochastic":
scaleFctr = 1.0 / d
# so projection is coefficient of P*rho*P stochastic term in generator (w/un-normalized Paulis)
elif projection_type == "affine":
scaleFctr = 1.0 # so projection is coefficient of P affine term in generator (w/un-normalized Paulis)
else:
raise ValueError("Invalid projection_type argument: %s"
% projection_type)
return scaleFctr
def std_error_generators(dim, projection_type, projection_basis):
"""
Compute the gate error generators for a standard set of errors which
correspond to "Hamiltonian"- or "Stochastic"-type errors in terms of the
elements of the specified basis.
Parameters
----------
dim : int
The dimension of the error generators to be returned. This is also the
associated gate dimension, and must be a perfect square, as `sqrt(dim)`
is the dimension of density matrices. For a single qubit, dim == 4.
projection_type : {"hamiltonian", "stochastic", "affine"}
The type of error generators to construct. If "hamiltonian", then the
Hamiltonian generators which take a density matrix rho -> -i*[ H, rho ]
for Pauli-product matrix H. If "stochastic", then the Stochastic error
generators which take rho -> P*rho*P for Pauli-product matrix P. If
"affine", then the affine generators which take rho -> P.
projection_basis : {'std', 'gm', 'pp', 'qt'}
Which basis is used to construct the error generators. Allowed
values are Matrix-unit (std), Gell-Mann (gm),
Pauli-product (pp) and Qutrit (qt).
Returns
-------
generators : numpy.ndarray
An array of shape (#basis-elements,dim,dim). `generators[i]` is the
generator corresponding to the ith basis matrix in the
*std* (matrix unit) basis. (Note that in most cases #basis-elements
== dim, so the size of `generators` is (dim,dim,dim) ). Each
generator is normalized so that as a vector it has unit Frobenius norm.
"""
d2 = dim
d = int(_np.sqrt(d2))
#Get a list of the basis matrices
mxs = _bt.basis_matrices(projection_basis, d2)
assert(len(mxs) <= d2) # OK if there are fewer basis matrices (e.g. for bases w/multiple blocks)
assert(_np.isclose(d * d, d2)) # d2 must be a perfect square
lindbladMxs = _np.empty((len(mxs), d2, d2), 'complex')
for i, basisMx in enumerate(mxs):
if projection_type == "hamiltonian":
lindbladMxs[i] = _lt.hamiltonian_to_lindbladian(basisMx) # in std basis
elif projection_type == "stochastic":
lindbladMxs[i] = _lt.stochastic_lindbladian(basisMx) # in std basis
elif projection_type == "affine":
lindbladMxs[i] = _lt.affine_lindbladian(basisMx) # in std basis
else:
raise ValueError("Invalid projection_type argument: %s"
% projection_type)
norm = _np.linalg.norm(lindbladMxs[i].flat)
if not _np.isclose(norm, 0):
lindbladMxs[i] /= norm # normalize projector
assert(_np.isclose(_np.linalg.norm(lindbladMxs[i].flat), 1.0))
return lindbladMxs
def std_errgen_projections(errgen, projection_type, projection_basis,
mxBasis="gm", return_generators=False,
return_scale_fctr=False):
"""
Compute the projections of a gate error generator onto generators
for a standard set of errors constructed from the elements of a
specified basis.
Parameters
----------
errgen: : ndarray
The error generator matrix to project.
projection_type : {"hamiltonian", "stochastic", "affine"}
The type of error generators to project the gate error generator onto.
If "hamiltonian", then use the Hamiltonian generators which take a density
matrix rho -> -i*[ H, rho ] for Pauli-product matrix H. If "stochastic",
then use the Stochastic error generators which take rho -> P*rho*P for
Pauli-product matrix P (recall P is self adjoint). If "affine", then
use the affine error generators which take rho -> P (superop is |P>><<1|).
projection_basis : {'std', 'gm', 'pp', 'qt'} or Basis object
The basis whose elements are used to construct the error generators that
are projected onto. Allowed values are Matrix-unit (std), Gell-Mann (gm),
Pauli-product (pp), and Qutrit (qt) (or a custom basis object).
mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
The basis in which `errgen` is expressed. Allowed values are
Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
return_generators : bool, optional
If True, return the error generators projected against along with the
projection values themselves.
return_scale_fctr : bool, optional
If True, also return the scaling factor that was used to multiply the
projections onto *normalized* error generators to get the returned
values.
Returns
-------
projections : numpy.ndarray
An array of length equal to the number of elements in the
basis used to construct the projectors. Typically this is
also the dimension of the gate (e.g. 4 for a single qubit).
generators : numpy.ndarray
Only returned when `return_generators == True`. An array of shape
(#basis-els,op_dim,op_dim) such that `generators[i]` is the
generator corresponding to the i-th basis element. Note
that these matrices are in the *std* (matrix unit) basis.
scale : float
Only returned when `return_scale_fctr == True`. A multiplicative
scaling constant that *has already been applied* to `projections`.
"""
if isinstance(mxBasis, _Basis):
errgen_std = _bt.change_basis(errgen, mxBasis, mxBasis.equivalent('std'))
#expand operation matrix so it acts on entire space of dmDim x dmDim density matrices
errgen_std = _bt.resize_std_mx(errgen_std, 'expand', mxBasis.equivalent('std'),
mxBasis.simple_equivalent('std'))
else:
errgen_std = _bt.change_basis(errgen, mxBasis, "std")
d2 = errgen_std.shape[0]
d = int(_np.sqrt(d2))
# nQubits = _np.log2(d)
#Get a list of the d2 generators (in correspondence with the
# Pauli-product matrices given by _basis.pp_matrices(d) ).
lindbladMxs = std_error_generators(d2, projection_type, projection_basis) # in std basis
assert(len(lindbladMxs) <= d2) # can be fewer projection matrices (== length of projection_basis)
assert(_np.isclose(d * d, d2)) # d2 must be a perfect square
projections = _np.empty(len(lindbladMxs), 'd')
for i, lindbladMx in enumerate(lindbladMxs):
proj = _np.real_if_close(_np.vdot(errgen_std.flatten(), lindbladMx.flatten()), tol=1000)
# # DEBUG - for checking why perfect gates gave weird projections --> log ambiguity
# print("DB: rawproj(%d) = " % i, proj)
# errgen_pp = errgen.copy() #_bt.change_basis(errgen_std,"std","pp")
# lindbladMx_pp = _bt.change_basis(lindbladMx,"std","pp")
# if proj > 1.0:
# for k in range(errgen_std.shape[0]):
# for j in range(errgen_std.shape[1]):
# if abs(errgen_pp[k,j].conjugate() * lindbladMx_pp[k,j]) > 1e-2:
# print(" [%d,%d]: + " % (k,j), errgen_pp[k,j].conjugate(),
# "*", lindbladMx_pp[k,j],
# "=", (errgen_pp[k,j].conjugate() * lindbladMx_pp[i,j]))
#assert(_np.isreal(proj)), "non-real projection: %s" % str(proj) #just a warning now
if not _np.isreal(proj):
_warnings.warn("Taking abs() of non-real projection: %s" % str(proj))
proj = abs(proj)
projections[i] = proj
scaleFctr = std_scale_factor(d2, projection_type)
projections *= scaleFctr
lindbladMxs /= scaleFctr # so projections * generators give original
ret = [projections]
if return_generators: ret.append(lindbladMxs)
if return_scale_fctr: ret.append(scaleFctr)
return ret[0] if len(ret) == 1 else tuple(ret)
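# Illustrative sketch: a hypothetical helper (not part of the module API) showing
# one way std_errgen_projections could be exercised. The helper name and the 0.1
# coefficient are assumptions; the expected values are approximate, not guaranteed.
def _example_std_errgen_projections():
    """Sketch: recover the coefficient of a hand-built Hamiltonian-type error generator."""
    gens = std_error_generators(4, "hamiltonian", "pp")  # normalized, in the std basis
    errgen_std = 0.1 * gens[3]  # a pure "Z-type" Hamiltonian error (std basis)
    projs = std_errgen_projections(errgen_std, "hamiltonian", "pp", mxBasis="std")
    # projs[3] should be roughly 0.1 * std_scale_factor(4, "hamiltonian");
    # the remaining components should be close to zero.
    return projs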
def _assert_shape(ar, shape, sparse=False):
""" Asserts ar.shape == shape ; works with sparse matrices too """
if not sparse or len(shape) == 2:
assert(ar.shape == shape), \
"Shape mismatch: %s != %s!" % (str(ar.shape), str(shape))
else:
if len(shape) == 3: # first "dim" is a list
assert(len(ar) == shape[0]), \
"Leading dim mismatch: %d != %d!" % (len(ar), shape[0])
assert(shape[0] == 0 or ar[0].shape == (shape[1], shape[2])), \
"Shape mismatch: %s != %s!" % (str(ar[0].shape), str(shape[1:]))
elif len(shape) == 4: # first 2 dims are lists
assert(len(ar) == shape[0]), \
"Leading dim mismatch: %d != %d!" % (len(ar), shape[0])
assert(shape[0] == 0 or len(ar[0]) == shape[1]), \
"Second dim mismatch: %d != %d!" % (len(ar[0]), shape[1])
assert(shape[0] == 0 or shape[1] == 0 or ar[0][0].shape == (shape[2], shape[3])), \
"Shape mismatch: %s != %s!" % (str(ar[0][0].shape), str(shape[2:]))
else:
raise NotImplementedError("Number of dimensions must be <= 4!")
def lindblad_error_generators(dmbasis_ham, dmbasis_other, normalize,
other_mode="all"):
"""
Compute the superoperator-generators corresponding to Lindblad terms.
This routine computes the Hamiltonian and Non-Hamiltonian ("other")
superoperator generators which correspond to the terms of the Lindblad
expression:
L(rho) = sum_i( h_i [A_i,rho] ) +
sum_ij( o_ij * (B_i rho B_j^dag -
0.5( rho B_j^dag B_i + B_j^dag B_i rho) ) )
where {A_i} and {B_i} are bases (possibly the same) for Hilbert-Schmidt
(density matrix) space with the identity element removed so that each
A_i and B_i are traceless. If we write L(rho) in terms of superoperators
H_i and O_ij,
L(rho) = sum_i( h_i H_i(rho) ) + sum_ij( o_ij O_ij(rho) )
then this function computes the matrices for H_i and O_ij using the given
density matrix bases. Thus, if `dmbasis_ham` and `dmbasis_other` are expressed in the
standard basis (as they should be), the returned matrices are also in this basis.
If these elements are used as projectors it may be useful to normalize
them (by setting `normalize=True`). Note, however, that these projectors
are not all orthogonal - in particular the O_ij's are not orthogonal to
one another.
Parameters
----------
dmbasis_ham : list
A list of basis matrices {A_i} *including* the identity as the first
element, for the returned Hamiltonian-type error generators. This
argument is easily obtained by a call to :func:`pp_matrices` or a
similar function. The matrices are expected to be in the standard
basis, and should be traceless except for the identity. Matrices
should be NumPy arrays or SciPy CSR sparse matrices.
dmbasis_other : list
A list of basis matrices {B_i} *including* the identity as the first
element, for the returned Stochastic-type error generators. This
argument is easily obtained by a call to :func:`pp_matrices` or a
similar function. The matrices are expected to be in the standard
basis, and should be traceless except for the identity. Matrices
should be NumPy arrays or SciPy CSR sparse matrices.
normalize : bool
Whether or not generators should be normalized so that
numpy.linalg.norm(generator.flat) == 1.0 Note that the generators
will still, in general, be non-orthogonal.
other_mode : {"diagonal", "diag_affine", "all"}
Which non-Hamiltonian Lindblad error generators to construct.
Allowed values are: `"diagonal"` (only the diagonal Stochastic
generators are returned; that is, the generators corresponding to the
`i==j` terms in the Lindblad expression.), `"diag_affine"` (diagonal +
affine generators), and `"all"` (all generators).
Returns
-------
ham_generators : numpy.ndarray or list of SciPy CSR matrices
If dense matrices were given, an array of shape (d-1,d,d), where d is
the size of the basis, i.e. d == len(dmbasis_ham). `ham_generators[i]`
gives the matrix for H_i. If sparse matrices were given, a list
of shape (d,d) CSR matrices.
other_generators : numpy.ndarray or list of lists of SciPy CSR matrices
If dense matrices were given, an array of shape (d-1,d-1,d,d),
(2,d-1,d,d), or (d-1,d,d), where d is the size of the basis, for
`other_mode` equal to `"all"`, `"diag_affine"`, or `"diagonal"`,
respectively. For instance, in the `"all"` case,
`other_generators[i,j]` gives the matrix for O_ij. If sparse matrices
were given, all but the final 2 dimensions are lists (e.g. the
`"all"` case returns a list of lists of shape (d,d) CSR matrices).
"""
if dmbasis_ham is not None:
ham_mxs = dmbasis_ham # list of basis matrices (assumed to be in std basis)
ham_nMxs = len(ham_mxs) # usually == d2, but not necessary (e.g. w/maxWeight)
else:
ham_nMxs = 0
if dmbasis_other is not None:
other_mxs = dmbasis_other # list of basis matrices (assumed to be in std basis)
other_nMxs = len(other_mxs) # usually == d2, but not necessary (e.g. w/maxWeight)
else:
other_nMxs = 0
if ham_nMxs > 0:
d = ham_mxs[0].shape[0]
sparse = _sps.issparse(ham_mxs[0])
elif other_nMxs > 0:
d = other_mxs[0].shape[0]
sparse = _sps.issparse(other_mxs[0])
else:
d = 0 # will end up returning no generators
sparse = False
d2 = d**2
normfn = _spsl.norm if sparse else _np.linalg.norm
identityfn = (lambda d: _sps.identity(d, 'd', 'csr')) if sparse else _np.identity
if ham_nMxs > 0 and other_nMxs > 0:
assert(other_mxs[0].shape[0] == ham_mxs[0].shape[0]), \
"Bases must have the same dimension!"
if ham_nMxs > 0:
assert(_np.isclose(normfn(ham_mxs[0] - identityfn(d) / _np.sqrt(d)), 0)),\
"The first matrix in 'dmbasis_ham' must be the identity"
hamLindbladTerms = [None] * (ham_nMxs - 1) if sparse else \
_np.empty((ham_nMxs - 1, d2, d2), 'complex')
for i, B in enumerate(ham_mxs[1:]): # don't include identity
hamLindbladTerms[i] = _lt.hamiltonian_to_lindbladian(B, sparse) # in std basis
if normalize:
norm = normfn(hamLindbladTerms[i]) # same as norm(term.flat)
if not _np.isclose(norm, 0):
hamLindbladTerms[i] /= norm # normalize projector
assert(_np.isclose(normfn(hamLindbladTerms[i]), 1.0))
else:
hamLindbladTerms = None
if other_nMxs > 0:
assert(_np.isclose(normfn(other_mxs[0] - identityfn(d) / _np.sqrt(d)), 0)),\
"The first matrix in 'dmbasis_other' must be the identity"
if other_mode == "diagonal":
otherLindbladTerms = [None] * (other_nMxs - 1) if sparse else \
_np.empty((other_nMxs - 1, d2, d2), 'complex')
for i, Lm in enumerate(other_mxs[1:]): # don't include identity
otherLindbladTerms[i] = _lt.nonham_lindbladian(Lm, Lm, sparse)
if normalize:
norm = normfn(otherLindbladTerms[i]) # same as norm(term.flat)
if not _np.isclose(norm, 0):
otherLindbladTerms[i] /= norm # normalize projector
assert(_np.isclose(normfn(otherLindbladTerms[i]), 1.0))
elif other_mode == "diag_affine":
otherLindbladTerms = [[None] * (other_nMxs - 1) for _ in range(2)] if sparse else \
_np.empty((2, other_nMxs - 1, d2, d2), 'complex')
for i, Lm in enumerate(other_mxs[1:]): # don't include identity
otherLindbladTerms[0][i] = _lt.nonham_lindbladian(Lm, Lm, sparse)
otherLindbladTerms[1][i] = _lt.affine_lindbladian(Lm, sparse)
if normalize:
for k in (0, 1):
norm = normfn(otherLindbladTerms[k][i]) # same as norm(term.flat)
if not _np.isclose(norm, 0):
otherLindbladTerms[k][i] /= norm # normalize projector
assert(_np.isclose(normfn(otherLindbladTerms[k][i]), 1.0))
else: # other_mode == "all"
otherLindbladTerms = \
[[None] * (other_nMxs - 1) for i in range(other_nMxs - 1)] if sparse else \
_np.empty((other_nMxs - 1, other_nMxs - 1, d2, d2), 'complex')
for i, Lm in enumerate(other_mxs[1:]): # don't include identity
for j, Ln in enumerate(other_mxs[1:]): # don't include identity
#print("DEBUG NONHAM LIND (%d,%d)" % (i,j)) #DEBUG!!!
otherLindbladTerms[i][j] = _lt.nonham_lindbladian(Lm, Ln, sparse)
if normalize:
norm = normfn(otherLindbladTerms[i][j]) # same as norm(term.flat)
if not _np.isclose(norm, 0):
otherLindbladTerms[i][j] /= norm # normalize projector
assert(_np.isclose(normfn(otherLindbladTerms[i][j]), 1.0))
#I don't think this is true in general, but appears to be true for "pp" basis (why?)
#if j < i: # check that other[i,j] == other[j,i].C, i.e. other is Hermitian
# assert(_np.isclose(_np.linalg.norm(
# otherLindbladTerms[i][j]-
# otherLindbladTerms[j][i].conjugate()),0))
else:
otherLindbladTerms = None
#Check for orthogonality - otherLindblad terms are *not* orthogonal!
#N = otherLindbladTerms.shape[0]
#for i in range(N):
# for j in range(N):
# v1 = otherLindbladTerms[i,j].flatten()
# for k in range(N):
# for l in range(N):
# if k == i and l == j: continue
# v2 = otherLindbladTerms[k,l].flatten()
# if not _np.isclose(0, _np.vdot(v1,v2)):
# print("%d,%d <-> %d,%d dot = %g [%g]" % (i,j,k,l,_np.vdot(v1,v2),_np.dot(v1,v2)))
# #print("v1 = ",v1)
# #print("v2 = ",v2)
# # assert(False)
# #assert(_np.isclose(0, _np.vdot(v1,v2)))
#Check hamiltonian error gens are orthogonal to others
#N = otherLindbladTerms.shape[0]
#for i,hlt in enumerate(hamLindbladTerms):
# v1 = hlt.flatten()
# for j in range(N):
# for k in range(N):
# v2 = otherLindbladTerms[j,k].flatten()
# assert(_np.isclose(0, _np.vdot(v1,v2)))
return hamLindbladTerms, otherLindbladTerms
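# Illustrative sketch: a hypothetical helper (not part of the module API) showing
# how lindblad_error_generators might be driven with a dense single-qubit basis.
# The helper name is an assumption; the shapes quoted in the comments follow from
# the docstring above.
def _example_lindblad_error_generators():
    """Sketch: dense single-qubit Lindblad generators from the 'pp' basis."""
    pp_mxs = _bt.basis_matrices('pp', 4)  # [I, X, Y, Z]/sqrt(2) as 2x2 std-basis matrices
    hamGens, otherGens = lindblad_error_generators(pp_mxs, pp_mxs,
                                                   normalize=True, other_mode="all")
    # The identity is dropped, so hamGens has shape (3, 4, 4) and otherGens has
    # shape (3, 3, 4, 4); every generator is a 4x4 superoperator in the std basis.
    return hamGens, otherGens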
def lindblad_errgen_projections(errgen, ham_basis,
other_basis, mxBasis="gm",
normalize=True, return_generators=False,
other_mode="all", sparse=False):
"""
Compute the projections of a gate error generator onto generators
for the Lindblad-term errors when expressed in the given
"projection basis".
Parameters
----------
errgen : ndarray
The error generator matrix to project.
ham_basis: {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object
The basis used to construct the Hamiltonian-type Lindblad error
generators. Allowed values are Matrix-unit (std), Gell-Mann (gm),
Pauli-product (pp), and Qutrit (qt), a list of numpy arrays, or a custom basis object.
other_basis : {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object
The basis used to construct the Stochastic-type Lindblad error
generators. Allowed values are Matrix-unit (std), Gell-Mann (gm),
Pauli-product (pp), and Qutrit (qt), a list of numpy arrays, or a custom basis object.
mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
The source basis. Allowed values are Matrix-unit (std),
Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
normalize : bool, optional
Whether or not the generators being projected onto are normalized, so
that numpy.linalg.norm(generator.flat) == 1.0. Note that the generators
will still, in general, be non-orthogonal.
return_generators : bool, optional
If True, return the error generators projected against along with the
projection values themselves.
other_mode : {"diagonal", "diag_affine", "all"}
Which non-Hamiltonian Lindblad error projections to obtain.
Allowed values are: `"diagonal"` (only the diagonal Stochastic),
`"diag_affine"` (diagonal + affine generators), and `"all"`
(all generators).
sparse : bool, optional
Whether to create sparse or dense basis matrices when strings
are given as `ham_basis` and `other_basis`.
Returns
-------
ham_projections : numpy.ndarray
An array of length d-1, where d is the dimension of the gate,
giving the projections onto the Hamiltonian-type Lindblad terms.
other_projections : numpy.ndarray
An array of shape (d-1,d-1), (2,d-1), or (d-1,), where d is the dimension
of the gate, for `other_mode` equal to `"all"`, `"diag_affine"`, or
`"diagonal"`, respectively. Values give the projections onto the
non-Hamiltonian-type Lindblad terms.
ham_generators : numpy.ndarray
The Hamiltonian-type Lindblad term generators, as would be returned
from `lindblad_error_generators(pp_matrices(sqrt(d)), normalize)`.
Shape is (d-1,d,d), and `ham_generators[i]` is in the standard basis.
other_generators : numpy.ndarray
The Stochastic-type Lindblad term generators, as would be returned
from `lindblad_error_generators(pp_matrices(sqrt(d)), normalize)`.
Shape is (d-1,d-1,d,d), (2,d-1,d,d), or (d-1,d,d) for `other_mode`
equal to `"all"`, `"diag_affine"`, or `"diagonal"`, respectively,
and `other_generators[i]` is in the std basis.
"""
errgen_std = _bt.change_basis(errgen, mxBasis, "std")
if _sps.issparse(errgen_std):
errgen_std_flat = errgen_std.tolil().reshape(
(errgen_std.shape[0] * errgen_std.shape[1], 1)).tocsr() # b/c lil's are only type that can reshape...
else:
errgen_std_flat = errgen_std.flatten()
errgen_std = None # unused below, and sparse reshape doesn't copy, so mark as None
d2 = errgen.shape[0]
d = int(_np.sqrt(d2))
#nQubits = _np.log2(d)
#Get a list of the generators in correspondence with the
# specified basis elements.
if isinstance(ham_basis, _Basis):
hamBasisMxs = ham_basis.elements
elif isinstance(ham_basis, str):
hamBasisMxs = _bt.basis_matrices(ham_basis, d2, sparse=sparse)
else:
hamBasisMxs = ham_basis
if isinstance(other_basis, _Basis):
otherBasisMxs = other_basis.elements
elif isinstance(other_basis, str):
otherBasisMxs = _bt.basis_matrices(other_basis, d2, sparse=sparse)
else:
otherBasisMxs = other_basis
hamGens, otherGens = lindblad_error_generators(
hamBasisMxs, otherBasisMxs, normalize, other_mode) # in std basis
if hamBasisMxs is not None:
bsH = len(hamBasisMxs) # basis size (not necessarily d2)
else: bsH = 0
if otherBasisMxs is not None:
bsO = len(otherBasisMxs) # basis size (not necessarily d2)
else: bsO = 0
if bsH > 0: sparse = _sps.issparse(hamBasisMxs[0])
elif bsO > 0: sparse = _sps.issparse(otherBasisMxs[0])
else: sparse = False # default?
assert(_np.isclose(d * d, d2)) # d2 must be a perfect square
if bsH > 0:
_assert_shape(hamGens, (bsH - 1, d2, d2), sparse)
if bsO > 0:
if other_mode == "diagonal":
_assert_shape(otherGens, (bsO - 1, d2, d2), sparse)
elif other_mode == "diag_affine":
_assert_shape(otherGens, (2, bsO - 1, d2, d2), sparse)
else: # other_mode == "all"
_assert_shape(otherGens, (bsO - 1, bsO - 1, d2, d2), sparse)
#Perform linear least squares solve to find "projections" onto each otherGens element - defined so that
# sum_i projection_i * otherGen_i = (errgen_std-ham_errgen) as well as possible.
#ham_error_gen = _np.einsum('i,ijk', hamProjs, hamGens)
#other_errgen = errgen_std - ham_error_gen #what's left once hamiltonian errors are projected out
#Do linear least squares soln to expressing errgen_std as a linear combo
# of the lindblad generators
if bsH > 0:
if not sparse:
H = hamGens.reshape((bsH - 1, d2**2)).T # ham generators == columns
Hdag = H.T.conjugate()
#Do linear least squares: this is what takes the bulk of the time
hamProjs = _np.linalg.solve(_np.dot(Hdag, H), _np.dot(Hdag, errgen_std_flat))
hamProjs.shape = (hamGens.shape[0],)
else:
rows = [hamGen.tolil().reshape((1, d2**2)) for hamGen in hamGens]
H = _sps.vstack(rows, 'csr').transpose()
Hdag = H.copy().transpose().conjugate()
#Do linear least squares: this is what takes the bulk of the time
if _mt.safenorm(errgen_std_flat) < 1e-8: # protect against singular RHS
hamProjs = _np.zeros(bsH - 1, 'd')
else:
hamProjs = _spsl.spsolve(Hdag.dot(H), Hdag.dot(errgen_std_flat))
if _sps.issparse(hamProjs): hamProjs = hamProjs.toarray().flatten()
hamProjs.shape = (bsH - 1,)
else:
hamProjs = None
if bsO > 0:
if not sparse:
if other_mode == "diagonal":
O = otherGens.reshape((bsO - 1, d2**2)).T # other generators == columns
elif other_mode == "diag_affine":
O = otherGens.reshape((2 * (bsO - 1), d2**2)).T # other generators == columns
else:
O = otherGens.reshape(((bsO - 1)**2, d2**2)).T # other generators == columns
Odag = O.T.conjugate()
#Do linear least squares: this is what takes the bulk of the time
otherProjs = _np.linalg.solve(_np.dot(Odag, O), _np.dot(Odag, errgen_std_flat))
if other_mode == "diagonal":
otherProjs.shape = (otherGens.shape[0],)
elif other_mode == "diag_affine":
otherProjs.shape = (2, otherGens.shape[1])
else:
otherProjs.shape = (otherGens.shape[0], otherGens.shape[1])
else:
if other_mode == "diagonal":
rows = [oGen.tolil().reshape((1, d2**2)) for oGen in otherGens]
O = _sps.vstack(rows, 'csr').transpose() # other generators == columns
else: # "diag_affine" or "all"
rows = [oGen.tolil().reshape((1, d2**2)) for oGenRow in otherGens for oGen in oGenRow]
O = _sps.vstack(rows, 'csr').transpose() # other generators == columns
Odag = O.copy().transpose().conjugate() # TODO: maybe conjugate copies data?
#Do linear least squares: this is what takes the bulk of the time
if _mt.safenorm(errgen_std_flat) < 1e-8: # protect against singular RHS
if other_mode == "diagonal": otherProjs = _np.zeros(bsO - 1, 'd')
elif other_mode == "diag_affine": otherProjs = _np.zeros((2, bsO - 1), 'd')
else: otherProjs = _np.zeros((bsO - 1, bsO - 1), 'd')
else:
otherProjs = _spsl.spsolve(Odag.dot(O), Odag.dot(errgen_std_flat))
if _sps.issparse(otherProjs): otherProjs = otherProjs.toarray().flatten()
if other_mode == "diagonal":
otherProjs.shape = (bsO - 1,)
elif other_mode == "diag_affine":
otherProjs.shape = (2, bsO - 1)
else: # other_mode == "all"
otherProjs.shape = (bsO - 1, bsO - 1)
else:
otherProjs = None
#check err gens are linearly independent -- but can take a very long time, so comment out!
#assert(_np.linalg.matrix_rank(H,1e-7) == H.shape[1])
#assert(_np.linalg.matrix_rank(O,1e-7) == O.shape[1])
#if False: # further check against older (slower) version
# M = _np.concatenate( (hamGens.reshape((bs-1,d2**2)).T, otherGens.reshape(((bs-1)**2,d2**2)).T), axis=1)
# assert(_np.linalg.matrix_rank(M,1e-7) == M.shape[1]) #check err gens are linearly independent
# Mdag = M.T.conjugate()
# print("DB D: %.1f" % (time.time()-t)); t = time.time()
# projs = _np.linalg.solve(_np.dot(Mdag,M), _np.dot(Mdag,errgen_std_flat))
# hamProjs_chk = projs[0:(bs-1)]
# otherProjs_chk = projs[(bs-1):]
# assert(_np.linalg.norm(hamProjs-hamProjs_chk) < 1e-6)
# assert(_np.linalg.norm(otherProjs-otherProjs_chk) < 1e-6)
if return_generators:
return hamProjs, otherProjs, hamGens, otherGens
else:
return hamProjs, otherProjs
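# Illustrative sketch: a hypothetical helper (not part of the module API) showing
# a typical call to lindblad_errgen_projections. The argument `errgen_pp` is an
# assumed 4x4 single-qubit error generator expressed in the 'pp' basis.
def _example_lindblad_errgen_projections(errgen_pp):
    """Sketch: Hamiltonian and "other" Lindblad projections of a 1-qubit error generator."""
    hamProjs, otherProjs = lindblad_errgen_projections(
        errgen_pp, "pp", "pp", mxBasis="pp", normalize=True, other_mode="all")
    # For a single qubit, hamProjs has length 3 (X, Y, Z) and otherProjs has shape (3, 3).
    return hamProjs, otherProjs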
def projections_to_lindblad_terms(hamProjs, otherProjs, ham_basis, other_basis,
other_mode="all", return_basis=True):
"""
Converts the projections of an error generator onto basis elements into
the Lindblad-term dictionary and basis used to individually specify
Lindblad terms.
Parameters
----------
hamProjs : numpy.ndarray
An array of length d-1, where d is the dimension of the projected error
generator, giving the projections onto the Hamiltonian-type Lindblad
terms.
otherProjs : numpy.ndarray
An array of shape (d-1,d-1), (2,d-1), or (d-1,), where d is the dimension
of the projected error generator, for `other_mode` equal to `"all"`,
`"diag_affine"`, or `"diagonal"`, respectively. Values give the
projections onto the non-Hamiltonian-type Lindblad terms.
ham_basis: {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object
The basis used to construct `hamProjs`. Allowed values are Matrix-unit
(std), Gell-Mann (gm), Pauli-product (pp), and Qutrit (qt), list of
numpy arrays, or a custom basis object.
other_basis : {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object
The basis used to construct `otherProjs`. Allowed values are
Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), and Qutrit (qt),
list of numpy arrays, or a custom basis object.
other_mode : {"diagonal", "diag_affine", "all"}
Which non-Hamiltonian Lindblad error projections `otherProjs` includes.
Allowed values are: `"diagonal"` (only the diagonal Stochastic),
`"diag_affine"` (diagonal + affine generators), and `"all"`
(all generators).
return_basis : bool, optional
Whether to return a :class:`Basis` containing the elements
corresponding to labels within the returned `Ltermdict`.
Returns
-------
Ltermdict : dict
Keys are `(termType, basisLabel1, <basisLabel2>)`
tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), or
`"A"` (Affine). Hamiltonian and Affine terms always have a single basis
label (so key is a 2-tuple) whereas Stochastic tuples have 1 basis label
to indicate a *diagonal* term and otherwise have 2 basis labels to
specify off-diagonal non-Hamiltonian Lindblad terms. Basis labels
are taken from `ham_basis` and `other_basis`. Values are complex
coefficients (the projections).
basis : Basis
A single basis containing all the basis labels used in `Ltermdict` (and
*only* those elements). Only returned when `return_basis == True`.
"""
assert(not (ham_basis is None and other_basis is None)), \
"At least one of `ham_basis` and `other_basis` must be non-None"
# Make None => length-0 arrays so iteration code works below (when basis is None)
if hamProjs is None: hamProjs = _np.empty(0, 'd')
if otherProjs is None:
otherProjs = _np.empty(0, 'd') if other_mode == "diagonal" \
else _np.empty((0, 0), 'd')
# Construct a pair of dictionaries describing all of the
# Lindblad-terms:
# Ltermdict keys= ('H',basisLbl), ('S',basisLbl), or ('S',bLbl1,bLbl2)
# vals= coefficients of these terms (projections from errgen)
# basisdict keys= basis labels (just has to match Ltermdict keys)
# vals= basis matrices - can be either sparse or dense
Ltermdict = _collections.OrderedDict()
basisdict = _collections.OrderedDict()
if return_basis:
def set_basis_el(blbl, bel):
""" Sets an elment of basisdict, checking for consistency """
if blbl in basisdict:
assert(_mt.safenorm(basisdict[blbl] - bel) < 1e-8), "Ambiguous basis el label %s" % blbl
else:
basisdict[blbl] = bel
else:
def set_basis_el(blbl, bel):
pass
#Add Hamiltonian error elements
if ham_basis is not None:
ham_lbls = ham_basis.labels
ham_mxs = ham_basis.elements # can be sparse
assert(len(ham_mxs[1:]) == len(hamProjs))
for coeff, lbl, bmx in zip(hamProjs, ham_lbls[1:], ham_mxs[1:]): # skip identity
Ltermdict[('H', lbl)] = coeff
set_basis_el(lbl, bmx)
else:
ham_lbls = []
#Add "other" error elements
if other_basis is not None:
other_lbls = other_basis.labels
other_mxs = other_basis.elements # can be sparse
if other_mode == "diagonal":
assert(len(other_mxs[1:]) == len(otherProjs))
for coeff, lbl, bmx in zip(otherProjs, other_lbls[1:], other_mxs[1:]): # skip identity
Ltermdict[('S', lbl)] = coeff
set_basis_el(lbl, bmx)
elif other_mode == "diag_affine":
assert((2, len(other_mxs[1:])) == otherProjs.shape)
for coeff, lbl, bmx in zip(otherProjs[0], other_lbls[1:], other_mxs[1:]): # skip identity
Ltermdict[('S', lbl)] = coeff
set_basis_el(lbl, bmx)
for coeff, lbl, bmx in zip(otherProjs[1], other_lbls[1:], other_mxs[1:]): # skip identity
Ltermdict[('A', lbl)] = coeff
set_basis_el(lbl, bmx)
else:
assert((len(other_mxs[1:]), len(other_mxs[1:])) == otherProjs.shape)
for i, (lbl1, bmx1) in enumerate(zip(other_lbls[1:], other_mxs[1:])): # skip identity
set_basis_el(lbl1, bmx1)
for j, (lbl2, bmx2) in enumerate(zip(other_lbls[1:], other_mxs[1:])): # skip identity
set_basis_el(lbl2, bmx2)
Ltermdict[('S', lbl1, lbl2)] = otherProjs[i, j]
else:
other_lbls = []
#Turn basisdict into a Basis to return
if return_basis:
if ham_basis == other_basis:
basis = ham_basis
elif ham_basis is None or set(ham_lbls).issubset(set(other_lbls)):
basis = other_basis
elif other_basis is None or set(other_lbls).issubset(set(ham_lbls)):
basis = ham_basis
else:
#Create an ExplictBasis using the matrices in basisdict plus the identity
sparse = True; real = True
if ham_basis is not None:
elshape = ham_basis.elshape
sparse = sparse and ham_basis.sparse
real = real and ham_basis.real
if other_basis is not None:
elshape = other_basis.elshape
sparse = sparse and other_basis.sparse
real = real and other_basis.real
d = elshape[0]
Id = _sps.identity(d, 'complex', 'csr') / _np.sqrt(d) if sparse \
else _np.identity(d, 'complex') / _np.sqrt(d)
lbls = ['I'] + list(basisdict.keys())
mxs = [Id] + list(basisdict.values())
basis = _ExplicitBasis(mxs, lbls, name=None,
real=real, sparse=sparse)
return Ltermdict, basis
else:
return Ltermdict
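# Illustrative sketch: a hypothetical helper (not part of the module API) showing
# how projection arrays might be turned into a Lindblad-term dictionary. `pp_basis`
# is assumed to be a single-qubit Pauli-product Basis object built elsewhere, with
# labels 'I', 'X', 'Y', 'Z'.
def _example_projections_to_lindblad_terms(hamProjs, otherProjs, pp_basis):
    """Sketch: coefficients keyed by ('H', lbl), ('S', lbl1, lbl2), etc."""
    Ltermdict, basis = projections_to_lindblad_terms(
        hamProjs, otherProjs, pp_basis, pp_basis, other_mode="all", return_basis=True)
    # e.g. Ltermdict[('H', 'X')] would hold the Hamiltonian X coefficient and
    # Ltermdict[('S', 'X', 'Y')] an off-diagonal non-Hamiltonian coefficient.
    return Ltermdict, basis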
def lindblad_terms_to_projections(Ltermdict, basis, other_mode="all"):
"""
Convert a set of Lindblad terms into a dense matrix/grid of projections.
Essentially the inverse of :func:`projections_to_lindblad_terms`.
Parameters
----------
Ltermdict : dict
A dictionary specifying which Lindblad terms are present in the gate
parameterization. Keys are `(termType, basisLabel1, <basisLabel2>)`
tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), or
`"A"` (Affine). Hamiltonian and Affine terms always have a single basis
label (so key is a 2-tuple) whereas Stochastic tuples with 1 basis label
indicate a *diagonal* term, and are the only types of terms allowed when
`nonham_mode != "all"`. Otherwise, Stochastic term tuples can include 2
basis labels to specify "off-diagonal" non-Hamiltonian Lindblad terms.
Basis labels can be strings or integers. Values are complex
coefficients (error rates).
basis : Basis, optional
A basis mapping the labels used in the keys of `Ltermdict` to
basis matrices (e.g. numpy arrays or Scipy sparse matrices). The
first element of this basis should be an identity element, and
will be propagated to the returned `ham_basis` and `other_basis`.
other_mode : {"diagonal", "diag_affine", "all"}
Which non-Hamiltonian terms are allowed in `Ltermdict`.
Allowed values are: `"diagonal"` (only the diagonal Stochastic),
`"diag_affine"` (diagonal + affine generators), and `"all"`
(all generators).
Returns
-------
hamProjs : numpy.ndarray
An array of length `basisdim-1`, giving the projections onto a
full set of the Hamiltonian-type Lindblad terms (onto each element of
`ham_basis`).
otherProjs : numpy.ndarray
An array of shape (d-1,d-1), (2,d-1), or (d-1,), where d=`basisdim`
for `other_mode` equal to `"all"`, `"diag_affine"`, or `"diagonal"`,
respectively. Values give the projections onto the non-Hamiltonian
-type Lindblad terms.
ham_basis: Basis
The basis used to construct `hamProjs`.
other_basis : Basis
The basis used to construct `otherProjs`.
hamBasisIndices : OrderedDict
A dictionary mapping some or all of the basis labels of `basis`
to the integers 0 to `len(ham_basis)`. These are indices into
`hamProjs`, giving the projection associated with each Hamiltonian
basis element.
otherBasisIndices : OrderedDict
A dictionary mapping some or all of the basis labels of `basis`
to the integers 0 to `len(other_basis)`. These are row and column
indices into `otherProjs`, giving the projection associated with each
pair of "other" basis elements (or single basis element if
`other_mode!="all"`).
"""
#Separately enumerate the (distinct) basis elements used for Hamiltonian
# and non-Hamiltonian error terms
#print("DB: lindblad term to proj: \n",Ltermdict,"\n",basis)
hamBasisLabels = []
otherBasisLabels = []
for termLbl, coeff in Ltermdict.items():
if isinstance(termLbl, str): termLbl = (termLbl[0], termLbl[1:]) # e.g. "HXX" => ('H','XX')
termType = termLbl[0]
if termType == "H": # Hamiltonian
assert(len(termLbl) == 2), "Hamiltonian term labels should have form ('H',<basis element label>)"
if termLbl[1] not in hamBasisLabels:
hamBasisLabels.append(termLbl[1])
elif termType == "S": # Stochastic
if other_mode in ("diagonal", "diag_affine"):
assert(len(termLbl) == 2), "Stochastic term labels should have form ('S',<basis element label>)"
if termLbl[1] not in otherBasisLabels:
otherBasisLabels.append(termLbl[1])
else:
assert(len(termLbl) == 3), "Stochastic term labels should have form ('S',<bel1>, <bel2>)"
if termLbl[1] not in otherBasisLabels:
otherBasisLabels.append(termLbl[1])
if termLbl[2] not in otherBasisLabels:
otherBasisLabels.append(termLbl[2])
elif termType == "A": # Affine
assert(other_mode == "diag_affine"), "Affine labels are only allowed in an affine mode"
assert(len(termLbl) == 2), "Affine term labels should have form ('A',<basis element label>)"
if termLbl[1] not in otherBasisLabels:
otherBasisLabels.append(termLbl[1])
#Construct bases
# Note: the lists of basis matrices shouldn't contain the identity, since
# the terms above shouldn't contain identity terms - but `basis` should
# contain an identity element as it's first element, so add this identity el
# to non-empty bases (empty bases stay empty!) to be consistent with the
# rest of the framework (bases *have* Ids)
sparse = basis.sparse
if set(hamBasisLabels) == set(basis.labels):
ham_basis = basis
else:
Id = basis[0]
ham_basis_mxs = [basis[bl] for bl in hamBasisLabels]
if len(ham_basis_mxs) > 0:
ham_basis = _ExplicitBasis([Id] + ham_basis_mxs, ['I'] + hamBasisLabels,
name=None, real=True, sparse=sparse)
else:
ham_basis = _ExplicitBasis(ham_basis_mxs, name=None, real=True, sparse=sparse)
if set(otherBasisLabels) == set(basis.labels):
other_basis = basis
else:
Id = basis[0]
other_basis_mxs = [basis[bl] for bl in otherBasisLabels]
if len(other_basis_mxs) > 0:
other_basis = _ExplicitBasis([Id] + other_basis_mxs, ['I'] + otherBasisLabels,
name=None, real=True, sparse=sparse)
else:
other_basis = _ExplicitBasis(other_basis_mxs, name=None, real=True, sparse=sparse)
bsH, bsO = len(ham_basis), len(other_basis)
#print("DB: constructed ham_basis = ",ham_basis)
#print("DB: other basis = ",other_basis)
#Create projection (term coefficient) arrays - or return None if
# the corresponding basis is empty (as per our convention)
hamProjs = _np.zeros(bsH - 1, 'complex') if bsH > 0 else None
if bsO > 0:
if other_mode == "diagonal": # OK if this runs for 'auto' too since then len(otherBasisIndices) == 0
otherProjs = _np.zeros(bsO - 1, 'complex')
elif other_mode == "diag_affine":
otherProjs = _np.zeros((2, bsO - 1), 'complex')
else:
otherProjs = _np.zeros((bsO - 1, bsO - 1), 'complex')
else: otherProjs = None
#Fill arrays
hamBasisIndices = {lbl: i - 1 for i, lbl in enumerate(ham_basis.labels)} # -1 to compensate for identity as
otherBasisIndices = {lbl: i - 1 for i, lbl in enumerate(other_basis.labels)} # first element (not in projections).
for termLbl, coeff in Ltermdict.items():
if isinstance(termLbl, str): termLbl = (termLbl[0], termLbl[1:]) # e.g. "HXX" => ('H','XX')
termType = termLbl[0]
if termType == "H": # Hamiltonian
k = hamBasisIndices[termLbl[1]] # index of coefficient in array
hamProjs[k] = coeff
elif termType == "S": # Stochastic
if other_mode == "diagonal":
k = otherBasisIndices[termLbl[1]] # index of coefficient in array
otherProjs[k] = coeff
elif other_mode == "diag_affine":
k = otherBasisIndices[termLbl[1]] # index of coefficient in array
otherProjs[0, k] = coeff
else: # other_mode == "all"
k = otherBasisIndices[termLbl[1]] # index of row in "other" coefficient matrix
j = otherBasisIndices[termLbl[2]] # index of col in "other" coefficient matrix
otherProjs[k, j] = coeff
elif termType == "A": # Affine
assert(other_mode == "diag_affine")
k = otherBasisIndices[termLbl[1]] # index of coefficient in array
otherProjs[1, k] = coeff
return hamProjs, otherProjs, ham_basis, other_basis
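# Illustrative sketch: a hypothetical helper (not part of the module API) showing
# the inverse direction, from a term dictionary back to projection arrays. `pp_basis`
# is again assumed to be a Basis whose first element is the identity and whose
# labels include 'X' and 'Y'.
def _example_lindblad_terms_to_projections(pp_basis):
    """Sketch: two Hamiltonian terms plus one diagonal Stochastic term."""
    Ltermdict = _collections.OrderedDict(
        [(('H', 'X'), 0.01), (('H', 'Y'), -0.005), (('S', 'X'), 0.001)])
    return lindblad_terms_to_projections(Ltermdict, pp_basis, other_mode="diagonal")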
def lindblad_projections_to_paramvals(hamProjs, otherProjs, param_mode="cptp",
other_mode="all", truncate=True):
"""
Construct the array of Lindblad-gate parameter values from the separate
arrays of Hamiltonian and non-Hamiltonian Lindblad-term projections.
When `param_mode == "cptp"`, this function parameterizes the projections
so that (real) parameter values correspond to projections for a valid
CPTP gate (e.g. by parameterizing the Cholesky decomposition of `otherProjs`
instead of `otherProjs` itself). This function is closely related to
implementation details of the LindbladOp class.
Parameters
----------
hamProjs : numpy.ndarray
An array of length d-1, where d is the gate dimension, giving the
projections onto a full set of the Hamiltonian-type Lindblad terms.
otherProjs : numpy.ndarray
An array of shape (d-1,d-1), (2,d-1), or (d-1,), where d is the gate
dimension, for `other_mode` equal to `"all"`,`"diag_affine"`, or
`"diagonal"`, respectively. Values give the projections onto a full
set of non-Hamiltonian-type Lindblad terms.
param_mode : {"unconstrained", "cptp", "depol", "reldepol"}
Describes how values in `hamProjs` and `otherProjs` relate to the
returned parameter values. Allowed values are:
`"unconstrained"` (projs are independent unconstrained parameters),
`"cptp"` (independent parameters but constrained so map is CPTP),
`"reldepol"` (all non-Ham. diagonal projs take the *same* value),
`"depol"` (same as `"reldepol"` but projs must be *positive*)
other_mode : {"diagonal", "diag_affine", "all"}
Which non-Hamiltonian Lindblad error projections `otherProjs` includes.
Allowed values are: `"diagonal"` (only the diagonal Stochastic),
`"diag_affine"` (diagonal + affine generators), and `"all"`.
truncate : bool, optional
Whether to truncate the projections onto the Lindblad terms in
order to meet constraints (e.g. to preserve CPTP) when necessary.
If False, then an error is thrown when the given projections
cannot be parameterized as specified.
Returns
-------
numpy.ndarray
A 1D array of real parameter values consisting of d-1 Hamiltonian
values followed by either (d-1)^2, 2*(d-1), or just d-1 non-Hamiltonian
values for `other_mode` equal to `"all"`, `"diag_affine"`, or
`"diagonal"`, respectively.
"""
if hamProjs is not None:
assert(_np.isclose(_np.linalg.norm(hamProjs.imag), 0)), \
"Hamiltoian projections (coefficients) are not all real!"
hamParams = hamProjs.real
else:
hamParams = _np.empty(0, 'd')
if otherProjs is not None:
if other_mode == "diagonal":
assert(_np.isclose(_np.linalg.norm(_np.imag(otherProjs)), 0)), \
"Diagonal stochastic projections (coefficients) are not all real!"
if param_mode == "depol": # otherParams is a *single-element* 1D vector of the sqrt of each diagonal el
assert(truncate or all([v >= -1e-12 for v in otherProjs])), \
"Lindblad coefficients are not CPTP (truncate == False)!"
assert(truncate or all([_np.isclose(v, otherProjs[0]) for v in otherProjs])), \
"Diagonal lindblad coefficients are not equal (truncate == False)!"
otherProj = _np.mean(otherProjs.clip(1e-16, 1e100))
otherParams = _np.array(_np.sqrt(_np.real(otherProj)), 'd') # shape (1,)
elif param_mode == "cptp": # otherParams is a 1D vector of the sqrts of diagonal els
assert(truncate or all([v >= -1e-12 for v in otherProjs])), \
"Lindblad coefficients are not CPTP (truncate == False)!"
otherProjs = otherProjs.clip(1e-16, 1e100)
otherParams = _np.sqrt(otherProjs.real) # shape (bsO-1,)
else: # "unconstrained": otherParams is a 1D vector of the real diagonal els of otherProjs
otherParams = otherProjs.real # shape (bsO-1,)
elif other_mode == "diag_affine":
assert(_np.isclose(_np.linalg.norm(_np.imag(otherProjs)), 0)), \
"Diagonal stochastic and affine projections (coefficients) are not all real!"
if param_mode == "depol": # otherParams is a single depol value + unconstrained affine coeffs
assert(truncate or all([v >= -1e-12 for v in otherProjs[0]])), \
"Lindblad coefficients are not CPTP (truncate == False)!"
assert(truncate or all([_np.isclose(v, otherProjs[0, 0]) for v in otherProjs[0]])), \
"Diagonal lindblad coefficients are not equal (truncate == False)!"
depolProj = _np.mean(otherProjs[0, :].clip(1e-16, 1e100))
otherParams = _np.concatenate(([_np.sqrt(_np.real(depolProj))],
otherProjs[1].real)) # shape (1+(bsO-1),)
elif param_mode == "cptp": # Note: does not constrained affine coeffs to CPTP
assert(truncate or all([v >= -1e-12 for v in otherProjs[0]])), \
"Lindblad coefficients are not CPTP (truncate == False)!"
diagParams = _np.sqrt(_np.real(otherProjs[0, :]).clip(1e-16, 1e100)) # shape (bsO-1,)
otherParams = _np.concatenate((diagParams, otherProjs[1].real)) # diag + affine params
else: # param_mode == "unconstrained": otherParams is a 1D vector of the real diagonal els of otherProjs
otherParams = otherProjs.real # shape (2,bsO-1)
else: # other_mode == "all"
assert(_np.isclose(_np.linalg.norm(otherProjs - otherProjs.T.conjugate()), 0)
), "Other projection/coefficient mx is not Hermitian!"
assert(param_mode != "depol"), "`depol` is not supported when `other_mode == 'all'`"
bsO = otherProjs.shape[0] + 1 # +1 to keep convention that this is the basis (w/Identity) size
otherParams = _np.empty((bsO - 1, bsO - 1), 'd')
if param_mode == "cptp": # otherParams mx stores Cholesky decomp
#push any slightly negative evals of otherProjs positive so that
# the Cholesky decomp will work.
evals, U = _np.linalg.eig(otherProjs)
Ui = _np.linalg.inv(U)
assert(truncate or all([ev >= -1e-12 for ev in evals])), \
"Lindblad coefficients are not CPTP (truncate == False)!"
pos_evals = evals.clip(1e-16, 1e100)
otherProjs = _np.dot(U, _np.dot(_np.diag(pos_evals), Ui))
try:
Lmx = _np.linalg.cholesky(otherProjs)
# if Lmx is not positive definite, try again with 1e-12 (same lines as above)
except _np.linalg.LinAlgError: # pragma: no cover
pos_evals = evals.clip(1e-12, 1e100) # pragma: no cover
otherProjs = _np.dot(U, _np.dot(_np.diag(pos_evals), Ui)) # pragma: no cover
Lmx = _np.linalg.cholesky(otherProjs) # pragma: no cover
for i in range(bsO - 1):
assert(_np.linalg.norm(_np.imag(Lmx[i, i])) < IMAG_TOL)
otherParams[i, i] = Lmx[i, i].real
for j in range(i):
otherParams[i, j] = Lmx[i, j].real
otherParams[j, i] = Lmx[i, j].imag
else: # param_mode == "unconstrained": otherParams mx stores otherProjs (hermitian) directly
for i in range(bsO - 1):
assert(_np.linalg.norm(_np.imag(otherProjs[i, i])) < IMAG_TOL)
otherParams[i, i] = otherProjs[i, i].real
for j in range(i):
otherParams[i, j] = otherProjs[i, j].real
otherParams[j, i] = otherProjs[i, j].imag
else:
otherParams = _np.empty(0, 'd')
assert(not _np.iscomplexobj(hamParams)) # params should always
assert(not _np.iscomplexobj(otherParams)) # be *real*
return _np.concatenate((hamParams, otherParams.flat))
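# Illustrative sketch: a hypothetical helper (not part of the module API) showing
# how coefficient arrays might be packed into a real parameter vector under the
# CPTP parameterization. The numerical values are arbitrary assumptions.
def _example_lindblad_projections_to_paramvals():
    """Sketch: 3 Hamiltonian coefficients + a diagonal 3x3 "other" coefficient matrix."""
    hamProjs = _np.array([0.01, 0.0, -0.02])
    otherProjs = _np.diag([1e-3, 2e-3, 5e-4])  # Hermitian and positive => CPTP-compatible
    paramvals = lindblad_projections_to_paramvals(hamProjs, otherProjs,
                                                  param_mode="cptp", other_mode="all")
    # Length is 3 (Hamiltonian) + 3*3 (Cholesky-encoded "other" block) == 12 real values.
    return paramvals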
def paramvals_to_lindblad_projections(paramvals, ham_basis_size,
other_basis_size, param_mode="cptp",
other_mode="all", Lmx=None):
"""
Construct the separate arrays of Hamiltonian and non-Hamiltonian
Lindblad-term projections from the array of Lindblad-gate parameter values.
This function essentially performs the inverse of
:function:`lindblad_projections_to_paramvals`.
Parameters
----------
paramvals : numpy.ndarray
A 1D array of real parameter values consisting of d-1 Hamiltonian
values followed by either (d-1)^2, 2*(d-1), or just d-1 non-Hamiltonian
values for `other_mode` equal to `"all"`, `"diag_affine"`, or `"diagonal"`, respectively.
ham_basis_size, other_basis_size : int
The number of elements in the Hamiltonian and non-Hamiltonian
bases used to construct `paramvals`. As such, `ham_basis_size`
gives the offset into `paramvals` where the non-Hamiltonian
parameters begin.
param_mode : {"unconstrained", "cptp", "depol", "reldepol"}
Specifies how the Lindblad-term coefficients are mapped to the set of
(real) parameter values. This really just applies to the "other"
(non-Hamiltonian) coefficients. "unconstrained" means that ranging
over the parameter values lets the coefficient-matrix vary over all
matrices, "cptp" restricts this to postitive matrices. "depol"
maps all of the coefficients to the *same, positive* parameter (only
available for "diagonal" and "diag_affine" other-modes), and "reldepol"
does the same thing but without the positivity constraint.
other_mode : {"all", "diagonal", "diag_affine"}
Specifies the structure of the matrix of other (non-Hamiltonian)
coefficients. If d is the gate dimension, "all" means a (d-1,d-1)
matrix is used; "diagonal" means just the (d2-1,) diagonal of this
matrix is used; "diag_affine" means the coefficients are in a (2,d2-1)
array with the diagonal-term coefficients being the first row and the
affine coefficients being the second row.
Lmx : ndarray, optional
Scratch space that is used to store the lower-triangular
Cholesky decomposition matrix that is used to construct
the "other" projections when there is a CPTP constraint.
Returns
-------
hamProjs : numpy.ndarray
An array of length d-1, where d is the gate dimension, giving the
projections onto a full set of the Hamiltonian-type Lindblad terms.
otherProjs : numpy.ndarray
An array of shape (d-1,d-1) or (d-1,) or (2,d-1) where d is the gate
dimension, giving the projections onto a full set of non-Hamiltonian
-type Lindblad terms (see `other_mode` above).
"""
bsH = ham_basis_size
bsO = other_basis_size
if Lmx is None:
Lmx = _np.zeros((bsO - 1, bsO - 1), 'complex') if bsO > 0 else None
# self.paramvals = [hamCoeffs] + [otherParams]
# where hamCoeffs are *real* and of length d2-1 (self.dim == d2)
if bsH > 0:
hamCoeffs = paramvals[0:bsH - 1]
nHam = bsH - 1
else:
hamCoeffs = None
nHam = 0
#build up otherCoeffs based on param_mode and other_mode
if bsO > 0:
if other_mode == "diagonal":
otherParams = paramvals[nHam:]
expected_shape = (1,) if (param_mode in ("depol", "reldepol")) else (bsO - 1,)
assert(otherParams.shape == expected_shape)
if param_mode in ("depol", "reldepol"):
otherParams = otherParams[0] * _np.ones(bsO - 1, 'd') # replicate single param bsO-1 times
if param_mode in ("cptp", "depol"):
otherCoeffs = otherParams**2 # Analogous to L*L_dagger
else: # "unconstrained"
otherCoeffs = otherParams
elif other_mode == "diag_affine":
if param_mode in ("depol", "reldepol"):
otherParams = paramvals[nHam:].reshape((1 + bsO - 1,))
otherCoeffs = _np.empty((2, bsO - 1), 'd') # leave as real type b/c doesn't have complex entries
if param_mode == "depol":
otherCoeffs[0, :] = otherParams[0]**2
else:
otherCoeffs[0, :] = otherParams[0]
otherCoeffs[1, :] = otherParams[1:]
else:
otherParams = paramvals[nHam:].reshape((2, bsO - 1))
if param_mode == "cptp":
otherCoeffs = otherParams.copy()
otherCoeffs[0, :] = otherParams[0]**2
else: # param_mode == "unconstrained"
#otherCoeffs = _np.empty((2,bsO-1),'complex')
otherCoeffs = otherParams
else: # other_mode == "all"
otherParams = paramvals[nHam:].reshape((bsO - 1, bsO - 1))
if param_mode == "cptp":
# otherParams is an array of length (bs-1)*(bs-1) that
# encodes a lower-triangular matrix "Lmx" via:
# Lmx[i,i] = otherParams[i,i]
# Lmx[i,j] = otherParams[i,j] + 1j*otherParams[j,i] (i > j)
for i in range(bsO - 1):
Lmx[i, i] = otherParams[i, i]
for j in range(i):
Lmx[i, j] = otherParams[i, j] + 1j * otherParams[j, i]
#The matrix of (complex) "other"-coefficients is build by
# assuming Lmx is its Cholesky decomp; means otherCoeffs
# is pos-def.
# NOTE that the Cholesky decomp with all positive real diagonal
# elements is *unique* for a given positive-definite otherCoeffs
# matrix, but we don't care about this uniqueness criterion and so
# the diagonal els of Lmx can be negative and that's fine -
# otherCoeffs will still be posdef.
otherCoeffs = _np.dot(Lmx, Lmx.T.conjugate())
#DEBUG - test for pos-def
#evals = _np.linalg.eigvalsh(otherCoeffs)
#DEBUG_TOL = 1e-16; #print("EVALS DEBUG = ",evals)
#assert(all([ev >= -DEBUG_TOL for ev in evals]))
else: # param_mode == "unconstrained"
#otherParams holds otherCoeff real and imaginary parts directly
otherCoeffs = _np.empty((bsO - 1, bsO - 1), 'complex')
for i in range(bsO - 1):
otherCoeffs[i, i] = otherParams[i, i]
for j in range(i):
otherCoeffs[i, j] = otherParams[i, j] + 1j * otherParams[j, i]
otherCoeffs[j, i] = otherParams[i, j] - 1j * otherParams[j, i]
else:
otherCoeffs = None
return hamCoeffs, otherCoeffs
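# Illustrative sketch: a hypothetical helper (not part of the module API) showing
# the round trip with lindblad_projections_to_paramvals above. `paramvals` is assumed
# to have been produced with param_mode="cptp", other_mode="all" and single-qubit
# basis sizes (ham_basis_size == other_basis_size == 4).
def _example_paramvals_to_lindblad_projections(paramvals):
    """Sketch: recover coefficient arrays from a 12-element parameter vector."""
    hamProjs, otherProjs = paramvals_to_lindblad_projections(
        paramvals, ham_basis_size=4, other_basis_size=4,
        param_mode="cptp", other_mode="all")
    # hamProjs has length 3; otherProjs is a positive-semidefinite 3x3 matrix rebuilt
    # from its Cholesky-encoded parameters.
    return hamProjs, otherProjs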
#TODO: replace two_qubit_gate, one_qubit_gate, unitary_to_pauligate_* with
# calls to this one and unitary_to_processmx
def rotation_gate_mx(r, mxBasis="gm"):
"""
Construct a rotation operation matrix.
Build the operation matrix corresponding to the unitary
`exp(-i * (r[0]/2*PP[0]*sqrt(d) + r[1]/2*PP[1]*sqrt(d) + ...) )`
where `PP` is the array of Pauli-product matrices
obtained via `pp_matrices(d)`, where `d = sqrt(len(r)+1)`.
The division by 2 is by convention, and the sqrt(d) essentially
un-normalizes the matrices returned by `pp_matrices`
so that they are equal to products of the *standard* Pauli matrices.
Parameters
----------
r : tuple
A tuple of coefficients, one per non-identity
Pauli-product basis element.
mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
The source and destination basis, respectively. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
Returns
-------
numpy array
a d^2 x d^2 operation matrix in the specified basis.
"""
d = int(round(_np.sqrt(len(r) + 1)))
assert(d**2 == len(r) + 1), "Invalid number of rotation angles"
#get Pauli-product matrices (in std basis)
pp = _bt.basis_matrices('pp', d**2)
assert(len(r) == len(pp[1:]))
#build unitary (in std basis)
ex = _np.zeros((d, d), 'complex')
for rot, pp_mx in zip(r, pp[1:]):
ex += rot / 2.0 * pp_mx * _np.sqrt(d)
U = _spl.expm(-1j * ex)
stdGate = unitary_to_process_mx(U)
ret = _bt.change_basis(stdGate, 'std', mxBasis)
return ret
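# Illustrative sketch: a hypothetical helper (not part of the module API) showing a
# simple use of rotation_gate_mx. The rotation angles are arbitrary assumptions.
def _example_rotation_gate_mx():
    """Sketch: a single-qubit X(pi/2) rotation as a 4x4 operation matrix (gm basis)."""
    return rotation_gate_mx([_np.pi / 2, 0, 0])  # (r_X, r_Y, r_Z) coefficients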
def project_model(model, targetModel,
projectiontypes=('H', 'S', 'H+S', 'LND'),
genType="logG-logT"):
"""
Construct one or more new models by projecting the error generator of
`model` onto some sub-space then reconstructing.
Parameters
----------
model : Model
The model whose error generator should be projected.
targetModel : Model
The set of target (ideal) gates.
projectiontypes : tuple of {'H','S','H+S','LND','LNDF'}
Which projections to use. The length of this tuple gives the
number of `Model` objects returned. Allowed values are:
- 'H' = Hamiltonian errors
- 'S' = Stochastic Pauli-channel errors
- 'H+S' = both of the above error types
- 'LND' = errgen projected to a normal (CPTP) Lindbladian
- 'LNDF' = errgen projected to an unrestricted (full) Lindbladian
genType : {"logG-logT", "logTiG"}
The type of error generator to compute. Allowed values are:
- "logG-logT" : errgen = log(gate) - log(target_op)
- "logTiG" : errgen = log( dot(inv(target_op), gate) )
Returns
-------
projected_models : list of Models
Elements are projected versions of `model` corresponding to
the elements of `projectiontypes`.
Nps : list of parameter counts
Integer parameter counts for each model in `projected_models`.
Useful for computing the expected log-likelihood or chi2.
"""
opLabels = list(model.operations.keys()) # operation labels
basis = model.basis
#The projection basis needs to be a basis for density matrices
# (i.e. 2x2 mxs in 1Q case) rather than superoperators (4x4 mxs
# in 1Q case) - which is what model.basis is. So, we just extract
# a builtin basis name for the projection basis.
if basis.name in ('pp', 'gm', 'std', 'qt'):
proj_basis_name = basis.name
else:
proj_basis_name = 'pp' # model.basis is weird so just use paulis as projection basis
if basis.name != targetModel.basis.name:
raise ValueError("Basis mismatch between model (%s) and target (%s)!"
% (model.basis.name, targetModel.basis.name))
# Note: set to "full" parameterization so we can set the gates below
# regardless of what parameterization the original model had.
gsDict = {}; NpDict = {}
for p in projectiontypes:
gsDict[p] = model.copy()
gsDict[p].set_all_parameterizations("full")
NpDict[p] = 0
errgens = [error_generator(model.operations[gl],
targetModel.operations[gl],
targetModel.basis, genType)
for gl in opLabels]
for gl, errgen in zip(opLabels, errgens):
if ('H' in projectiontypes) or ('H+S' in projectiontypes):
hamProj, hamGens = std_errgen_projections(
errgen, "hamiltonian", proj_basis_name, basis, True)
#ham_error_gen = _np.einsum('i,ijk', hamProj, hamGens)
ham_error_gen = _np.tensordot(hamProj, hamGens, (0, 0))
ham_error_gen = _bt.change_basis(ham_error_gen, "std", basis)
if ('S' in projectiontypes) or ('H+S' in projectiontypes):
stoProj, stoGens = std_errgen_projections(
errgen, "stochastic", proj_basis_name, basis, True)
#sto_error_gen = _np.einsum('i,ijk', stoProj, stoGens)
sto_error_gen = _np.tensordot(stoProj, stoGens, (0, 0))
sto_error_gen = _bt.change_basis(sto_error_gen, "std", basis)
if ('LND' in projectiontypes) or ('LNDF' in projectiontypes):
HProj, OProj, HGens, OGens = \
lindblad_errgen_projections(
errgen, proj_basis_name, proj_basis_name, basis, normalize=False,
return_generators=True)
#Note: return values *can* be None if an empty/None basis is given
#lnd_error_gen = _np.einsum('i,ijk', HProj, HGens) + \
# _np.einsum('ij,ijkl', OProj, OGens)
lnd_error_gen = _np.tensordot(HProj, HGens, (0, 0)) + \
_np.tensordot(OProj, OGens, ((0, 1), (0, 1)))
lnd_error_gen = _bt.change_basis(lnd_error_gen, "std", basis)
targetOp = targetModel.operations[gl]
if 'H' in projectiontypes:
gsDict['H'].operations[gl] = operation_from_error_generator(
ham_error_gen, targetOp, genType)
NpDict['H'] += len(hamProj)
if 'S' in projectiontypes:
gsDict['S'].operations[gl] = operation_from_error_generator(
sto_error_gen, targetOp, genType)
NpDict['S'] += len(stoProj)
if 'H+S' in projectiontypes:
gsDict['H+S'].operations[gl] = operation_from_error_generator(
ham_error_gen + sto_error_gen, targetOp, genType)
NpDict['H+S'] += len(hamProj) + len(stoProj)
if 'LNDF' in projectiontypes:
gsDict['LNDF'].operations[gl] = operation_from_error_generator(
lnd_error_gen, targetOp, genType)
NpDict['LNDF'] += HProj.size + OProj.size
if 'LND' in projectiontypes:
evals, U = _np.linalg.eig(OProj)
pos_evals = evals.clip(0, 1e100) # clip negative eigenvalues to 0
OProj_cp = _np.dot(U, _np.dot(_np.diag(pos_evals), _np.linalg.inv(U)))
#OProj_cp is now a pos-def matrix
#lnd_error_gen_cp = _np.einsum('i,ijk', HProj, HGens) + \
# _np.einsum('ij,ijkl', OProj_cp, OGens)
lnd_error_gen_cp = _np.tensordot(HProj, HGens, (0, 0)) + \
_np.tensordot(OProj_cp, OGens, ((0, 1), (0, 1)))
lnd_error_gen_cp = _bt.change_basis(lnd_error_gen_cp, "std", basis)
gsDict['LND'].operations[gl] = operation_from_error_generator(
lnd_error_gen_cp, targetOp, genType)
NpDict['LND'] += HProj.size + OProj.size
#Removed attempt to contract H+S to CPTP by removing positive stochastic projections,
# but this doesn't always return the gate to being CPTP (maybe b/c of normalization)...
#sto_error_gen_cp = _np.einsum('i,ijk', stoProj.clip(None,0), stoGens)
# # (only negative stochastic projections OK)
#sto_error_gen_cp = _tools.std_to_pp(sto_error_gen_cp)
#gsHSCP.operations[gl] = _tools.operation_from_error_generator(
# ham_error_gen, targetOp, genType) #+sto_error_gen_cp
#DEBUG!!!
#print("DEBUG: BEST sum neg evals = ",_tools.sum_of_negative_choi_evals(model))
#print("DEBUG: LNDCP sum neg evals = ",_tools.sum_of_negative_choi_evals(gsDict['LND']))
#Check for CPTP where expected
#assert(_tools.sum_of_negative_choi_evals(gsHSCP) < 1e-6)
#assert(_tools.sum_of_negative_choi_evals(gsDict['LND']) < 1e-6)
#Collect and return requested results:
ret_gs = [gsDict[p] for p in projectiontypes]
ret_Nps = [NpDict[p] for p in projectiontypes]
return ret_gs, ret_Nps
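# Illustrative sketch: a hypothetical helper (not part of the module API) showing how
# project_model might be called. `mdl` and `target_mdl` are assumed to be Model objects
# with matching bases, constructed elsewhere.
def _example_project_model(mdl, target_mdl):
    """Sketch: Hamiltonian-only and H+S projected models plus their parameter counts."""
    projected_models, n_params = project_model(
        mdl, target_mdl, projectiontypes=('H', 'H+S'), genType="logG-logT")
    return dict(zip(('H', 'H+S'), projected_models)), n_params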
def get_a_best_case_gauge_transform(gate_mx, target_gate_mx, returnAll=False):
"""
Returns a gauge transformation that maps `gate_mx` into a matrix that is
co-diagonal with `target_gate_mx`, i.e. they share a common set of eigenvectors.
Gauge transformations effectively change the basis of all the gates in a model.
From the perspective of a single gate, a gauge transformation leaves its
eigenvalues the same and changes its eigenvectors. This function finds a *real*
transformation that transforms the eigenspaces of `gate_mx` so that there exists
a set of eigenvectors which diagonalize both `gate_mx` and `target_gate_mx`.
Parameters
----------
gate_mx, target_gate_mx : numpy.ndarray
The gate and target-gate matrices.
returnAll : bool, optional
If true, also return the matrices of eigenvectors
for `Ugate` for gate_mx and `Utgt` for target_gate_mx such
that `U = dot(Utgt, inv(Ugate))` is real.
Returns
-------
U : numpy.ndarray
A gauge transformation such that if `epgate = U * gate_mx * U_inv`,
then `epgate` (which has the same eigenvalues as `gate_mx`) can be
diagonalized with a set of eigenvectors that also diagonalize
`target_gate_mx`. Furthermore, `U` is real.
Ugate, Utgt : numpy.ndarray
only if `returnAll == True`. See above.
"""
# A complication that must be dealt with is that
# the eigenvalues of `target_gate_mx` can be degenerate,
# and so matching up eigenvalues can't be done *just* based on value.
# Our algorithm consists of two steps:
# 1) match gate & target eigenvalues based on value, ensuring conjugacy
# relationships between eigenvalues are preserved.
# 2) for each eigenvalue/vector of `gate`, project the eigenvector onto
# the eigenspace of `tgt_gate` corresponding to the matched eigenvalue.
# (treat conj-pair eigenvalues of `gate` together).
# we want a matrix that gauge-transforms gate_mx into a matrix as
# close to target_gate_mx as possible, i.e. that puts gate_mx's
# eigenvalues in the eigenspaces of target_gate_mx. This is done
# by Ubest = _np.dot(Utgt, inv(Uop)), but there are often degrees
# of freedom in Uop because of its degeneracies. Also, we want Ubest
# to be *real*, so we need to ensure the conjugacy structure of Utgt
# and Uop match...
assert(_np.linalg.norm(gate_mx.imag) < 1e-8)
assert(_np.linalg.norm(target_gate_mx.imag) < 1e-8)
if True: # NEW approach that gives sorted eigenvectors
def get_eigenspace_pairs(mx, TOL=1e-6):
evals, U = _np.linalg.eig(mx) # so mx = U * evals * Uinv
espace_pairs = {}; conj_pair_indices = []
#Pass 1: real evals and positive-imaginary-element-of-conjugate pair evals
# (these are the representatives of "eigenspace pairs")
for i, ev in enumerate(evals):
if ev.imag < -TOL:
conj_pair_indices.append(i); continue # save for pass2
#see if ev is already in espace_pairs
for k, v in espace_pairs.items():
if abs(k - ev) < TOL:
espace_pairs[k]['indices'].append(i)
espace_pairs[k]['conj_pair_indices'].append(None)
#espace_pairs[k]['evecs'].append(U[:,i])
break
else:
espace_pairs[ev] = {'indices': [i], 'conj_pair_indices': [None]}
#Pass 2: negative-imaginary-part elements of evals that occur in conjugate pairs
for i in conj_pair_indices:
ev_pos = _np.conjugate(evals[i])
for k, v in espace_pairs.items(): # ev_pos *should* be in espace_pairs
if abs(k - ev_pos) < TOL:
#found the correct eigenspace-pair to add this eval & evec to,
# now figure out where to put this index based on conjugacy relationships,
# i.e. U[:,esp['indices'][i]] is always conjugate to U[:,esp['conj_pair_indices'][i]]
for jj, j in enumerate(espace_pairs[k]['indices']):
if espace_pairs[k]['conj_pair_indices'][jj] is None: # an empty slot
espace_pairs[k]['conj_pair_indices'][jj] = i
U[:, i] = U[:, j].conj()
break
else:
raise ValueError("Nowhere to place a conjugate eigenvector %d-dim eigenbasis for %s!"
% (len(espace_pairs[k]['indices']), str(k)))
break
else:
raise ValueError("Expected to find %s as an espace-pair representative in %s"
% (str(ev_pos), str(espace_pairs.keys())))
#if not (_np.allclose(mx, _np.dot(U, _np.dot(_np.diag(evals), _np.linalg.inv(U))))):
# import bpdb; bpdb.set_trace()
return evals, U, espace_pairs
def standard_diag(mx, TOL=1e-6):
evals, U, espairs = get_eigenspace_pairs(mx)
std_evals = []
std_evecs = []
sorted_rep_evals = sorted(list(espairs.keys()), key=lambda x: (x.real, x.imag))
            for ev in sorted_rep_evals:  # iterate in sorted order just for definiteness
                info = espairs[ev]
                dim = len(info['indices'])  # dimension of this eigenspace (and its pair, if there is one)
                #Ensure real-eigenvalue blocks have real eigenvectors
if abs(ev.imag) < TOL:
#find linear combinations of the eigenvectors that are real
Usub = U[:, info['indices']]
if _np.linalg.norm(Usub.imag) > TOL:
# Im part of Usub * combo = Usub.real*combo.imag + Usub.imag*combo.real
combo_real_imag = _mt.nullspace(_np.concatenate((Usub.imag, Usub.real), axis=1))
combos = combo_real_imag[0:dim, :] + 1j * combo_real_imag[dim:, :]
if combos.shape[1] != dim:
raise ValueError(("Can only find %d (< %d) *real* linear combinations of"
" vectors in eigenspace for %s!") % (combos.shape[1], dim, str(ev)))
U[:, info['indices']] = _np.dot(Usub, combos)
assert(_np.linalg.norm(U[:, info['indices']].imag) < TOL)
#Add real eigenvalues and vectors
std_evals.extend([ev] * dim)
std_evecs.extend([U[:, i] for i in info['indices']])
else: # complex eigenvalue case - should have conjugate pair info
#Ensure blocks for conjugate-pairs of eigenvalues follow one after another and
# corresponding eigenvectors (e.g. the first of each block) are conjugate pairs
# (this is already done in the eigenspace construction)
assert(len(info['conj_pair_indices']) == dim)
std_evals.extend([ev] * dim)
std_evals.extend([_np.conjugate(ev)] * dim)
std_evecs.extend([U[:, i] for i in info['indices']])
std_evecs.extend([U[:, i] for i in info['conj_pair_indices']])
return _np.array(std_evals), _np.array(std_evecs).T
#Create "gate_tilde" which has the eigenvectors of gate_mx around the matched eigenvalues of target_gate_mx
# Doing this essentially decouples the problem of eigenvalue matching from the rest of the task -
# after gate_tilde is created, it and target_gate_mx have exactly the *same* eigenvalues.
evals_tgt, Utgt = _np.linalg.eig(target_gate_mx)
evals_gate, Uop = _np.linalg.eig(gate_mx)
pairs = _mt.minweight_match_realmxeigs(evals_gate, evals_tgt)
replace_evals = _np.array([evals_tgt[j] for _, j in pairs])
gate_tilde = _np.dot(Uop, _np.dot(_np.diag(replace_evals), _np.linalg.inv(Uop)))
#Create "standard diagonalizations" of gate_tilde and target_gate_mx, which give
# sort the eigenvalues and ensure eigenvectors occur in *corresponding* conjugate pairs
# (e.g. even when evals +1j and -1j have multiplicity 4, the first 4-D eigenspace, the
evals_tgt, Utgt = standard_diag(target_gate_mx)
evals_tilde, Uop = standard_diag(gate_tilde)
assert(_np.allclose(evals_tgt, evals_tilde))
#Update Utgt so that Utgt * inv_Uop is close to the identity
kite = _mt.get_kite(evals_tgt) # evals are grouped by standard_diag, so this works
D_prior_to_proj = _np.dot(_np.linalg.inv(Utgt), Uop)
#print("D prior to projection to ",kite," kite:"); _mt.print_mx(D_prior_to_proj)
D = _mt.project_onto_kite(D_prior_to_proj, kite)
start = 0
for i, k in enumerate(kite):
slc = slice(start, start + k)
dstart = start + k
for kk in kite[i + 1:]:
if k == kk and _np.isclose(evals_tgt[start], evals_tgt[dstart].conj()): # conjugate block!
dslc = slice(dstart, dstart + kk)
# enforce block conjugacy needed to retain Uproj conjugacy structure
D[dslc, dslc] = D[slc, slc].conj()
break
dstart += kk
start += k
Utgt = _np.dot(Utgt, D) # update Utgt
Utrans = _np.dot(Utgt, _np.linalg.inv(Uop))
assert(_np.linalg.norm(_np.imag(Utrans)) < 1e-7)
Utrans = Utrans.real # _np.real_if_close(Utrans, tol=1000)
if returnAll:
return Utrans, Uop, Utgt, evals_tgt
else:
return Utrans
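    # NOTE: the code below implements the OLD approach.  It is unreachable because the
    # `if True:` block above always returns, and is kept here only for reference.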
evals_tgt, Utgt = _np.linalg.eig(target_gate_mx)
evals_gate, Uop = _np.linalg.eig(gate_mx)
#_, pairs = _mt.minweight_match(evals_tgt, evals_gate, return_pairs=True)
pairs = _mt.minweight_match_realmxeigs(evals_tgt, evals_gate)
#Form eigenspaces of Utgt
eigenspace = {} # key = index of target eigenval, val = assoc. eigenspace
for i, ev in enumerate(evals_tgt):
for j in eigenspace:
if _np.isclose(ev, evals_tgt[j]): # then add evector[i] to this eigenspace
eigenspace[j].append(Utgt[:, i])
eigenspace[i] = eigenspace[j] # reference!
break
else:
eigenspace[i] = [Utgt[:, i]] # new list = new eigenspace
#Project each eigenvector (col of Uop) onto space of cols
evectors = {} # key = index of gate eigenval, val = assoc. (projected) eigenvec
for ipair, (i, j) in enumerate(pairs):
#print("processing pair (i,j) = ",i,j)
if j in evectors: continue # we already processed this one!
# non-orthog projection:
# v = E * coeffs s.t. |E*coeffs-v|^2 is minimal (E is not square so can't invert)
# --> E.dag * v = E.dag * E * coeffs
# --> inv(E.dag * E) * E.dag * v = coeffs
# E*coeffs = E * inv(E.dag * E) * E.dag * v
E = _np.array(eigenspace[i]).T; Edag = E.T.conjugate()
coeffs = _np.dot(_np.dot(_np.linalg.inv(_np.dot(Edag, E)), Edag), Uop[:, j])
evectors[j] = _np.dot(E, coeffs)
#check for conjugate pair
#DB: print("Looking for conjugate:")
for i2, j2 in pairs[ipair + 1:]:
if abs(evals_gate[j].imag) > 1e-6 and _np.isclose(evals_gate[j], _np.conjugate(evals_gate[j2])) \
and _np.allclose(Uop[:, j], Uop[:, j2].conj()):
#DB: print("Found conjugate at j = ",j2)
evectors[j2] = _np.conjugate(evectors[j])
# x = _np.linalg.solve(_np.dot(Edag, E), _np.dot(Edag, evectors[j2]))
#assert(_np.isclose(_np.linalg.norm(x),_np.linalg.norm(coeffs))) ??
#check that this vector is in the span of eigenspace[i2]?
#build new "Utgt" using specially chosen linear combos of degenerate-eigenvecs
Uproj = _np.array([evectors[i] for i in range(Utgt.shape[1])]).T
assert(_np.allclose(_np.dot(Uproj, _np.dot(_np.diag(evals_tgt), _np.linalg.inv(Uproj))), target_gate_mx))
#This is how you get the eigenspace-projected gate:
# epgate = _np.dot(Uproj, _np.dot(_np.diag(evals_gate), Uproj_inv))
# epgate = _np.real_if_close(epgate, tol=1000)
# G = Uop * evals_gate * Uop_inv => eval_gate = Uop_inv * G * Uop
# epgate = Uproj * evals_gate * Uproj_inv (eigenspace-projected gate)
# so epgate = (Uproj Uop_inv) G (Uproj Uop_inv)_inv => (Uproj Uop_inv) is
# a "best_gauge_transform" for G, i.e. it makes G codiagonal with G_tgt
Ubest = _np.dot(Uproj, _np.linalg.inv(Uop))
assert(_np.linalg.norm(_np.imag(Ubest)) < 1e-7)
# this should never happen & indicates an uncaught failure in
# minweight_match_realmxeigs(...)
Ubest = Ubest.real
if returnAll:
return Ubest, Uop, Uproj, evals_tgt
else:
return Ubest
def project_to_target_eigenspace(model, targetModel, EPS=1e-6):
"""
Project each gate of `model` onto the eigenspace of the corresponding
gate within `targetModel`. Return the resulting `Model`.
Parameters
----------
model, targetModel : Model
The model being projected and the model specifying the "target"
eigen-spaces, respectively.
EPS : float, optional
Small magnitude specifying how much to "nudge" the target gates
before eigen-decomposing them, so that their spectra will have the
same conjugacy structure as the gates of `model`.
Returns
-------
Model
"""
ret = targetModel.copy()
ret.set_all_parameterizations("full") # so we can freely assign gates new values
for gl, gate in model.operations.items():
tgt_gate = targetModel.operations[gl]
#Essentially, we want to replace the eigenvalues of `tgt_gate`
# (and *only* the eigenvalues) with those of `gate`. This is what
# a "best gate gauge transform does" (by definition)
gate_mx = gate.todense()
Ugauge = get_a_best_case_gauge_transform(gate_mx, tgt_gate.todense())
Ugauge_inv = _np.linalg.inv(Ugauge)
epgate = _np.dot(Ugauge, _np.dot(gate_mx, Ugauge_inv))
ret.operations[gl] = epgate
return ret
def unitary_to_pauligate(U):
"""
    Get the linear operator on (vectorized) density
    matrices corresponding to an n-qubit unitary
    operator on states.
Parameters
----------
    U : numpy array
        A d x d array giving the action of the unitary
        on a state in the sigma-z basis,
        where d = 2**n for n qubits.
Returns
-------
numpy array
The operator on density matrices that have been
vectorized as d**2 vectors in the Pauli basis.
"""
assert U.shape[0] == U.shape[1], '"Unitary" matrix is not square'
return _bt.change_basis(unitary_to_process_mx(U), 'std', 'pp')
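def _example_unitary_to_pauligate():
    # Minimal usage sketch (a hypothetical helper added for illustration, not part of the
    # original API): the single-qubit Pauli-X unitary becomes a 4x4 superoperator acting on
    # density matrices vectorized in the Pauli-product ("pp") basis.
    X = _np.array([[0., 1.], [1., 0.]], dtype=complex)
    G = unitary_to_pauligate(X)
    assert G.shape == (4, 4)
    return G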
def is_valid_lindblad_paramtype(typ):
"""
Whether `typ` is a recognized Lindblad-gate parameterization type.
A *Lindblad type* is comprised of a parameter specification followed
optionally by an evolution-type suffix. The parameter spec can be
"GLND" (general unconstrained Lindbladian), "CPTP" (cptp-constrained),
or any/all of the letters "H" (Hamiltonian), "S" (Stochastic, CPTP),
"s" (Stochastic), "A" (Affine), "D" (Depolarization, CPTP),
"d" (Depolarization) joined with plus (+) signs. Note that "H"
cannot appear alone, and that "A" cannot appear without one of
{"S","s","D","d"}. The suffix can be non-existent (density-matrix),
"terms" (state-vector terms) or "clifford terms" (stabilizer-state
terms). For example, valid Lindblad types are "H+S", "H+d+A",
"CPTP clifford terms", or "S+A terms".
Returns
-------
bool
"""
try:
baseTyp, _ = split_lindblad_paramtype(typ)
except ValueError:
return False # if can't even split `typ`
return baseTyp in ("CPTP", "H+S", "S", "H+S+A", "S+A", "H+D", "D", "H+D+A", "D+A",
"GLND", "H+s", "s", "H+s+A", "s+A", "H+d", "d", "H+d+A", "d+A")
def split_lindblad_paramtype(typ):
"""
    Splits a Lindblad-gate parameterization type into
a base-type (e.g. "H+S") and an evolution-type
string.
Parameters
----------
typ : str
The parameterization type, e.g. "H+S terms".
Returns
-------
base_type : str
The "base-parameterization" part of `typ`.
evotype : str
The evolution type corresponding to `typ`.
"""
bTyp = typ.split()[0] # "base" type
evostr = " ".join(typ.split()[1:])
if evostr == "": evotype = "densitymx"
elif evostr == "terms": evotype = "svterm"
elif evostr == "clifford terms": evotype = "cterm"
else: raise ValueError("Unrecognized evotype in `typ`=%s" % typ)
return bTyp, evotype
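def _example_lindblad_paramtype_helpers():
    # Minimal usage sketch (a hypothetical helper added for illustration): shows how a
    # Lindblad parameterization string splits into a base type and an evolution type, and
    # how validity checking behaves, per the docstrings above.
    assert split_lindblad_paramtype("H+S terms") == ("H+S", "svterm")
    assert split_lindblad_paramtype("CPTP") == ("CPTP", "densitymx")
    assert split_lindblad_paramtype("S+A clifford terms") == ("S+A", "cterm")
    assert is_valid_lindblad_paramtype("H+S terms")
    assert not is_valid_lindblad_paramtype("H")  # "H" alone is not a valid base type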
def eLabelToOutcome(povm_and_effect_lbl):
"""TODO: Docstring """
# Helper fn: POVM_ELbl:sslbls -> Elbl mapping
if povm_and_effect_lbl is None:
return "NONE" # Dummy label for placeholding
else:
if isinstance(povm_and_effect_lbl, _Label):
last_underscore = povm_and_effect_lbl.name.rindex('_')
effect_lbl = povm_and_effect_lbl.name[last_underscore + 1:]
else:
last_underscore = povm_and_effect_lbl.rindex('_')
effect_lbl = povm_and_effect_lbl[last_underscore + 1:]
return effect_lbl # effect label alone *is* the outcome
def eLabelToPOVM(povm_and_effect_lbl):
"""TODO: Docstring """
# Helper fn: POVM_ELbl:sslbls -> POVM mapping
if povm_and_effect_lbl is None:
return "NONE" # Dummy label for placeholding
else:
if isinstance(povm_and_effect_lbl, _Label):
last_underscore = povm_and_effect_lbl.name.rindex('_')
povm_name = povm_and_effect_lbl.name[:last_underscore]
else:
last_underscore = povm_and_effect_lbl.rindex('_')
povm_name = povm_and_effect_lbl[:last_underscore]
return povm_name
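# Illustrative sketch (the label string is hypothetical): for a simple string label such as
# "Mdefault_0", eLabelToPOVM returns "Mdefault" and eLabelToOutcome returns "0".
#   eLabelToOutcome("Mdefault_0")  # -> "0"
#   eLabelToPOVM("Mdefault_0")     # -> "Mdefault"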
|
StarcoderdataPython
|
3351087
|
from chainer_compiler.elichika.typing.types import *
__all__ = [ 'builtin_func_ty' ]
def ty_len(ty_args, ty_kwargs):
x_type, = ty_args
if isinstance(x_type, TyList):
return TyInt()
if isinstance(x_type, TyTuple):
return TyInt(x_type.size())
if isinstance(x_type, TyTensor):
return TyInt(x_type.shape[0].value)
if isinstance(x_type, TyUserDefinedClass):
assert hasattr(x_type.instance, '__len__')
return TyInt(len(x_type.instance))
assert False
builtin_func_ty = {
len : ty_len,
}
|
StarcoderdataPython
|
1640827
|
<reponame>cmartinaf/rezpackages<gh_stars>1-10
name = 'ffmpeg'
version = '3.4'
authors = [
'FFmpeg Team'
]
description = \
'''
FFmpeg is the leading multimedia framework, able to decode, encode, transcode,
mux, demux, stream, filter and play pretty much anything that humans and
machines have created.
'''
private_build_requires = [
'yasm'
]
build_requires = [
'gcc',
'x264',
'x265',
'fdk_aac',
'lame',
'opus',
'vorbis',
'vpx'
]
variants = [
['platform-linux', 'arch-x86_64', 'os-CentOS-7']
]
tools = [
'ffmpeg',
'ffserver',
'ffprobe',
]
uuid = 'ffmpeg'
def commands():
env.PATH.append('{root}/bin')
env.MANPATH.append('{root}/share/man')
if building:
env.CPATH.append('{root}/include')
env.LIBRARY_PATH.append('{root}/lib')
env.PKG_CONFIG_PATH.append('{root}/lib/pkgconfig')
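# Example usage sketch (assumes a standard rez installation and that this package has been
# built into a local package repository; the input/output file names are placeholders):
#   rez-env ffmpeg-3.4 -- ffmpeg -i input.mov output.mp4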
|
StarcoderdataPython
|
38776
|
<filename>ch05/ch0501_convnet.py
# -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : <EMAIL>
@site : https://zhuyuanxiang.github.io
---------------------------
@Software : PyCharm
@Project : deep-learning-with-python-notebooks
@File : ch0501_convnet.py
@Version : v0.1
@Time : 2019-11-20 10:18
@License : (C)Copyright 2018-2019, zYx.Tom
@Reference : "Deep Learning with Python, Francois Chollet", Sec05,P
@Desc : Deep learning for computer vision -- introduction to convolutional neural networks
"""
import os
import sys
import matplotlib.pyplot as plt
import numpy as np  # pip install numpy<1.17; versions below 1.17 do not raise the error
import winsound
from keras import activations
from keras import layers
from keras import losses
from keras import metrics
from keras import models
from keras import optimizers
from keras.datasets import mnist
from keras.utils import to_categorical
# Suppress the warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Set the printed output precision to 3 decimal places
np.set_printoptions(precision = 3, suppress = True, threshold = np.inf, linewidth = 200)
# to make this notebook's output stable across runs
seed = 42
np.random.seed(seed)
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# numpy 1.16.4 is required
assert np.__version__ in ["1.16.5", "1.16.4"]
def get_convnet_model():
print("构造卷积神经网络模型")
model = models.Sequential()
    # The network outputs tensors of shape (height, width, channels)
model.add(layers.Conv2D(32, (3, 3), activation = activations.relu, input_shape = (28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation = activations.relu))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation = activations.relu))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation = activations.relu))
model.add(layers.Dense(10, activation = activations.softmax))
# print(model.summary())
return model
print("* Code 3-1:加载数据集...")
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
print("\t训练数据集(train_labels):60000 条数据;测试数据集(test_labels):10000 条数据")
print("\t\t train_images.shape =", train_images.shape)
print("\t\t train_lables.shape =", train_labels.shape)
print("\t\t test_images.shape =", test_images.shape)
print("\t\t test_labels.shape =", test_labels.shape)
print("\t数据集中每条数据是一张图片")
print("\t\t train_images[0].shape =", train_images[0].shape)
print("\t\t test_images[0].shape =", test_images[0].shape)
print("\t每条数据描述一个图片对应的数字:0~9")
print("\t\t train_lables[:10] =", train_labels[:10])
print("\t\t test_labels[:10] =", test_labels[:10])
train_images = train_images.reshape((60000, 28, 28, 1)).astype('float32') / 255
test_images = test_images.reshape((10000, 28, 28, 1)).astype('float32') / 255
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
model = get_convnet_model()
model.compile(optimizer = optimizers.rmsprop(lr = 0.001),
loss = losses.categorical_crossentropy, metrics = [metrics.categorical_accuracy])
history = model.fit(train_images, train_labels, epochs = 20, batch_size = 64, verbose = 2, use_multiprocessing = True)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose = 2, use_multiprocessing = True)
print("测试集的评估精度 =", test_acc)
loss = history.history['loss']
epochs_range = range(1, len(loss) + 1)
categorical_acc = history.history['categorical_accuracy']
plt.plot(epochs_range, loss, 'bo', label = "Training set loss")
plt.title('Loss for different datasets')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.plot(epochs_range, categorical_acc, 'bo', label = "Training set accuracy")
plt.title('Accuracy for different datasets')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
# Beep to signal that the run has finished
winsound.Beep(600, 500)
if len(plt.get_fignums()) != 0:
plt.show()
pass
|
StarcoderdataPython
|
3254404
|
from typing import List
from pytanga.components import AbstractComponent
class ospfComponent(AbstractComponent):
def __init__(self,
process_id,
vrf=None,
router_id=None,
nsr=None,
maximum_paths=None,
domain_tag=None,
ispf=None,
prefix_suppression=None,
priority=None,
shutdown=None,
cost=None,
flood_reduction=None,
hello_interval=None,
mtu_ignore=None,
resync_timeout=None,
retransmit_interval=None,
transmit_delay=None
):
self._xmlns = {
'xmlns': "http://cisco.com/ns/yang/Cisco-IOS-XE-ospf",
}
self.attributes = self.setAttributes(
process_id,
vrf,
router_id,
nsr,
maximum_paths,
domain_tag,
ispf,
prefix_suppression,
priority,
shutdown,
cost,
flood_reduction,
hello_interval,
mtu_ignore,
resync_timeout,
retransmit_interval,
transmit_delay
)
self.parent_xmlns = {}
self._children: List[AbstractComponent] = []
self.childrenData = []
self.tag = 'ospf'
@property
def xmlns(self):
return self._xmlns
@xmlns.setter
def xmlns(self, xmlns):
self._xmlns = xmlns
def setAttributes(self,
process_id,
vrf,
router_id,
nsr,
maximum_paths,
domain_tag,
ispf,
prefix_suppression,
priority,
shutdown,
cost,
flood_reduction,
hello_interval,
mtu_ignore,
resync_timeout,
retransmit_interval,
transmit_delay):
attributes = {}
if(process_id):
attributes['id'] = str(process_id)
if(vrf):
attributes['vrf'] = vrf
if(router_id):
attributes['router-id'] = router_id
if(nsr):
attributes['nsr'] = None
if(maximum_paths):
attributes['maximum-paths'] = str(maximum_paths)
if(domain_tag):
attributes['domain-tag'] = str(domain_tag)
if(ispf):
attributes['ispf'] = None
if(prefix_suppression):
attributes['prefix-suppression'] = None
if(priority):
attributes['priority'] = str(priority)
if(shutdown):
attributes['shutdown'] = 'true'
if(cost):
attributes['cost'] = str(cost)
if(flood_reduction):
attributes['flood-reduction'] = None
if(hello_interval):
attributes['hello-interval'] = str(hello_interval)
if(mtu_ignore):
attributes['mtu-ignore'] = str(mtu_ignore)
if(resync_timeout):
attributes['resync-timeout'] = str(resync_timeout)
if(retransmit_interval):
attributes['retransmit-interval'] = str(retransmit_interval)
if(transmit_delay):
attributes['transmit-delay'] = transmit_delay
return attributes
def add(self, component) -> None:
self._children.append(component)
def remove(self, component) -> None:
self._children.remove(component)
def is_composite(self) -> bool:
return False
def getXMLNS(self):
childrenData = []
for child in self._children:
self.parent_xmlns.update(child.getXMLNS())
return self.parent_xmlns
def parse(self, serializer):
self.childrenData = []
self.getXMLNS()
for child in self._children:
self.childrenData.append(child.parse(serializer))
return serializer.parse(self)
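# Illustrative usage sketch (attribute values are hypothetical, and the serializer object
# comes from pytanga, so it is not constructed here):
#   ospf = ospfComponent(process_id=1, router_id='10.0.0.1', maximum_paths=4)
#   # ospf.attributes == {'id': '1', 'router-id': '10.0.0.1', 'maximum-paths': '4'}
#   # xml_payload = ospf.parse(serializer)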
|
StarcoderdataPython
|
3271498
|
<filename>source/utils/AttributeSelector.py<gh_stars>0
class AttributeSelector:
def __init__(self):
pass
def __call__(self, tweet):
t = {}
t["text"] = tweet["text"]
t["media"] = []
for m in tweet["extended_entities"]["media"]:
if "jpg" in m["media_url"]:
t["media"].append(m["media_url"])
return t
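# Illustrative usage sketch (the tweet dict shape is assumed from the fields accessed above):
#   selector = AttributeSelector()
#   tweet = {"text": "hello",
#            "extended_entities": {"media": [{"media_url": "https://example.com/photo.jpg"}]}}
#   selector(tweet)  # -> {"text": "hello", "media": ["https://example.com/photo.jpg"]}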
|
StarcoderdataPython
|
1748465
|
from flask import Flask
from flask import render_template
app = Flask(__name__)
@app.route("/")
def hello():
return render_template('welcome.html')
@app.route('/enviar')
def envia():
return render_template('cadastrado.html')
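if __name__ == '__main__':
    # Development entry point (an addition for illustration; debug mode and the default
    # host/port are assumptions -- adjust for your deployment).
    app.run(debug=True)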
|
StarcoderdataPython
|
169857
|
<reponame>gabizinha12/CursoEmVideoPython<filename>Mundo_1/modulos_em_py/ex0022.py
nome = str(input("Type your name: ")).strip()
print("Your name in upper case is {}".format(nome.upper()))
print("Your name in lower case is {}".format(nome.lower()))
print("Your name has {} letters in total".format(len(nome) - nome.count(' ')))
print("Your first name is {} and it has {} letters.".format(nome[:nome.find(' ')], nome.find(' ')))
|
StarcoderdataPython
|
3315464
|
hyq_ref = [-1.652528468457493,
0.06758953014152885,
0.6638277139631803,
0.0,
0.0,
0.0,
1.0,
0.17905666752078864,
0.9253512562075908,
-0.8776870832724601,
0.11147422537786231,
-0.15843632504615043,
1.150049183494211,
-0.1704998924604114,
0.6859376445755911,
-1.1831277202117043,
0.06262698472369518,
-0.42708925470675,
1.2855999319965081]
|
StarcoderdataPython
|
1743023
|
<filename>tf_agents/experimental/examples/ppo/train_eval_lib.py
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Train and Eval PPOClipAgent in the Mujoco environments.
All hyperparameters come from the PPO paper
https://arxiv.org/abs/1707.06347.pdf
"""
import os
from absl import logging
import gin
import reverb
import tensorflow.compat.v2 as tf
from tf_agents.agents.ppo import ppo_clip_agent
from tf_agents.environments import suite_mujoco
from tf_agents.experimental.examples.ppo import ppo_learner
from tf_agents.metrics import py_metrics
from tf_agents.networks import actor_distribution_network
from tf_agents.networks import value_network
from tf_agents.policies import py_tf_eager_policy
from tf_agents.replay_buffers import reverb_replay_buffer
from tf_agents.replay_buffers import reverb_utils
from tf_agents.train import actor
from tf_agents.train import learner
from tf_agents.train import triggers
from tf_agents.train.utils import spec_utils
from tf_agents.train.utils import train_utils
@gin.configurable
def train_eval(
root_dir,
env_name='HalfCheetah-v2',
# Training params
num_iterations=1600,
actor_fc_layers=(64, 64),
value_fc_layers=(64, 64),
learning_rate=3e-4,
collect_sequence_length=2048,
minibatch_size=64,
num_epochs=10,
# Agent params
importance_ratio_clipping=0.2,
lambda_value=0.95,
discount_factor=0.99,
entropy_regularization=0.,
value_pred_loss_coef=0.5,
use_gae=True,
use_td_lambda_return=True,
gradient_clipping=0.5,
value_clipping=None,
# Replay params
reverb_port=None,
replay_capacity=10000,
# Others
policy_save_interval=5000,
summary_interval=1000,
eval_interval=10000,
eval_episodes=100,
debug_summaries=False,
summarize_grads_and_vars=False):
"""Trains and evaluates PPO (Importance Ratio Clipping).
Args:
root_dir: Main directory path where checkpoints, saved_models, and summaries
will be written to.
env_name: Name for the Mujoco environment to load.
num_iterations: The number of iterations to perform collection and training.
actor_fc_layers: List of fully_connected parameters for the actor network,
where each item is the number of units in the layer.
    value_fc_layers: List of fully_connected parameters for the value network,
where each item is the number of units in the layer.
learning_rate: Learning rate used on the Adam optimizer.
collect_sequence_length: Number of steps to take in each collect run.
minibatch_size: Number of elements in each mini batch. If `None`, the entire
collected sequence will be treated as one batch.
num_epochs: Number of iterations to repeat over all collected data per data
      collection step. (Schulman, 2017) sets this to 10 for Mujoco, 15 for
Roboschool and 3 for Atari.
importance_ratio_clipping: Epsilon in clipped, surrogate PPO objective. For
more detail, see explanation at the top of the doc.
lambda_value: Lambda parameter for TD-lambda computation.
discount_factor: Discount factor for return computation. Default to `0.99`
which is the value used for all environments from (Schulman, 2017).
entropy_regularization: Coefficient for entropy regularization loss term.
Default to `0.0` because no entropy bonus was used in (Schulman, 2017).
value_pred_loss_coef: Multiplier for value prediction loss to balance with
policy gradient loss. Default to `0.5`, which was used for all
      environments in the OpenAI baseline implementation. This parameter is
      irrelevant unless you are sharing part of actor_net and value_net. In that
      case, you would want to tune this coefficient, whose value depends on the
network architecture of your choice.
use_gae: If True (default False), uses generalized advantage estimation for
computing per-timestep advantage. Else, just subtracts value predictions
from empirical return.
use_td_lambda_return: If True (default False), uses td_lambda_return for
training value function; here: `td_lambda_return = gae_advantage +
value_predictions`. `use_gae` must be set to `True` as well to enable TD
-lambda returns. If `use_td_lambda_return` is set to True while
`use_gae` is False, the empirical return will be used and a warning will
be logged.
gradient_clipping: Norm length to clip gradients.
    value_clipping: Difference between new and old value predictions is clipped
to this threshold. Value clipping could be helpful when training
very deep networks. Default: no clipping.
reverb_port: Port for reverb server, if None, use a randomly chosen unused
port.
replay_capacity: The maximum number of elements for the replay buffer. Items
      will be wasted if this is smaller than collect_sequence_length.
policy_save_interval: How often, in train_steps, the policy will be saved.
summary_interval: How often to write data into Tensorboard.
eval_interval: How often to run evaluation, in train_steps.
eval_episodes: Number of episodes to evaluate over.
debug_summaries: Boolean for whether to gather debug summaries.
summarize_grads_and_vars: If true, gradient summaries will be written.
"""
collect_env = suite_mujoco.load(env_name)
eval_env = suite_mujoco.load(env_name)
num_environments = 1
observation_tensor_spec, action_tensor_spec, time_step_tensor_spec = (
spec_utils.get_tensor_specs(collect_env))
# TODO(b/172267869): Remove this conversion once TensorNormalizer stops
# converting float64 inputs to float32.
observation_tensor_spec = tf.TensorSpec(
dtype=tf.float32, shape=observation_tensor_spec.shape)
train_step = train_utils.create_train_step()
actor_net = actor_distribution_network.ActorDistributionNetwork(
observation_tensor_spec,
action_tensor_spec,
fc_layer_params=actor_fc_layers,
activation_fn=tf.nn.tanh,
kernel_initializer=tf.keras.initializers.Orthogonal())
value_net = value_network.ValueNetwork(
observation_tensor_spec,
fc_layer_params=value_fc_layers,
kernel_initializer=tf.keras.initializers.Orthogonal())
current_iteration = tf.Variable(0, dtype=tf.int64)
def learning_rate_fn():
# Linearly decay the learning rate.
return learning_rate * (1 - current_iteration / num_iterations)
agent = ppo_clip_agent.PPOClipAgent(
time_step_tensor_spec,
action_tensor_spec,
optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=learning_rate_fn, epsilon=1e-5),
actor_net=actor_net,
value_net=value_net,
importance_ratio_clipping=importance_ratio_clipping,
lambda_value=lambda_value,
discount_factor=discount_factor,
entropy_regularization=entropy_regularization,
value_pred_loss_coef=value_pred_loss_coef,
# This is a legacy argument for the number of times we repeat the data
# inside of the train function, incompatible with mini batch learning.
# We set the epoch number from the replay buffer and tf.Data instead.
num_epochs=1,
use_gae=use_gae,
use_td_lambda_return=use_td_lambda_return,
gradient_clipping=gradient_clipping,
value_clipping=value_clipping,
# TODO(b/150244758): Default compute_value_and_advantage_in_train to False
# after Reverb open source.
compute_value_and_advantage_in_train=False,
# Skips updating normalizers in the agent, as it's handled in the learner.
update_normalizers_in_train=False,
debug_summaries=debug_summaries,
summarize_grads_and_vars=summarize_grads_and_vars,
train_step_counter=train_step)
agent.initialize()
reverb_server = reverb.Server(
[
reverb.Table( # Replay buffer storing experience for training.
name='training_table',
sampler=reverb.selectors.Fifo(),
remover=reverb.selectors.Fifo(),
rate_limiter=reverb.rate_limiters.MinSize(1),
max_size=replay_capacity,
max_times_sampled=1,
),
reverb.Table( # Replay buffer storing experience for normalization.
name='normalization_table',
sampler=reverb.selectors.Fifo(),
remover=reverb.selectors.Fifo(),
rate_limiter=reverb.rate_limiters.MinSize(1),
max_size=replay_capacity,
max_times_sampled=1,
)
],
port=reverb_port)
# Create the replay buffer.
reverb_replay_train = reverb_replay_buffer.ReverbReplayBuffer(
agent.collect_data_spec,
sequence_length=collect_sequence_length,
table_name='training_table',
server_address='localhost:{}'.format(reverb_server.port),
# The only collected sequence is used to populate the batches.
max_cycle_length=1,
rate_limiter_timeout_ms=1000)
reverb_replay_normalization = reverb_replay_buffer.ReverbReplayBuffer(
agent.collect_data_spec,
sequence_length=collect_sequence_length,
table_name='normalization_table',
server_address='localhost:{}'.format(reverb_server.port),
# The only collected sequence is used to populate the batches.
max_cycle_length=1,
rate_limiter_timeout_ms=1000)
rb_observer = reverb_utils.ReverbTrajectorySequenceObserver(
reverb_replay_train.py_client, ['training_table', 'normalization_table'],
sequence_length=collect_sequence_length,
stride_length=collect_sequence_length)
saved_model_dir = os.path.join(root_dir, learner.POLICY_SAVED_MODEL_DIR)
collect_env_step_metric = py_metrics.EnvironmentSteps()
learning_triggers = [
triggers.PolicySavedModelTrigger(
saved_model_dir,
agent,
train_step,
interval=policy_save_interval,
metadata_metrics={
triggers.ENV_STEP_METADATA_KEY: collect_env_step_metric
}),
triggers.StepPerSecondLogTrigger(train_step, interval=summary_interval),
]
def training_dataset_fn():
return reverb_replay_train.as_dataset(
sample_batch_size=num_environments,
sequence_preprocess_fn=agent.preprocess_sequence)
def normalization_dataset_fn():
return reverb_replay_normalization.as_dataset(
sample_batch_size=num_environments,
sequence_preprocess_fn=agent.preprocess_sequence)
agent_learner = ppo_learner.PPOLearner(
root_dir,
train_step,
agent,
experience_dataset_fn=training_dataset_fn,
normalization_dataset_fn=normalization_dataset_fn,
num_batches=1,
num_epochs=num_epochs,
minibatch_size=minibatch_size,
shuffle_buffer_size=collect_sequence_length,
triggers=learning_triggers)
tf_collect_policy = agent.collect_policy
collect_policy = py_tf_eager_policy.PyTFEagerPolicy(
tf_collect_policy, use_tf_function=True)
collect_actor = actor.Actor(
collect_env,
collect_policy,
train_step,
steps_per_run=collect_sequence_length,
observers=[rb_observer],
metrics=actor.collect_metrics(buffer_size=10) + [collect_env_step_metric],
reference_metrics=[collect_env_step_metric],
summary_dir=os.path.join(root_dir, learner.TRAIN_DIR),
summary_interval=summary_interval)
eval_greedy_policy = py_tf_eager_policy.PyTFEagerPolicy(
agent.policy, use_tf_function=True)
if eval_interval:
    logging.info('Initial evaluation.')
eval_actor = actor.Actor(
eval_env,
eval_greedy_policy,
train_step,
metrics=actor.eval_metrics(eval_episodes),
reference_metrics=[collect_env_step_metric],
summary_dir=os.path.join(root_dir, 'eval'),
episodes_per_run=eval_episodes)
eval_actor.run_and_log()
logging.info('Training on %s', env_name)
last_eval_step = 0
for i in range(num_iterations):
collect_actor.run()
# TODO(b/159615593): Update to use observer.flush.
# Reset the reverb observer to make sure the data collected is flushed and
# written to the RB.
rb_observer.reset()
agent_learner.run()
reverb_replay_train.clear()
reverb_replay_normalization.clear()
current_iteration.assign_add(1)
# Eval only if `eval_interval` has been set. Then, eval if the current train
# step is equal or greater than the `last_eval_step` + `eval_interval` or if
# this is the last iteration. This logic exists because agent_learner.run()
# does not return after every train step.
if (eval_interval and
(agent_learner.train_step_numpy >= eval_interval + last_eval_step
or i == num_iterations - 1)):
logging.info('Evaluating.')
eval_actor.run_and_log()
last_eval_step = agent_learner.train_step_numpy
rb_observer.close()
reverb_server.stop()
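# Illustrative invocation sketch (the root_dir path is a placeholder; the remaining values
# shown are simply the documented defaults):
#   train_eval(root_dir='/tmp/ppo_halfcheetah', env_name='HalfCheetah-v2',
#              num_iterations=1600, eval_interval=10000, eval_episodes=100)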
|
StarcoderdataPython
|
1758186
|
<filename>artifact_py/artifact.py<gh_stars>1-10
# artifact_py: the design documentation tool made for everyone.
#
# Copyright (C) 2019 <NAME> <github.com/vitiral>
#
# The source code is Licensed under either of
#
# * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
#
# at your option.
#
# Unless you explicitly state otherwise, any contribution intentionally submitted
# for inclusion in the work by you, as defined in the Apache-2.0 license, shall
# be dual licensed as above, without any additional terms or conditions.
"""The artifact class and builder."""
from __future__ import unicode_literals
from . import utils
from .name import Name
from .name import SubPart
class Artifact:
"""Represents a design object which can be linked to other design objects.
See #SPC-design.artifact
"""
# pylint: disable=too-many-arguments
def __init__(
self,
name,
file_,
partof,
subparts,
impl,
done,
parts,
completion,
extra,
):
self.name = name
self.file = file_
self.partof = partof
self.subparts = subparts
self.impl = impl
self.done = done
self.parts = parts
self.completion = completion
self.extra = extra
def serialize(self, settings):
return {
"name": self.name.serialize(settings),
"file": settings.relpath(self.file),
"partof": sorted(settings.serialize_list(self.partof)),
"subparts": sorted(settings.serialize_list(self.subparts)),
"done": self.done,
"parts": sorted(settings.serialize_list(self.parts)),
"impl": settings.serialize_maybe(self.impl),
"completion": self.completion.serialize(settings),
"extra": self.extra,
}
class ArtifactBuilder:
"""Builder object for the artifact."""
# pylint: disable=too-many-arguments
def __init__(self, name, file_, partof, subparts, done, extra):
self.name = name
self.file = file_
self.partof = partof
self.subparts = subparts
self.done = done
self.extra = extra
self.impl = None
self.parts = None
self.completion = None
@classmethod
def from_attributes(cls, attributes, name, file_):
"""Construct from a dictionary, with some overloads available."""
return cls._from_attributes_consume(attributes=dict(attributes),
name=name,
file_=file_)
@classmethod
def _from_attributes_consume(cls, attributes, name, file_):
name_raw = name.raw
partof = utils.ensure_list(name_raw + ' partof',
attributes.pop('partof', []))
partof = {Name.from_str(n) for n in partof}
subparts = utils.ensure_list(name_raw + ' subparts',
attributes.pop('subparts', []))
subparts = {SubPart.from_str(s) for s in subparts}
done = utils.ensure_str(name_raw + ' done',
attributes.pop('done', None),
allow_none=True)
attributes.pop('artifact', None) # Normal settings. Ignore.
return cls(
name=name,
file_=file_,
partof=partof,
subparts=subparts,
done=done,
extra=attributes,
)
def set_impl(self, impl):
self.impl = impl
def set_parts(self, parts):
self.parts = parts
def set_completion(self, completion):
self.completion = completion
def build(self):
assert self.parts is not None, "must set_parts"
assert self.completion is not None, "must set_completion"
return Artifact(
name=self.name,
file_=self.file,
partof=self.partof,
subparts=self.subparts,
impl=self.impl,
done=self.done,
parts=self.parts,
completion=self.completion,
extra=self.extra,
)
def __repr__(self):
return "ArtifactBuilder({}, partof={})".format(self.name, self.partof)
|
StarcoderdataPython
|
1667221
|
from argparse import ArgumentParser
from loguru import logger
from weakly_supervised_parser.settings import TRAINED_MODEL_PATH
from weakly_supervised_parser.utils.prepare_dataset import DataLoaderHelper
from weakly_supervised_parser.utils.populate_chart import PopulateCKYChart
from weakly_supervised_parser.tree.evaluate import calculate_F1_for_spans, tree_to_spans
from weakly_supervised_parser.model.trainer import InsideOutsideStringClassifier
from weakly_supervised_parser.settings import PTB_TEST_SENTENCES_WITHOUT_PUNCTUATION_PATH, PTB_TEST_GOLD_WITHOUT_PUNCTUATION_ALIGNED_PATH
class Predictor:
def __init__(self, sentence):
self.sentence = sentence
self.sentence_list = sentence.split()
def obtain_best_parse(self, predict_type, model, scale_axis, predict_batch_size, return_df=False):
unique_tokens_flag, span_scores, df = PopulateCKYChart(sentence=self.sentence).fill_chart(predict_type=predict_type,
model=model,
scale_axis=scale_axis,
predict_batch_size=predict_batch_size)
if unique_tokens_flag:
best_parse = "(S " + " ".join(["(S " + item + ")" for item in self.sentence_list]) + ")"
logger.info("BEST PARSE", best_parse)
else:
best_parse = PopulateCKYChart(sentence=self.sentence).best_parse_tree(span_scores)
if return_df:
return best_parse, df
return best_parse
def process_test_sample(index, sentence, gold_file_path, predict_type, model, scale_axis, predict_batch_size, return_df=False):
best_parse, df = Predictor(sentence=sentence).obtain_best_parse(predict_type=predict_type,
model=model,
scale_axis=scale_axis,
predict_batch_size=predict_batch_size,
return_df=True)
gold_standard = DataLoaderHelper(input_file_object=gold_file_path)
sentence_f1 = calculate_F1_for_spans(tree_to_spans(gold_standard[index]), tree_to_spans(best_parse))
if sentence_f1 < 25.0:
logger.warning(f"Index: {index} <> F1: {sentence_f1:.2f}")
else:
logger.info(f"Index: {index} <> F1: {sentence_f1:.2f}")
if return_df:
return best_parse, df
else:
return best_parse
def process_co_train_test_sample(index, sentence, gold_file_path, inside_model, outside_model, return_df=False):
_, df_inside = PopulateCKYChart(sentence=sentence).compute_scores(predict_type="inside", model=inside_model, return_df=True)
_, df_outside = PopulateCKYChart(sentence=sentence).compute_scores(predict_type="outside", model=outside_model, return_df=True)
df = df_inside.copy()
df["scores"] = df_inside["scores"] * df_outside["scores"]
_, span_scores, df = PopulateCKYChart(sentence=sentence).fill_chart(data=df)
best_parse = PopulateCKYChart(sentence=sentence).best_parse_tree(span_scores)
gold_standard = DataLoaderHelper(input_file_object=gold_file_path)
sentence_f1 = calculate_F1_for_spans(tree_to_spans(gold_standard[index]), tree_to_spans(best_parse))
if sentence_f1 < 25.0:
logger.warning(f"Index: {index} <> F1: {sentence_f1:.2f}")
else:
logger.info(f"Index: {index} <> F1: {sentence_f1:.2f}")
return best_parse
def main():
parser = ArgumentParser(description="Inference Pipeline for the Inside Outside String Classifier", add_help=True)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--use_inside", action="store_true", help="Whether to predict using inside model")
group.add_argument("--use_inside_self_train", action="store_true", help="Whether to predict using inside model with self-training")
group.add_argument("--use_outside", action="store_true", help="Whether to predict using outside model")
group.add_argument("--use_inside_outside_co_train", action="store_true", help="Whether to predict using inside-outside model with co-training")
parser.add_argument("--model_name_or_path", type=str, default="roberta-base", help="Path to the model identifier from huggingface.co/models")
parser.add_argument("--save_path", type=str, required=True, help="Path to save the final trees")
parser.add_argument("--scale_axis", choices=[None, 1], default=None, help="Whether to scale axis globally (None) or sequentially (1) across batches during softmax computation")
parser.add_argument("--predict_batch_size", type=int, help="Batch size during inference")
parser.add_argument(
"--inside_max_seq_length", default=256, type=int, help="The maximum total input sequence length after tokenization for the inside model"
)
parser.add_argument(
"--outside_max_seq_length", default=64, type=int, help="The maximum total input sequence length after tokenization for the outside model"
)
args = parser.parse_args()
if args.use_inside:
pre_trained_model_path = TRAINED_MODEL_PATH + "inside_model.onnx"
max_seq_length = args.inside_max_seq_length
if args.use_inside_self_train:
pre_trained_model_path = TRAINED_MODEL_PATH + "inside_model_self_trained.onnx"
max_seq_length = args.inside_max_seq_length
if args.use_outside:
pre_trained_model_path = TRAINED_MODEL_PATH + "outside_model.onnx"
max_seq_length = args.outside_max_seq_length
if args.use_inside_outside_co_train:
inside_pre_trained_model_path = "inside_model_co_trained.onnx"
inside_model = InsideOutsideStringClassifier(model_name_or_path=args.model_name_or_path, max_seq_length=args.inside_max_seq_length)
inside_model.load_model(pre_trained_model_path=inside_pre_trained_model_path)
outside_pre_trained_model_path = "outside_model_co_trained.onnx"
outside_model = InsideOutsideStringClassifier(model_name_or_path=args.model_name_or_path, max_seq_length=args.outside_max_seq_length)
outside_model.load_model(pre_trained_model_path=outside_pre_trained_model_path)
else:
model = InsideOutsideStringClassifier(model_name_or_path=args.model_name_or_path, max_seq_length=max_seq_length)
model.load_model(pre_trained_model_path=pre_trained_model_path)
if args.use_inside or args.use_inside_self_train:
predict_type = "inside"
if args.use_outside:
predict_type = "outside"
with open(args.save_path, "w") as out_file:
print(type(args.scale_axis))
test_sentences = DataLoaderHelper(input_file_object=PTB_TEST_SENTENCES_WITHOUT_PUNCTUATION_PATH).read_lines()
test_gold_file_path = PTB_TEST_GOLD_WITHOUT_PUNCTUATION_ALIGNED_PATH
for test_index, test_sentence in enumerate(test_sentences):
if args.use_inside_outside_co_train:
best_parse = process_co_train_test_sample(
test_index, test_sentence, test_gold_file_path, inside_model=inside_model, outside_model=outside_model
)
else:
best_parse = process_test_sample(test_index, test_sentence, test_gold_file_path, predict_type=predict_type, model=model,
scale_axis=args.scale_axis, predict_batch_size=args.predict_batch_size)
out_file.write(best_parse + "\n")
if __name__ == "__main__":
main()
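# Example CLI invocation (illustrative; the script name and file paths are placeholders for
# however this module is installed):
#   python predict.py --use_inside --model_name_or_path roberta-base \
#       --save_path /tmp/predicted_trees.txt --predict_batch_size 32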
|
StarcoderdataPython
|
1679841
|
"""
Axis class
==========
Axis is a named ordered collection of values.
For these doctests to run we are going to import numcube.Axis and numpy.
>>> from numcube import Axis
>>> import numpy as np
Creation
--------
To create an Axis object, you have to supply it with name and values. Name must be a string,
values must be convertible to one-dimensional numpy array. The values should be of the same type,
otherwise they are converted to the most flexible type.
- initialized by explicit values:
(note: dtype=object is not necessary, it is here to pass the doctests below in both Python 2 and Python 3)
>>> months = Axis("month", ["jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec"])
>>> months
Axis('month', ['jan' 'feb' 'mar' 'apr' 'may' 'jun' 'jul' 'aug' 'sep' 'oct' 'nov' 'dec'])
- initialized from a range:
>>> years = Axis("year", range(2010, 2020))
>>> years
Axis('year', [2010 2011 2012 2013 2014 2015 2016 2017 2018 2019])
Properties
----------
- 'name' returns a string
>>> months.name
'month'
- 'values' returns a numpy array
note: this is commented out since this test is not portable between Python 2 and Python 3
#>>> months.values # doctest: +NORMALIZE_WHITESPACE
array(['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep',
'oct', 'nov', 'dec'])
- str(years) converts the axis to its string representation
>>> str(years)
"Axis('year', [2010 2011 2012 2013 2014 2015 2016 2017 2018 2019])"
- len(axis) returns the number of values
>>> len(months)
12
Slicing, indexing, filtering
----------------------------
The returned object is also Axis with the same name and subset of values.
>>> months[0:4]
Axis('month', ['jan' 'feb' 'mar' 'apr'])
>>> months[-1]
Axis('month', ['dec'])
>>> months[::2]
Axis('month', ['jan' 'mar' 'may' 'jul' 'sep' 'nov'])
When accessing values by their indices, you have to provide double square brackets!
>>> months[[0, 2, 4]]
Axis('month', ['jan' 'mar' 'may'])
The values can be repeated using repeated indices.
>>> months[[1, 2, 1, 2]]
Axis('month', ['feb' 'mar' 'feb' 'mar'])
To filter axis by index, you can also use method take(), which is similar to numpy.take().
>>> months.take([0, 2, 4])
Axis('month', ['jan' 'mar' 'may'])
You can filter the axis by using logical values in a numpy array.
>>> years[np.array([True, False, True, False, True, False, True, False, True, False])]
Axis('year', [2010 2012 2014 2016 2018])
The previous example was not very useful by itself. But numpy array of logical values is
the result of logical expression with axis values. Now this is much more useful.
>>> years[years.values % 2 == 0] # even years
Axis('year', [2010 2012 2014 2016 2018])
>>> years[(years.values >= 2013) & (years.values <= 2016)] # note the single '&', do not confuse with C/C++ '&&' style
Axis('year', [2013 2014 2015 2016])
To filter axis by logical values, you can also use method compress(), which is similar to numpy.compress().
In this case you do not need to convert logical values to numpy array.
>>> years.compress([True, False, True, False, True, False, True, False, True, False])
Axis('year', [2010 2012 2014 2016 2018])
Renaming
--------
We can rename the axis. Renaming returns a new axis (do not forget to assign it to a new variable!),
the original axis remains unchanged.
>>> m = months.rename("M")
>>> m
Axis('M', ['jan' 'feb' 'mar' 'apr' 'may' 'jun' 'jul' 'aug' 'sep' 'oct' 'nov' 'dec'])
This is the original axis, still with the old name:
>>> months
Axis('month', ['jan' 'feb' 'mar' 'apr' 'may' 'jun' 'jul' 'aug' 'sep' 'oct' 'nov' 'dec'])
Sorting
-------
Sorting is one of the places where numcube API and numpy API differs. Numcube sorting returns a copy
of the axis which is analogy to numpy.sort(array) function. On the other hand array.sort() sorts the
array in-place. The reason is that numcube aims to support immutability as much as possible.
>>> persons = Axis("person", ["Steve", "John", "Alex", "Peter", "Linda"])
>>> sorted_persons = persons.sort()
>>> sorted_persons
Axis('person', ['Alex' 'John' 'Linda' 'Peter' 'Steve'])
"""
if __name__ == "__main__":
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
StarcoderdataPython
|
110466
|
<filename>digsby/src/plugins/digsby_iq_version/__init__.py
from decimal import Decimal
from logging import getLogger
from peak.util.addons import AddOn
from pyxmpp.objects import StanzaPayloadObject
from pyxmpp.utils import to_utf8
from pyxmpp.xmlextra import get_node_ns_uri
import hooks
import libxml2
import traceback
import sys
log = getLogger('plugins.digsby_geoip')
DIGSBY_VERSION_NS = 'digsby:iq:version'
class Digsby_IqVersion(AddOn):
def __init__(self, subject):
self.protocol = subject
super(Digsby_IqVersion, self).__init__(subject)
def setup(self, stream):
self.stream = stream
log.debug('setting up digsby:iq:version')
stream.set_iq_get_handler('query', DIGSBY_VERSION_NS, self.version_get)
def version_get(self, iq):
iq = iq.make_result_response()
q = iq.new_query(DIGSBY_VERSION_NS)
q.newTextChild( q.ns(), "name", "Digsby Client" )
q.newTextChild( q.ns(), "version", ('%s' % (getattr(sys, 'REVISION', '?') or '?')))
for k in ('TAG', 'BRAND'):
v = getattr(sys, k, None)
if v:
q.newTextChild( q.ns(), k.lower(), str(v))
if not self.protocol.hide_os:
import platform
platform_string = platform.platform()
# for some reason, on my XP box, platform.platform() contains both
# the platform AND release in platform.platform(). On Ubuntu, OS X,
# and I believe older versions of Windows, this does not happen,
# so we need to add the release in all other cases.
if platform_string.find("XP") == -1:
platform_string += " " + platform.release()
q.newTextChild( q.ns(), "os", platform_string )
self.protocol.send(iq)
return True
def session_started(protocol, stream, *a, **k):
Digsby_IqVersion(protocol).setup(stream)
def initialized(protocol, *a, **k):
protocol.register_feature(DIGSBY_VERSION_NS)
|
StarcoderdataPython
|
3227747
|
from .simple_type import SimpleTypePredictor
from .float_type import FloatType
from .nan_type import NaNType
class StringType(SimpleTypePredictor):
def __init__(self):
self._float_predictor = FloatType()
self._nan_predictor = NaNType()
def validate(self, candidate, **kwargs) -> bool:
"""Return boolean representing if given candidate is a string"""
return (
not self._float_predictor.validate(candidate) and
not self._nan_predictor.validate(candidate) and
isinstance(candidate, str)
)
|
StarcoderdataPython
|
3294740
|
"""
A pattern to create a booking cluster description
The properties in this group are mostly from {region} region.
The properties are usually booked during {breakpoint} breakpoints by {adults} with {children} and {babies}.
These are {avg_spend_per_head} {detached} properties in the {complex} close to {close_to}.
The group's properties are {no smoking}, {pets}, available for {shortbreakok}, and accessible to {accessibility}.
The properties have {content} and contain {sleeps} sleeping places.
The average rating of the properties is {stars} stars.
"""
from main.describer.common import avg_spend_per_head_features, stars_features, general_conditions_features, \
accessibility_features, single_contents_features, plural_contents_features, concat_using_comma_and_and, \
select_the_best, concat_using_comma
from main.describer.user import close_to_features
adults_features = {
'adults_(0.999, 3.0]': ('small', 0),
'adults_(3.0, 8.0]': ('medium', 1),
'adults_(8.0, 16.0]': ('large', 2),
}
children_features = {
'children_(-0.001, 1.0]': ('a child', 0),
'children_(1.0, 3.0]': ('several children', 1),
'children_(3.0, 6.0]': ('many children', 2),
}
sleeps_features = {
"sleeps_(1.999, 4.0]": ("2-4", 0),
"sleeps_(4.0, 7.0]": ("5-7", 1),
"sleeps_(7.0, 11.0]": ("8-11", 2),
"sleeps_(11.0, 17.0]": ("12-17", 3),
"sleeps_(17.0, 22.0]": ("more than 17", 4),
}
def first_sentence(regions):
sentence = "The properties in this group are mostly from the %s" % concat_using_comma_and_and(regions)
sentence += " regions." if len(regions) > 1 else " region."
return sentence
def second_sentence(breakpoints, adults, children, is_babies):
sentence = "The properties are usually booked"
if breakpoints:
sentence += " during %s" % concat_using_comma_and_and(breakpoints)
sentence += " breakpoints" if len(breakpoints) > 1 else " breakpoints"
if adults:
sentence += " by a %s company of people" % select_the_best(adults)
if children:
sentence += " with %s" % select_the_best(children)
if is_babies:
sentence += " and babies"
sentence += "."
return sentence
def third_sentence(avg_spend_per_head, is_detached, is_complex, close_to):
sentence = "These are"
if avg_spend_per_head:
sentence += " %s" % concat_using_comma_and_and(avg_spend_per_head)
if is_detached:
sentence += " detached"
sentence += " properties"
if is_complex:
sentence += " in a complex"
if close_to:
sentence += " close to %s" % concat_using_comma_and_and(close_to)
sentence += "."
return sentence
def fourth_sentence(general_conditions, is_shortbreakok, accessibility):
sentence = "The properties are"
if general_conditions:
sentence += " %s" % concat_using_comma(general_conditions)
if is_shortbreakok:
sentence += ", available for a short break"
if accessibility:
if general_conditions:
sentence += ", and accessible to %s" % concat_using_comma_and_and(accessibility)
else:
sentence += " accessible to %s" % concat_using_comma_and_and(accessibility)
sentence += "."
return sentence
def fifth_sentence(contents, sleeps):
sentence = "The properties"
if contents:
sentence += " have %s" % concat_using_comma_and_and(contents)
if sleeps:
if contents:
sentence += ", and contain %s sleeping places" % select_the_best(sleeps)
else:
sentence += " contain %s sleeping places" % select_the_best(sleeps)
sentence += "."
return sentence
def sixth_sentence(stars):
sentence = "The average rating of the properties is %s stars." % select_the_best(stars)
return sentence
def describe_booking_cluster(booking_cluster_features):
regions = []
breakpoints = []
adults = []
children = []
is_babies = False
avg_spend_per_head = []
is_detached = False
is_complex = False
close_to = []
general_conditions = []
is_shortbreakok = False
accessibility = []
contents = set()
sleeps = []
stars = []
for feature in booking_cluster_features:
if feature == 'complex':
is_complex = True
elif feature == 'detached':
is_detached = True
elif feature == 'babies_(0.5, 2.0]':
is_babies = True
elif feature == 'shortbreakok':
is_shortbreakok = True
elif feature.startswith("breakpoint_"):
breakpoints.append(feature.replace("breakpoint_", ""))
elif feature.startswith("region_"):
regions.append(feature.replace("region_", ""))
elif feature.startswith("avg_spend_per_head_"):
avg_spend_per_head.append(avg_spend_per_head_features[feature])
elif feature.startswith("adults_"):
adults.append(adults_features[feature])
elif feature.startswith("children_"):
children.append(children_features[feature])
elif feature.startswith("stars_"):
stars.append(stars_features[feature])
elif feature in general_conditions_features:
general_conditions.append(general_conditions_features[feature])
elif feature in accessibility_features:
accessibility.append(accessibility_features[feature])
elif feature in close_to_features:
close_to.append(close_to_features[feature])
elif feature in single_contents_features:
contents.add(single_contents_features[feature])
elif feature in plural_contents_features:
contents.add(feature)
contents = list(contents)
desc_sentences = []
if regions:
desc_sentences.append(first_sentence(regions))
if breakpoints or adults:
desc_sentences.append(second_sentence(breakpoints, adults, children, is_babies))
if avg_spend_per_head or is_detached or is_complex or close_to:
desc_sentences.append(third_sentence(avg_spend_per_head, is_detached, is_complex, close_to))
if general_conditions or accessibility:
desc_sentences.append(fourth_sentence(general_conditions, is_shortbreakok, accessibility))
if contents or sleeps:
desc_sentences.append(fifth_sentence(contents, sleeps))
if stars:
desc_sentences.append(sixth_sentence(stars))
return " ".join(desc_sentences)
|
StarcoderdataPython
|
189962
|
<reponame>udcymen/leetcode
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class BSTIterator:
def __init__(self, root: TreeNode):
self.stack = []
self.getLeftMostNode(root)
def getLeftMostNode(self, node: TreeNode) -> None:
while node:
self.stack.append(node)
node = node.left
def next(self) -> int:
"""
@return the next smallest number
"""
node = self.stack.pop()
if node.right:
self.getLeftMostNode(node.right)
return node.val
def hasNext(self) -> bool:
"""
@return whether we have a next smallest number
"""
return len(self.stack) > 0
if __name__ == "__main__":
root = TreeNode(7)
root.left = TreeNode(3)
root.right = TreeNode(15)
root.right.left = TreeNode(9)
root.right.right = TreeNode(20)
iterator = BSTIterator(root)
print(iterator.next()); # return 3
print(iterator.next()); # return 7
print(iterator.hasNext()); # return true
print(iterator.next()); # return 9
print(iterator.hasNext()); # return true
print(iterator.next()); # return 15
print(iterator.hasNext()); # return true
print(iterator.next()); # return 20
print(iterator.hasNext()); # return false
|
StarcoderdataPython
|
114009
|
"""Webapi views."""
from typing import Any, Dict, List, Iterable, Optional, cast, Tuple
from aiohttp import web
import time
from irisett import (
metadata,
bindata,
stats,
utils,
contact,
monitor_group,
object_models,
)
from irisett.webapi import (
errors,
)
from irisett.monitor.active import (
ActiveMonitor,
ActiveMonitorDef,
create_active_monitor,
create_active_monitor_def,
get_monitor_def_by_name,
)
from irisett.monitor import active_sql
from irisett.sql import DBConnection
from irisett.contact import (
create_contact,
update_contact,
delete_contact,
add_contact_to_active_monitor,
delete_contact_from_active_monitor,
get_contacts_for_active_monitor,
set_active_monitor_contacts,
get_all_contacts_for_active_monitor,
create_contact_group,
update_contact_group,
delete_contact_group,
add_contact_to_contact_group,
delete_contact_from_contact_group,
set_contact_group_contacts,
get_contacts_for_contact_group,
get_contact_groups_for_active_monitor,
add_contact_group_to_active_monitor,
delete_contact_group_from_active_monitor,
set_active_monitor_contact_groups,
)
from irisett.webapi.require import (
require_int,
require_str,
require_bool,
require_dict,
require_list,
)
def get_request_param(request: web.Request, name: str, error_if_missing: bool = True) -> Optional[str]:
"""Get a single value from a request GET parameter.
Optionally error if it is missing.
"""
if name not in request.rel_url.query:
if error_if_missing:
raise errors.NotFound()
else:
return None
ret = request.rel_url.query[name]
return ret
def apply_metadata_to_model_list(
model_list: Iterable[Any], metadata_list: Iterable[object_models.ObjectMetadata]) -> List[Any]:
"""Take a list of model objects and add metadata to them.
This is a commonly used pattern in object get views.
"""
model_dict = {model.id: object_models.asdict(model) for model in model_list}
for model in model_dict.values():
model['metadata'] = {}
for metadata_obj in metadata_list:
model = model_dict.get(metadata_obj.object_id)
if model:
model['metadata'][metadata_obj.key] = metadata_obj.value
return list(model_dict.values())
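# Illustrative note (hypothetical data, not taken from irisett): if model_list
# holds objects with ids 1 and 2 and metadata_list holds a single row
# (object_id=1, key='site', value='lab1'), this returns
# [{'id': 1, ..., 'metadata': {'site': 'lab1'}}, {'id': 2, ..., 'metadata': {}}].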
class ActiveMonitorView(web.View):
async def get(self) -> web.Response:
dbcon = self.request.app['dbcon']
monitor_ids = await self._get_monitor_ids(dbcon)
metadata_dict = await self._get_monitor_metadata(dbcon)
monitors = []
for monitor_id in monitor_ids:
monitor = self.request.app['active_monitor_manager'].monitors.get(monitor_id, None)
if not monitor:
continue
data = self._collect_monitor_data(monitor, metadata_dict)
monitors.append(data)
return web.json_response(monitors)
async def _get_monitor_ids(self, dbcon: DBConnection) -> List[int]:
if 'id' in self.request.rel_url.query:
ids = [require_int(cast(str, get_request_param(self.request, 'id')))]
elif 'meta_key' in self.request.rel_url.query:
meta_key = require_str(get_request_param(self.request, 'meta_key'))
meta_value = require_str(get_request_param(self.request, 'meta_value'))
active_monitor_models = await active_sql.get_active_monitors_for_metadata(dbcon, meta_key, meta_value)
ids = [monitor.id for monitor in active_monitor_models]
elif 'monitor_group_id' in self.request.rel_url.query:
monitor_group_id = require_int(get_request_param(self.request, 'monitor_group_id'))
active_monitor_models = await monitor_group.get_active_monitors_for_monitor_group(dbcon, monitor_group_id)
ids = [monitor.id for monitor in active_monitor_models]
else:
active_monitor_models = await active_sql.get_all_active_monitors(dbcon)
ids = [monitor.id for monitor in active_monitor_models]
return ids
async def _get_monitor_metadata(self, dbcon: DBConnection) -> Optional[Dict[int, Dict[str, str]]]:
include_metadata = require_bool(
get_request_param(self.request, 'include_metadata', error_if_missing=False),
convert=True) or False
if not include_metadata:
return None
if 'id' in self.request.rel_url.query:
metadata_models = await metadata.get_metadata_for_object(
dbcon, 'active_monitor', require_int(cast(str, get_request_param(self.request, 'id'))))
elif 'meta_key' in self.request.rel_url.query:
meta_key = require_str(get_request_param(self.request, 'meta_key'))
meta_value = require_str(get_request_param(self.request, 'meta_value'))
metadata_models = await metadata.get_metadata_for_object_metadata(
dbcon, meta_key, meta_value, 'active_monitor', 'active_monitors')
elif 'monitor_group_id' in self.request.rel_url.query:
metadata_models = await monitor_group.get_active_monitor_metadata_for_monitor_group(
dbcon, require_int(cast(str, get_request_param(self.request, 'monitor_group_id'))))
else:
metadata_models = await metadata.get_metadata_for_object_type(dbcon, 'active_monitor')
metadata_dict = {} # type: Dict[int, Dict[str, str]]
for metadata_model in metadata_models:
if metadata_model.object_id not in metadata_dict:
metadata_dict[metadata_model.object_id] = {}
metadata_dict[metadata_model.object_id][metadata_model.key] = metadata_model.value
return metadata_dict
@staticmethod
def _collect_monitor_data(monitor: ActiveMonitor,
metadata_dict: Optional[Dict[int, Dict[str, str]]]) -> Dict[str, Any]:
ret = {
'id': monitor.id,
'state': monitor.state,
'state_ts': monitor.state_ts,
'state_elapsed': utils.get_display_time(time.time() - monitor.state_ts),
'consecutive_checks': monitor.consecutive_checks,
'last_check': monitor.last_check,
'msg': monitor.msg,
'alert_id': monitor.alert_id,
'checks_enabled': monitor.checks_enabled,
'alerts_enabled': monitor.alerts_enabled,
'monitoring': monitor.monitoring,
'args': monitor.args,
'expanded_args': monitor.get_expanded_args(),
'monitor_description': monitor.get_description(),
'monitor_def': {
'id': monitor.monitor_def.id,
'name': monitor.monitor_def.name,
'cmdline_filename': monitor.monitor_def.cmdline_filename,
'cmdline_args_tmpl': monitor.monitor_def.cmdline_args_tmpl,
'description_tmpl': monitor.monitor_def.description_tmpl,
'arg_spec': object_models.list_asdict(monitor.monitor_def.arg_spec),
},
}
if metadata_dict is not None:
ret['metadata'] = metadata_dict.get(monitor.id, {})
return ret
    async def post(self) -> web.Response:
request_data = await self.request.json()
args = require_dict(request_data['args'], str, None)
if request_data.get('use_monitor_def_name', False):
monitor_def = get_monitor_def_by_name(
self.request.app['active_monitor_manager'],
require_str(request_data['monitor_def']))
else:
monitor_def = self.request.app['active_monitor_manager'].monitor_defs.get(
require_int(request_data['monitor_def']))
if not monitor_def:
raise errors.InvalidData('Monitor def not found')
monitor = await create_active_monitor(self.request.app['active_monitor_manager'], args, monitor_def)
if not monitor:
raise errors.InvalidData('invalid monitor arguments')
return web.json_response(monitor.id)
async def put(self) -> web.Response:
if 'schedule' in self.request.rel_url.query:
ret = await self.schedule_monitor()
elif 'test_notification' in self.request.rel_url.query:
ret = await self.test_notification()
else:
ret = await self.update_monitor()
return ret
async def schedule_monitor(self) -> web.Response:
monitor = self._get_request_monitor(self.request)
monitor.schedule_immediately()
return web.json_response(True)
async def test_notification(self) -> web.Response:
monitor = self._get_request_monitor(self.request)
await monitor.notify_state_change('UNKNOWN', abs(monitor.state_ts - (time.time() - monitor.state_ts)))
return web.json_response(True)
async def update_monitor(self) -> web.Response:
request_data = await self.request.json()
monitor = self._get_request_monitor(self.request)
if 'args' in request_data:
args = cast(Dict[str, str], require_dict(request_data['args']))
await monitor.update_args(args)
if 'checks_enabled' in request_data:
await monitor.set_checks_enabled_status(cast(bool, require_bool(request_data['checks_enabled'])))
if 'alerts_enabled' in request_data:
await monitor.set_alerts_enabled_status(cast(bool, require_bool(request_data['alerts_enabled'])))
return web.json_response(True)
async def delete(self) -> web.Response:
monitor = self._get_request_monitor(self.request)
await monitor.delete()
return web.json_response(True)
# noinspection PyMethodMayBeStatic
def _get_request_monitor(self, request: web.Request) -> ActiveMonitor:
monitor_id = require_int(cast(str, get_request_param(request, 'id')))
monitor = request.app['active_monitor_manager'].monitors.get(monitor_id, None)
if not monitor:
raise errors.NotFound()
return monitor
class ActiveMonitorAlertView(web.View):
async def get(self) -> web.Response:
# noinspection PyUnusedLocal
q_args = () # type: Tuple
if 'monitor_id' in self.request.rel_url.query:
if 'only_active' in self.request.rel_url.query:
q = """select
id, monitor_id, start_ts, end_ts, alert_msg
from active_monitor_alerts
where monitor_id=%s and end_ts=0
order by start_ts desc"""
else:
q = """select
id, monitor_id, start_ts, end_ts, alert_msg
from active_monitor_alerts
where monitor_id=%s
order by start_ts desc"""
monitor_id = require_int(get_request_param(self.request, 'monitor_id'))
q_args = (monitor_id,)
ret = await self._get_alerts(q, q_args)
elif 'meta_key' in self.request.rel_url.query:
if 'only_active' in self.request.rel_url.query:
q = """select alert.id, alert.monitor_id, alert.start_ts, alert.end_ts, alert.alert_msg
from object_metadata as meta
left join active_monitors on active_monitors.id=meta.object_id
right join active_monitor_alerts as alert on alert.monitor_id=active_monitors.id
where meta.key=%s and meta.value=%s and meta.object_type="active_monitor" and alert.end_ts=0
order by alert.start_ts desc"""
else:
q = """select alert.id, alert.monitor_id, alert.start_ts, alert.end_ts, alert.alert_msg
from object_metadata as meta
left join active_monitors on active_monitors.id=meta.object_id
right join active_monitor_alerts as alert on alert.monitor_id=active_monitors.id
where meta.key=%s and meta.value=%s and meta.object_type="active_monitor"
order by alert.start_ts desc"""
meta_key = require_str(get_request_param(self.request, 'meta_key'))
meta_value = require_str(get_request_param(self.request, 'meta_value'))
q_args = (meta_key, meta_value)
ret = await self._get_alerts(q, q_args)
else:
if 'only_active' in self.request.rel_url.query:
q = """select
id, monitor_id, start_ts, end_ts, alert_msg
from active_monitor_alerts
where end_ts=0
order by start_ts desc"""
else:
q = """select
id, monitor_id, start_ts, end_ts, alert_msg
from active_monitor_alerts
order by start_ts desc"""
ret = await self._get_alerts(q, ())
return web.json_response(ret)
async def _get_alerts(self, q: str, q_args: Iterable[Any]) -> List[Dict[str, Any]]:
rows = await self.request.app['dbcon'].fetch_all(q, q_args)
ret = []
for id, monitor_id, start_ts, end_ts, alert_msg in rows:
alert = {
'id': id,
'monitor_id': monitor_id,
'start_ts': start_ts,
'end_ts': end_ts,
'alert_msg': alert_msg,
'monitor_description': '',
}
monitor = self.request.app['active_monitor_manager'].monitors.get(monitor_id, None) # type: ActiveMonitor
if monitor:
alert['monitor_description'] = monitor.get_description()
ret.append(alert)
return ret
class ActiveMonitorContactView(web.View):
async def get(self) -> web.Response:
monitor_id = cast(int, require_int(get_request_param(self.request, 'monitor_id')))
if 'include_all' in self.request.rel_url.query:
contacts = await get_all_contacts_for_active_monitor(self.request.app['dbcon'], monitor_id)
        else:
            contacts = await get_contacts_for_active_monitor(self.request.app['dbcon'], monitor_id)
ret = object_models.list_asdict(contacts)
return web.json_response(ret)
async def post(self) -> web.Response:
request_data = await self.request.json()
await add_contact_to_active_monitor(
self.request.app['dbcon'],
cast(int, require_int(request_data.get('contact_id'))),
cast(int, require_int(request_data.get('monitor_id'))))
return web.json_response(True)
async def delete(self) -> web.Response:
request_data = await self.request.json()
await delete_contact_from_active_monitor(
self.request.app['dbcon'],
cast(int, require_int(request_data.get('contact_id'))),
cast(int, require_int(request_data.get('monitor_id'))))
return web.json_response(True)
async def put(self) -> web.Response:
request_data = await self.request.json()
await set_active_monitor_contacts(
self.request.app['dbcon'],
cast(List[int], require_list(request_data.get('contact_ids'), int)),
cast(int, require_int(request_data.get('monitor_id'))))
return web.json_response(True)
class ActiveMonitorContactGroupView(web.View):
async def get(self) -> web.Response:
monitor_id = cast(int, require_int(get_request_param(self.request, 'monitor_id')))
ret = await get_contact_groups_for_active_monitor(self.request.app['dbcon'], monitor_id)
return web.json_response(object_models.list_asdict(ret))
async def post(self) -> web.Response:
request_data = await self.request.json()
await add_contact_group_to_active_monitor(
self.request.app['dbcon'],
cast(int, require_int(request_data.get('contact_group_id'))),
cast(int, require_int(request_data.get('monitor_id'))))
return web.json_response(True)
async def delete(self) -> web.Response:
request_data = await self.request.json()
await delete_contact_group_from_active_monitor(
self.request.app['dbcon'],
cast(int, require_int(request_data.get('contact_group_id'))),
cast(int, require_int(request_data.get('monitor_id'))))
return web.json_response(True)
async def put(self) -> web.Response:
request_data = await self.request.json()
await set_active_monitor_contact_groups(
self.request.app['dbcon'],
cast(List[int], require_list(request_data.get('contact_group_ids'), int)),
cast(int, require_int(request_data.get('monitor_id'))))
return web.json_response(True)
class ActiveMonitorDefView(web.View):
async def get(self) -> web.Response:
dbcon = self.request.app['dbcon']
if 'id' in self.request.rel_url.query:
monitor_def_id = require_int(get_request_param(self.request, 'id'))
monitor_def_item = await active_sql.get_active_monitor_def(dbcon, monitor_def_id)
monitor_def_list = [] # type: Iterable[object_models.ActiveMonitorDef]
if monitor_def_item:
monitor_def_list = [monitor_def_item]
metadata_list = await metadata.get_metadata_for_object(dbcon, 'active_monitor_def', monitor_def_id)
arg_list = await active_sql.get_active_monitor_def_args_for_def(dbcon, monitor_def_id)
else:
monitor_def_list = await active_sql.get_all_active_monitor_defs(dbcon)
metadata_list = await metadata.get_metadata_for_object_type(dbcon, 'active_monitor_def')
arg_list = await active_sql.get_all_active_monitor_def_args(dbcon)
monitor_def_dict = {item.id: object_models.asdict(item) for item in monitor_def_list}
for monitor_def in monitor_def_dict.values():
monitor_def['metadata'] = {}
monitor_def['arg_def'] = []
for arg in arg_list:
monitor_def = monitor_def_dict.get(arg.active_monitor_def_id)
if monitor_def:
monitor_def['arg_def'].append(object_models.asdict(arg))
for metadata_obj in metadata_list:
monitor_def = monitor_def_dict.get(metadata_obj.object_id)
if monitor_def:
monitor_def['metadata'][metadata_obj.key] = metadata_obj.value
return web.json_response(list(monitor_def_dict.values()))
async def post(self) -> web.Response:
request_data = await self.request.json()
monitor_def = await create_active_monitor_def(
self.request.app['active_monitor_manager'],
cast(str, require_str(request_data['name'])),
cast(str, require_str(request_data['description'])),
cast(bool, require_bool(request_data['active'])),
cast(str, require_str(request_data['cmdline_filename'])),
cast(str, require_str(request_data['cmdline_args_tmpl'])),
cast(str, require_str(request_data['description_tmpl'])))
if not monitor_def:
raise errors.InvalidData('invalid monitor def arguments')
return web.json_response(monitor_def.id)
async def put(self) -> web.Response:
request_data = await self.request.json()
monitor_def = self._get_request_monitor_def(self.request)
await monitor_def.update(request_data)
return web.json_response(True)
async def delete(self) -> web.Response:
monitor_def = self._get_request_monitor_def(self.request)
await monitor_def.delete()
return web.json_response(True)
# noinspection PyMethodMayBeStatic
def _get_request_monitor_def(self, request: web.Request) -> ActiveMonitorDef:
monitor_def_id = require_int(get_request_param(request, 'id'))
monitor_def = request.app['active_monitor_manager'].monitor_defs.get(monitor_def_id, None)
if not monitor_def:
raise errors.NotFound()
return monitor_def
class ActiveMonitorDefArgView(web.View):
async def put(self) -> web.Response:
request_data = await self.request.json()
monitor_def = self._get_request_monitor_def(self.request)
monitor_def.set_arg(object_models.ActiveMonitorDefArg(
id=0,
active_monitor_def_id=monitor_def.id,
name=cast(str, require_str(request_data['name'])),
display_name=cast(str, require_str(request_data['display_name'])),
description=cast(str, require_str(request_data['description'])),
required=cast(bool, require_bool(request_data['required'])),
default_value=cast(str, require_str(request_data['default_value'])),
))
return web.json_response(True)
async def delete(self) -> web.Response:
monitor_def = self._get_request_monitor_def(self.request)
await monitor_def.delete_arg(
require_str(get_request_param(self.request, 'name'))
)
return web.json_response(True)
def _get_request_monitor_def(self, request: web.Request) -> ActiveMonitorDef:
monitor_def_id = require_int(get_request_param(request, 'id'))
        monitor_def = request.app['active_monitor_manager'].monitor_defs.get(monitor_def_id, None)
if not monitor_def:
raise errors.NotFound()
return monitor_def
class ContactView(web.View):
async def get(self) -> web.Response:
dbcon = self.request.app['dbcon']
if 'id' in self.request.rel_url.query:
contact_id = require_int(get_request_param(self.request, 'id'))
c = await contact.get_contact(dbcon, contact_id)
contact_list = [] # type: Iterable[object_models.Contact]
if c:
contact_list = [c]
metadata_list = await metadata.get_metadata_for_object(dbcon, 'contact', contact_id)
elif 'meta_key' in self.request.rel_url.query:
meta_key = require_str(get_request_param(self.request, 'meta_key'))
meta_value = require_str(get_request_param(self.request, 'meta_value'))
contact_list = await contact.get_contacts_for_metadata(dbcon, meta_key, meta_value)
metadata_list = await metadata.get_metadata_for_object_metadata(
dbcon, meta_key, meta_value, 'contact', 'contacts')
else:
contact_list = await contact.get_all_contacts(dbcon)
metadata_list = await metadata.get_metadata_for_object_type(dbcon, 'contact')
return web.json_response(apply_metadata_to_model_list(contact_list, metadata_list))
async def post(self) -> web.Response:
request_data = await self.request.json()
contact_id = await create_contact(
self.request.app['dbcon'],
require_str(request_data.get('name', None), allow_none=True),
require_str(request_data.get('email', None), allow_none=True),
require_str(request_data.get('phone', None), allow_none=True),
cast(bool, require_bool(request_data.get('active', True)))
)
return web.json_response(contact_id)
async def put(self) -> web.Response:
request_data = await self.request.json()
contact_id = cast(int, require_int(get_request_param(self.request, 'id')))
dbcon = self.request.app['dbcon']
await update_contact(dbcon, contact_id, request_data)
return web.json_response(True)
async def delete(self) -> web.Response:
contact_id = cast(int, require_int(get_request_param(self.request, 'id')))
dbcon = self.request.app['dbcon']
await delete_contact(dbcon, contact_id)
return web.json_response(True)
class ContactGroupView(web.View):
async def get(self) -> web.Response:
dbcon = self.request.app['dbcon']
if 'id' in self.request.rel_url.query:
contact_group_id = require_int(get_request_param(self.request, 'id'))
contact_group_item = await contact.get_contact_group(dbcon, contact_group_id)
contact_group_list = [] # type: Iterable[object_models.ContactGroup]
if contact_group_item:
contact_group_list = [contact_group_item]
metadata_list = await metadata.get_metadata_for_object(dbcon, 'contact_group', contact_group_id)
elif 'meta_key' in self.request.rel_url.query:
meta_key = require_str(get_request_param(self.request, 'meta_key'))
meta_value = require_str(get_request_param(self.request, 'meta_value'))
contact_group_list = await contact.get_contact_groups_for_metadata(dbcon, meta_key, meta_value)
metadata_list = await metadata.get_metadata_for_object_metadata(
dbcon, meta_key, meta_value, 'contact_group', 'contact_groups')
else:
contact_group_list = await contact.get_all_contact_groups(dbcon)
            metadata_list = await metadata.get_metadata_for_object_type(dbcon, 'contact_group')
return web.json_response(apply_metadata_to_model_list(contact_group_list, metadata_list))
async def post(self) -> web.Response:
request_data = await self.request.json()
contact_group_id = await create_contact_group(
self.request.app['dbcon'],
require_str(request_data.get('name', None), allow_none=False),
cast(bool, require_bool(request_data.get('active', True)))
)
return web.json_response(contact_group_id)
async def put(self) -> web.Response:
request_data = await self.request.json()
contact_group_id = cast(int, require_int(get_request_param(self.request, 'id')))
dbcon = self.request.app['dbcon']
await update_contact_group(dbcon, contact_group_id, request_data)
return web.json_response(True)
async def delete(self) -> web.Response:
contact_group_id = cast(int, require_int(get_request_param(self.request, 'id')))
dbcon = self.request.app['dbcon']
await delete_contact_group(dbcon, contact_group_id)
return web.json_response(True)
class ContactGroupContactView(web.View):
async def get(self) -> web.Response:
contact_group_id = cast(int, require_int(get_request_param(self.request, 'contact_group_id')))
ret = await get_contacts_for_contact_group(self.request.app['dbcon'], contact_group_id)
return web.json_response(object_models.list_asdict(ret))
async def post(self) -> web.Response:
request_data = await self.request.json()
await add_contact_to_contact_group(
self.request.app['dbcon'],
cast(int, require_int(request_data.get('contact_group_id'))),
cast(int, require_int(request_data.get('contact_id'))))
return web.json_response(True)
async def delete(self) -> web.Response:
request_data = await self.request.json()
await delete_contact_from_contact_group(
self.request.app['dbcon'],
cast(int, require_int(request_data.get('contact_group_id'))),
cast(int, require_int(request_data.get('contact_id'))))
return web.json_response(True)
async def put(self) -> web.Response:
request_data = await self.request.json()
await set_contact_group_contacts(
self.request.app['dbcon'],
cast(int, require_int(request_data.get('contact_group_id'))),
cast(List[int], require_list(request_data.get('contact_ids'), int)))
return web.json_response(True)
class MonitorGroupView(web.View):
async def get(self) -> web.Response:
dbcon = self.request.app['dbcon']
if 'id' in self.request.rel_url.query:
monitor_group_id = require_int(get_request_param(self.request, 'id'))
monitor_group_item = await monitor_group.get_monitor_group(dbcon, monitor_group_id)
monitor_group_list = [] # type: Iterable[object_models.MonitorGroup]
if monitor_group_item:
monitor_group_list = [monitor_group_item]
metadata_list = await metadata.get_metadata_for_object(dbcon, 'monitor_group', monitor_group_id)
elif 'meta_key' in self.request.rel_url.query:
meta_key = require_str(get_request_param(self.request, 'meta_key'))
meta_value = require_str(get_request_param(self.request, 'meta_value'))
monitor_group_list = await monitor_group.get_monitor_groups_for_metadata(dbcon, meta_key, meta_value)
metadata_list = await metadata.get_metadata_for_object_metadata(
dbcon, meta_key, meta_value, 'monitor_group', 'monitor_groups')
else:
monitor_group_list = await monitor_group.get_all_monitor_groups(dbcon)
metadata_list = await metadata.get_metadata_for_object_type(dbcon, 'monitor_group')
return web.json_response(apply_metadata_to_model_list(monitor_group_list, metadata_list))
async def post(self) -> web.Response:
request_data = await self.request.json()
monitor_group_id = await monitor_group.create_monitor_group(
self.request.app['dbcon'],
require_int(request_data.get('parent_id', None), allow_none=True),
require_str(request_data.get('name', None), allow_none=True)
)
return web.json_response(monitor_group_id)
async def put(self) -> web.Response:
request_data = await self.request.json()
monitor_group_id = cast(int, require_int(get_request_param(self.request, 'id')))
dbcon = self.request.app['dbcon']
exists = await monitor_group.monitor_group_exists(dbcon, monitor_group_id)
if not exists:
raise errors.NotFound()
await monitor_group.update_monitor_group(dbcon, monitor_group_id, request_data)
return web.json_response(True)
async def delete(self) -> web.Response:
monitor_group_id = cast(int, require_int(get_request_param(self.request, 'id')))
dbcon = self.request.app['dbcon']
exists = await monitor_group.monitor_group_exists(dbcon, monitor_group_id)
if not exists:
raise errors.NotFound()
await monitor_group.delete_monitor_group(dbcon, monitor_group_id)
return web.json_response(True)
class MonitorGroupActiveMonitorView(web.View):
async def post(self) -> web.Response:
request_data = await self.request.json()
await monitor_group.add_active_monitor_to_monitor_group(
self.request.app['dbcon'],
cast(int, require_int(request_data.get('monitor_group_id'))),
cast(int, require_int(request_data.get('monitor_id'))))
return web.json_response(True)
async def delete(self) -> web.Response:
request_data = await self.request.json()
await monitor_group.delete_active_monitor_from_monitor_group(
self.request.app['dbcon'],
cast(int, require_int(request_data.get('monitor_group_id'))),
cast(int, require_int(request_data.get('monitor_id'))))
return web.json_response(True)
class MonitorGroupContactView(web.View):
async def post(self) -> web.Response:
request_data = await self.request.json()
await monitor_group.add_contact_to_monitor_group(
self.request.app['dbcon'],
cast(int, require_int(request_data.get('monitor_group_id'))),
cast(int, require_int(request_data.get('contact_id'))))
return web.json_response(True)
async def delete(self) -> web.Response:
request_data = await self.request.json()
await monitor_group.delete_contact_from_monitor_group(
self.request.app['dbcon'],
cast(int, require_int(request_data.get('monitor_group_id'))),
cast(int, require_int(request_data.get('contact_id'))))
return web.json_response(True)
class MonitorGroupContactGroupView(web.View):
async def post(self) -> web.Response:
request_data = await self.request.json()
await monitor_group.add_contact_group_to_monitor_group(
self.request.app['dbcon'],
cast(int, require_int(request_data.get('monitor_group_id'))),
cast(int, require_int(request_data.get('contact_group_id'))))
return web.json_response(True)
async def delete(self) -> web.Response:
request_data = await self.request.json()
await monitor_group.delete_contact_group_from_monitor_group(
self.request.app['dbcon'],
cast(int, require_int(request_data.get('monitor_group_id'))),
cast(int, require_int(request_data.get('contact_group_id'))))
return web.json_response(True)
class MetadataView(web.View):
async def get(self) -> web.Response:
object_type = cast(str, require_str(get_request_param(self.request, 'object_type')))
object_id = cast(int, require_int(get_request_param(self.request, 'object_id')))
metadict = await metadata.get_metadata(self.request.app['dbcon'], object_type, object_id)
return web.json_response(metadict)
async def post(self) -> web.Response:
request_data = await self.request.json()
await metadata.update_metadata(
self.request.app['dbcon'],
require_str(request_data.get('object_type')),
require_int(request_data.get('object_id')),
require_dict(request_data.get('metadict'), str)
)
return web.json_response(True)
async def delete(self) -> web.Response:
request_data = await self.request.json()
await metadata.delete_metadata(
self.request.app['dbcon'],
require_str(request_data.get('object_type')),
require_int(request_data.get('object_id')),
require_list(request_data.get('keys', None), allow_none=True))
return web.json_response(True)
class BindataView(web.View):
"""Manage binary data objects."""
async def get(self) -> web.Response:
object_type = cast(str, require_str(get_request_param(self.request, 'object_type')))
object_id = cast(int, require_int(get_request_param(self.request, 'object_id')))
key = cast(str, require_str(get_request_param(self.request, 'key')))
ret = await bindata.get_bindata(self.request.app['dbcon'], object_type, object_id, key)
if ret is None:
raise errors.NotFound()
return web.Response(body=ret)
async def post(self) -> web.Response:
object_type = cast(str, require_str(get_request_param(self.request, 'object_type')))
object_id = cast(int, require_int(get_request_param(self.request, 'object_id')))
key = cast(str, require_str(get_request_param(self.request, 'key')))
value = await self.request.read()
await bindata.set_bindata(
self.request.app['dbcon'],
object_type,
object_id,
key,
value)
return web.Response(text='')
async def delete(self) -> web.Response:
object_type = cast(str, require_str(get_request_param(self.request, 'object_type')))
object_id = cast(int, require_int(get_request_param(self.request, 'object_id')))
key = cast(str, require_str(get_request_param(self.request, 'key')))
await bindata.delete_bindata(self.request.app['dbcon'], object_type, object_id, key)
return web.Response(text='')
class StatisticsView(web.View):
"""Get server statistics"""
# noinspection PyMethodMayBeStatic
async def get(self) -> web.Response:
return web.json_response(stats.get_stats())
|
StarcoderdataPython
|
4821088
|
<filename>biorxiv/publication_delay_experiment/02_publication_delay_experiment_figure_exploration.py
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:annorxiver]
# language: python
# name: conda-env-annorxiver-py
# ---
# # Measure the Difference between Preprint-Published similarity and Published Articles
# This notebook measures the time delay that results from the peer review process. Two plots are generated: one depicts the average publication delay as changes are demanded by the peer review process, and the other depicts the added delay as preprints go through multiple versions before being published.
# +
from datetime import timedelta
import random
from pathlib import Path
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.patches import ConnectionPatch
import numpy as np
import pandas as pd
import plotnine as p9
import requests
from scipy.spatial.distance import cdist
from scipy.stats import linregress
import seaborn as sns
from sklearn.linear_model import LogisticRegressionCV
import tqdm
from mizani.breaks import date_breaks
from mizani.formatters import timedelta_format
import warnings
mpl.rcParams["figure.dpi"] = 600
mpl.rcParams["font.size"] = 12
mpl.rcParams["font.family"] = "Arial"
warnings.filterwarnings("ignore")
# -
# # Load the Document Distances
published_date_distances = pd.read_csv(
"output/preprint_published_distances.tsv", sep="\t"
)
for col in ["preprint_date", "published_date"]:
published_date_distances[col] = pd.to_datetime(published_date_distances[col])
published_date_distances["time_to_published"] = pd.to_timedelta(
published_date_distances["time_to_published"]
)
print(published_date_distances.shape)
published_date_distances.head()
published_date_distances["days_to_published"] = published_date_distances[
"time_to_published"
].dt.days
remove_negative_time_to_published = True
if remove_negative_time_to_published:
published_date_distances = published_date_distances[
published_date_distances["days_to_published"] >= 0
]
# # Construct Scatter Plot of Date vs Version Count
# Preprints are delayed by an average of 51 days for each new version posted onto bioRxiv. This section regresses a preprint's version count against the time it takes for the preprint to be published; several plots of this relationship are generated below.
# +
# Get smoothed linear regression line
x = published_date_distances.version_count.values.tolist()
y = published_date_distances.time_to_published.apply(
lambda x: x / timedelta(days=1)
).tolist()
xseq_2 = np.linspace(np.min(x), np.max(x), 80)
results_2 = linregress(x, y)
print(results_2)
# -
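# Quick fit-quality check (added sketch): scipy's linregress already returns the
# correlation coefficient, so R^2 for the version-count fit is simply rvalue squared.
print(f"R^2 = {results_2.rvalue ** 2:.3f}")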
x_line = np.array(
[
published_date_distances["version_count"].min(),
published_date_distances["version_count"].max(),
]
)
y_line = x_line * results_2.slope + results_2.intercept
g = (
p9.ggplot(
published_date_distances,
p9.aes(x="factor(version_count)", y="time_to_published"),
)
+ p9.geom_boxplot(fill="#a6cee3")
+ p9.geom_line(
mapping=p9.aes(x="version_count", y="time_to_published"),
stat="smooth",
method="lm",
linetype="dashed",
se=False,
alpha=1,
size=0.7,
inherit_aes=False,
)
+ p9.scale_y_timedelta(labels=timedelta_format("d"))
+ p9.annotate(
"text",
x=9,
y=timedelta(days=1470),
label=f"Y={results_2.slope:.2f}*X+{results_2.intercept:.2f}",
)
+ p9.labs(x="# of Preprint Versions", y="Time Elapsed Until Preprint is Published")
+ p9.theme_seaborn(context="paper", style="ticks", font="Arial", font_scale=1.3)
)
# g.save("output/version_count_vs_publication_time.svg", dpi=500)
# g.save("output/version_count_vs_publication_time.png", dpi=500)
print(g)
plt.figure(figsize=(8, 5))
g = sns.boxenplot(
x="version_count",
y="days_to_published",
data=published_date_distances,
scale="linear",
palette="YlGnBu",
)
_ = g.set_ylabel("Time Elapsed Until Preprint is Published (Days)")
_ = g.set_xlabel("# of Preprint Versions")
_ = g.plot(x_line - 1, y_line, "--k")
_ = g.annotate(f"Y={results_2.slope:.2f}*X+{results_2.intercept:.2f}", (7, 1470))
plt.figure(figsize=(8, 5))
g = sns.violinplot(
x="version_count",
y="days_to_published",
data=published_date_distances,
cut=0,
scale="width",
palette="YlGnBu",
)
_ = g.set_ylabel("Time Elapsed Until Preprint is Published (Days)")
_ = g.set_xlabel("# of Preprint Versions")
_ = g.plot(x_line - 1, y_line, "--k")
_ = g.annotate(f"Y={results_2.slope:.2f}*X+{results_2.intercept:.2f}", (7, 1470))
_ = g.set_xlim(-0.5, 11.5)
_ = g.set_ylim(0, g.get_ylim()[1])
plt.savefig("output/version_count_vs_publication_time_violin_rerun.svg")
plt.savefig("output/version_count_vs_publication_time_violin_rerun.png")
# +
sns.set_theme(style="white", rc={"axes.facecolor": (0, 0, 0, 0)})
bw_adjust = 0.5
g = sns.FacetGrid(
published_date_distances,
col="version_count",
hue="version_count",
aspect=0.1,
height=5,
palette="YlGnBu",
)
# Draw the densities in a few steps
g = g.map(
sns.kdeplot,
"days_to_published",
vertical=True,
bw_adjust=bw_adjust,
clip_on=False,
fill=True,
alpha=1,
linewidth=1.5,
)
g = g.map(
sns.kdeplot,
"days_to_published",
clip_on=False,
color="w",
lw=2,
bw_adjust=bw_adjust,
vertical=True,
)
g = g.map(plt.axvline, x=0, lw=2, clip_on=False)
# reorder so right side is lower than left
for i, ax in enumerate(g.axes[0]):
ax.set_zorder(ax.zorder - i)
# Define and use a simple function to label the plot in axes coordinates
def label(x, color, label):
ax = plt.gca()
ax.text(
0.03,
0,
label,
fontweight="bold",
color=color,
ha="left",
va="center",
transform=ax.transAxes,
)
_ = g.map(label, "days_to_published")
_ = g.set_ylabels("Time Elapsed Until Preprint is Published (Days)")
_ = g.set_xlabels("")
xyA = (0, y_line[0])
xyB = (0, y_line[1])
axA = g.axes[0][0]
axB = g.axes[0][-1]
line = ConnectionPatch(
xyA=xyA,
coordsA=axA.transData,
xyB=xyB,
coordsB=axB.transData,
linestyle="--",
color="k",
)
_ = g.fig.add_artist(line)
# Set the subplots to overlap
_ = g.fig.subplots_adjust(wspace=-0.7)
# Remove axes details that don't play well with overlap
_ = g.set_titles("")
_ = g.set(xticks=[])
_ = g.despine(bottom=True, left=True)
# +
sns.set_theme(style="white", rc={"axes.facecolor": (0, 0, 0, 0)})
g = sns.FacetGrid(
published_date_distances,
row="version_count",
hue="version_count",
aspect=12,
height=0.8,
palette="YlGnBu",
)
# Draw the densities in a few steps
g.map(
sns.kdeplot,
"days_to_published",
bw_adjust=0.5,
clip_on=False,
fill=True,
alpha=1,
linewidth=1.5,
)
g.map(
sns.kdeplot,
"days_to_published",
clip_on=False,
color="w",
lw=2,
bw_adjust=0.5,
)
g.map(plt.axhline, y=0, lw=2, clip_on=False)
# Define and use a simple function to label the plot in axes coordinates
def label(x, color, label):
ax = plt.gca()
ax.text(
0,
0.2,
label,
fontweight="bold",
color=color,
ha="left",
va="center",
transform=ax.transAxes,
)
g.map(label, "days_to_published")
# g.set_xlabels("Diversity (1 - gini coefficient)")
# Set the subplots to overlap
g.fig.subplots_adjust(hspace=-0.7)
# Remove axes details that don't play well with overlap
g.set_titles("")
g.set(yticks=[])
g.despine(bottom=True, left=True)
# -
g = sns.lmplot(
x="version_count",
y="days_to_published",
data=published_date_distances,
x_bins=np.unique(x),
palette="YlGnBu",
)
_ = g.set_ylabels("Time Elapsed Until Preprint is Published (Days)")
_ = g.set_xlabels("# of Preprint Versions")
_ = g.axes[0][0].annotate(
f"Y={results_2.slope:.2f}*X+{results_2.intercept:.2f}", (2, 700)
)
_ = g.axes[0][0].set_xlim(0.5, 12.5)
_ = g.axes[0][0].set_ylim(0, g.axes[0][0].get_ylim()[1])
# # Construct Scatter Plot of Date vs Document Distances
# Preprints are delayed by an average of 17 days as changes are demanded by the peer-review process. This section regresses a preprint's document distance against the time it takes for the preprint to be published. A scatter plot and a square-bin plot are generated below.
# +
# Get smoothed linear regression line
# Removed negative time here
x = published_date_distances.doc_distances.values.tolist()
y = published_date_distances.time_to_published.apply(
lambda x: x / timedelta(days=1)
).tolist()
xseq_2 = np.linspace(np.min(x), np.max(x), 80)
results_2 = linregress(x, y)
print(results_2)
# -
g = (
p9.ggplot(
published_date_distances, p9.aes(y="time_to_published", x="doc_distances")
)
+ p9.geom_point()
+ p9.geom_line(
stat="smooth", method="lm", linetype="dashed", se=False, alpha=0.9, size=0.6
)
+ p9.scale_y_timedelta(labels=timedelta_format("d"))
+ p9.annotate(
"text",
x=10,
y=timedelta(days=1450),
label=f"Y={results_2.slope:.2f}*X+{results_2.intercept:.2f}",
)
+ p9.labs(
x="Euclidean Distance of Preprints First and Final Versions",
y="Time Elapsed Until Preprint is Published",
)
+ p9.theme_seaborn(context="paper", style="ticks", font="Arial", font_scale=1.3)
)
print(g)
g = (
p9.ggplot(
published_date_distances, p9.aes(x="doc_distances", y="time_to_published")
)
+ p9.geom_bin2d(bins=100)
+ p9.scale_fill_distiller(
trans="log", direction=-1, type="seq", palette="YlGnBu", name="log(count)"
)
+ p9.geom_line(
stat="smooth", method="lm", linetype="dashed", se=False, alpha=1, size=0.7
)
+ p9.scale_y_timedelta(labels=timedelta_format("d"))
+ p9.annotate(
"text",
x=7.5,
y=timedelta(days=1490),
label=f"Y={results_2.slope:.2f}*X+{results_2.intercept:.2f}",
)
+ p9.labs(
x="Euclidean Distance of Preprint-Published Versions",
y="Time Elapsed Until Preprint is Published",
legend="log(count)",
)
)
print(g)
# # Hex grid options
# A couple hex grid options just to see what they look like
x_line = np.array(
[
published_date_distances["doc_distances"].min(),
published_date_distances["doc_distances"].max(),
]
)
y_line = x_line * results_2.slope + results_2.intercept
plt.figure(figsize=(7, 5))
ax = plt.hexbin(
published_date_distances["doc_distances"],
published_date_distances["days_to_published"],
gridsize=50,
cmap="YlGnBu_r",
norm=mpl.colors.LogNorm(),
mincnt=1,
linewidths=(0.15,)
# edgecolors=None
)
plt.xlim([0, 12])
plt.ylim([0, 1800])
ax = plt.gca()
ax.plot(x_line, y_line, "--k")
ax.annotate(
f"Y={results_2.slope:.2f}*X+{results_2.intercept:.2f}",
(6, 1490),
)
_ = ax.set_xlabel("Euclidean Distance of Preprint-Published Versions")
_ = ax.set_ylabel("Time Elapsed Until Preprint is Published (Days)")
cbar = plt.colorbar()
_ = cbar.ax.set_ylabel("count", rotation=270)
plt.savefig("output/article_distance_vs_publication_time_hex.svg")
plt.savefig("output/article_distance_vs_publication_time_hex.png")
plt.figure(figsize=(6, 5))
ax = plt.hexbin(
published_date_distances["doc_distances"],
published_date_distances["days_to_published"],
gridsize=50,
cmap="YlGnBu_r",
norm=mpl.colors.LogNorm(),
mincnt=1,
edgecolors=None,
)
plt.xlim([0, 12])
plt.ylim([0, 1800])
ax = plt.gca()
ax.plot(x_line, y_line, "--k")
ax.annotate(
f"Y={results_2.slope:.2f}*X+{results_2.intercept:.2f}",
(6, 1490),
)
_ = ax.set_xlabel("Euclidean Distance of Preprint-Published Versions")
_ = ax.set_ylabel("Time Elapsed Until Preprint is Published (Days)")
cbar = plt.colorbar()
_ = cbar.ax.set_ylabel("count", rotation=270)
# +
hexplot = sns.jointplot(
x="doc_distances",
y="days_to_published",
data=published_date_distances,
kind="hex",
joint_kws={"cmap": "YlGnBu_r", "mincnt": 1},
norm=mpl.colors.LogNorm(),
height=8,
)
hexplot.set_axis_labels(
xlabel="Euclidian Distance of Preprint-Published Versions",
ylabel="Time Elapsed Until Preprint is Published (Days)",
)
hexplot.ax_joint.plot(x_line, y_line, "--k")
hexplot.ax_joint.annotate(
f"Y={results_2.slope:.2f}*X+{results_2.intercept:.2f}",
(6, 1490),
)
# shrink fig so cbar is visible
plt.subplots_adjust(left=0.2, right=0.8, top=0.8, bottom=0.2)
# make new ax object for the cbar
cbar_ax = hexplot.fig.add_axes([0.85, 0.25, 0.05, 0.4]) # x, y, width, height
plt.colorbar(cax=cbar_ax)
_ = cbar_ax.set_ylabel("count", rotation=270)
# +
hexplot = sns.jointplot(
x="doc_distances",
y="days_to_published",
data=published_date_distances,
kind="hex",
joint_kws={"cmap": "YlGnBu_r", "mincnt": 1, "edgecolors": None},
norm=mpl.colors.LogNorm(),
height=8,
)
hexplot.set_axis_labels(
xlabel="Euclidian Distance of Preprint-Published Versions",
ylabel="Time Elapsed Until Preprint is Published (Days)",
)
hexplot.ax_joint.plot(x_line, y_line, "--k")
hexplot.ax_joint.annotate(
f"Y={results_2.slope:.2f}*X+{results_2.intercept:.2f}",
(6, 1490),
)
# shrink fig so cbar is visible
plt.subplots_adjust(left=0.2, right=0.8, top=0.8, bottom=0.2)
# make new ax object for the cbar
cbar_ax = hexplot.fig.add_axes([0.85, 0.25, 0.05, 0.4]) # x, y, width, height
plt.colorbar(cax=cbar_ax)
_ = cbar_ax.set_ylabel("count", rotation=270)
|
StarcoderdataPython
|
4808332
|
import epicbox
# import logging
# logging.basicConfig(
# format="%(levelname)-8s [%(asctime)s] %(message)s",
# level=logging.INFO,
# filename="app.log",
# )
PROFILES = {
"gcc_compile": {
"docker_image": "stepik/epicbox-gcc:6.3.0",
"user": "root",
},
"gcc_run": {
"docker_image": "stepik/epicbox-gcc:6.3.0",
"user": "sandbox",
"read_only": True,
"network_disabled": False,
},
"java": {
"docker_image": "stepik/epicbox-java:11.0.1",
},
"python": {
"docker_image": "python:3.6.5-alpine",
},
}
epicbox.configure(profiles=PROFILES)
def execute_cpp(untrusted_code, input_values, limits):
result = []
with epicbox.working_directory() as workdir:
value = epicbox.run(
"gcc_compile",
"g++ -pipe -O2 -static -o main main.cpp",
files=[{"name": "main.cpp", "content": untrusted_code}],
workdir=workdir,
)
if value["stderr"]:
return [value]
for val in input_values:
result.append(
epicbox.run(
"gcc_run",
"./main",
stdin=val,
limits=limits,
workdir=workdir,
)
)
return result
def execute_java(untrusted_code, input_values, limits):
result = []
with epicbox.working_directory() as workdir:
text = untrusted_code.decode("UTF-8")
text = text.split()
name_of_class = text[text.index("class") + 1].replace("{", "")
files = [{"name": f"{name_of_class}.java", "content": untrusted_code}]
value = epicbox.run(
"java",
f"javac {name_of_class}.java",
files=files,
workdir=workdir,
limits=limits,
)
if value["stderr"]:
return [value]
for val in input_values:
result.append(
epicbox.run(
"java",
f"java {name_of_class}",
files=files,
workdir=workdir,
stdin=val,
limits=limits,
)
)
return result
def execute_python(untrusted_code, input_values, limits):
result = []
with epicbox.working_directory() as workdir:
files = [{"name": "main.py", "content": untrusted_code}]
for val in input_values:
result.append(
epicbox.run(
"python",
"python3 main.py",
files=files,
workdir=workdir,
stdin=val,
limits=limits,
)
)
return result
def format_result(response):
exit_code = response["exit_code"] # Integer
stdout = response["stdout"].decode("utf-8") # String
stderr = response["stderr"].decode("utf-8") # String
duration = response["duration"] # Float
timeout = response["timeout"] # Boolean
oom_killed = response["oom_killed"] # Boolean
if stderr:
result = stderr
else:
result = stdout
# result = (
# "exit_code: "
# + str(exit_code)
# + "\n"
# + "stdout: "
# + stdout
# + "\n"
# + "stderr: "
# + stderr
# + "\n"
# + "duration: "
# + str(duration)
# + "\n"
# + "timeout: "
# + str(timeout)
# + "\n"
# + "oom_killed: "
# + str(oom_killed)
# + "\n"
# )
return result
def compile_text(text, language, input_values=None, limits=None, output_values=None):
if limits is None:
limits = {"cputime": 2, "memory": 64}
if input_values is None:
input_values = [""]
if output_values is None:
output_values = [""]
untrusted_code = text.encode("UTF-8")
response = ""
if language == "C++":
response = execute_cpp(untrusted_code, input_values, limits)
elif language == "Java":
response = execute_java(untrusted_code, input_values, limits)
elif language == "Python":
response = execute_python(untrusted_code, input_values, limits)
result = ""
    for index, res in enumerate(response):
        expected = output_values[index]
        actual = format_result(res)
        matches = (actual == expected)
        result += (
            "Output #" + str(index + 1) + " - " + str(matches) + '\n' + actual + "\n"
        )
return result
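if __name__ == "__main__":
    # Minimal usage sketch (assumes a running Docker daemon and that the images
    # listed in PROFILES are available; the program text and expected output
    # below are made-up examples, not part of the original module).
    sample_code = "n = int(input())\nprint(n * 2)\n"
    report = compile_text(
        sample_code,
        "Python",
        input_values=["21\n"],
        output_values=["42\n"],
    )
    print(report)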
|
StarcoderdataPython
|
889
|
<gh_stars>0
from binascii import hexlify
from functools import wraps
from logging import error
from os import urandom
from random import randint
from flask import make_response
from flask import render_template
from werkzeug.exceptions import BadRequest
from werkzeug.exceptions import Forbidden
from werkzeug.exceptions import Gone
from werkzeug.exceptions import InternalServerError
from werkzeug.exceptions import MethodNotAllowed
from werkzeug.exceptions import NotFound
from config import get_debug_flag
from tracker import tracker
from tracker.symbol import smileys_sad
error_handlers = []
def errorhandler(code_or_exception):
def decorator(func):
error_handlers.append({'func': func, 'code_or_exception': code_or_exception})
@wraps(func)
def wrapped(*args, **kwargs):
return func(*args, **kwargs)
return wrapped
return decorator
def handle_error(e, code, json=False):
if json:
return {'message': e}, code
return make_response(render_template('error.html',
smiley=smileys_sad[randint(0, len(smileys_sad) - 1)],
text=e,
title='{}'.format(code)), code)
@errorhandler(NotFound.code)
def not_found(e='404: Not Found', json=False):
return handle_error(e if 'check your spelling' not in '{}'.format(e) else '404: Not Found', NotFound.code, json)
@errorhandler(Forbidden.code)
def forbidden(e='403: Forbidden', json=False):
return handle_error(e, Forbidden.code, json)
@errorhandler(MethodNotAllowed.code)
def method_not_allowed(e='405: Method Not Allowed', json=False):
return handle_error(e, MethodNotAllowed.code, json)
@errorhandler(Gone.code)
def gone(e='410: Gone', json=False):
return handle_error(e, Gone.code, json)
@errorhandler(BadRequest.code)
def bad_request(e='400: Bad Request', json=False):
return handle_error(e, BadRequest.code, json)
@errorhandler(Exception)
@errorhandler(InternalServerError.code)
def internal_error(e):
if get_debug_flag():
raise e
code = hexlify(urandom(4)).decode()
error(Exception("Code: {}".format(code), e), exc_info=True)
text = '500: Deep Shit\n{}'.format(code)
return handle_error(text, InternalServerError.code)
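# Hedged sketch (hypothetical helper, not part of this module's public API): the
# handlers collected in error_handlers are meant to be attached to a Flask app
# elsewhere in the tracker package, roughly like this.
def register_error_handlers(app) -> None:
    for handler in error_handlers:
        app.register_error_handler(handler['code_or_exception'], handler['func'])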
|
StarcoderdataPython
|
3270844
|
#!/usr/local/bin/python3
from Vagrant import Vagrant
import rumps
from apscheduler.schedulers.background import BackgroundScheduler
# Callbacks
def cb_vm_destroy(sender):
sender.vm.destroy()
def cb_vm_power_off(sender):
sender.vm.poweroff()
def cb_vm_power_on(sender):
sender.vm.poweron()
def cb_vm_provision(sender):
sender.vm.provision()
def cb_vm_reset(sender):
sender.vm.reset()
def cb_vm_ssh(sender):
sender.vm.ssh()
def cb_vm_web(sender):
sender.vm.web()
class VagrantBar(rumps.App):
vagrant = None # Store our vagrant instance
def __init__(self):
super(VagrantBar, self).__init__(name="VB", icon="assets/icon.png")
self.vagrant = Vagrant()
self.menu = []
self.build_menu()
self.menu.add('')
# Here we update the menu in the background:
sched = BackgroundScheduler()
def scheduled_updater():
self.update_menu()
sched.add_job(scheduled_updater, 'interval', seconds=5)
sched.start()
def build_menu(self):
for name,vm in self.vagrant.get_vms():
if name not in self.menu:
submenu = rumps.MenuItem(name)
if vm.state == Vagrant.STATE_POWERON:
submenu.state = 1
submenu.add(self.create_menu_item("Power Off", vm, cb_vm_power_off))
submenu.add(self.create_menu_item("Reset", vm, cb_vm_reset))
submenu.add(self.create_menu_item("Reprovision", vm, cb_vm_provision))
submenu.add('')
submenu.add(self.create_menu_item("Open SSH Console", vm, cb_vm_ssh))
submenu.add(self.create_menu_item("Open in Browser", vm, cb_vm_web))
else:
submenu.state = 0
submenu.add(self.create_menu_item("Power On", vm, cb_vm_power_on))
submenu.add('')
submenu.add(self.create_menu_item("Destroy", vm, cb_vm_destroy))
self.menu.add(submenu)
else:
item = self.menu[name]
                if (item.state == 0 and vm.state == Vagrant.STATE_POWERON) or (item.state == 1 and vm.state == Vagrant.STATE_POWEROFF):
# Update here!
item.clear()
if vm.state == Vagrant.STATE_POWERON:
item.state = 1
item.add(self.create_menu_item("Power Off", vm, cb_vm_power_off))
item.add(self.create_menu_item("Reset", vm, cb_vm_reset))
item.add(self.create_menu_item("Reprovision", vm, cb_vm_provision))
item.add('')
item.add(self.create_menu_item("Open SSH Console", vm, cb_vm_ssh))
item.add(self.create_menu_item("Open in Browser", vm, cb_vm_web))
else:
item.state = 0
item.add(self.create_menu_item("Power On", vm, cb_vm_power_on))
item.add('')
item.add(self.create_menu_item("Destroy", vm, cb_vm_destroy))
def update_menu(self):
self.vagrant.update_vms()
self.build_menu()
def create_menu_item(self, title, vm, callback):
item = rumps.MenuItem(title, callback=callback)
item.vm = vm
return item
if __name__ == "__main__":
VagrantBar().run()
|
StarcoderdataPython
|
3303822
|
import argparse
import stf_path
from trex_stf_lib.trex_client import CTRexClient
from pprint import pprint
import csv
import math
# Sample TRex stateful test: change active-flows and collect the results
def minimal_stateful_test(server,csv_file,a_active_flows):
trex_client = CTRexClient(server)
trex_client.start_trex(
c = 7,
m = 30000,
f = 'cap2/cur_flow_single.yaml',
# f = 'cap2/cur_flow.yaml',
d = 30,
l = 1000,
p=True,
cfg = "cfg/trex_08_5mflows.yaml",
active_flows=a_active_flows,
nc=True
)
result = trex_client.sample_until_finish()
active_flows = result.get_value_list('trex-global.data.m_active_flows')
cpu_utl = result.get_value_list('trex-global.data.m_cpu_util')
pps = result.get_value_list('trex-global.data.m_tx_pps')
queue_full = result.get_value_list('trex-global.data.m_total_queue_full')
    if queue_full[-1] > 10000:
        print("WARNING: QUEUE WAS FULL")
    row = (active_flows[-5], cpu_utl[-5], pps[-5], queue_full[-1])
    print(row)
    file_writer = csv.writer(csv_file)
    file_writer.writerow(row)
if __name__ == '__main__':
    test_file = open('tw_2_layers.csv', 'w', newline='')  # text mode for Python 3's csv.writer
parser = argparse.ArgumentParser(description="active-flow example")
parser.add_argument('-s', '--server',
dest='server',
help='Remote trex address',
default='127.0.0.1',
type = str)
args = parser.parse_args()
    max_flows = 8000000
    min_flows = 100
    active_flow = min_flows
    num_point = 40
    factor = math.exp(math.log(max_flows / min_flows, math.e) / num_point)
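    # Added note: factor = exp(ln(max_flows / min_flows) / num_point), so the
    # i-th iteration tests roughly min_flows * (max_flows / min_flows) ** (i / num_point)
    # active flows, i.e. 41 logarithmically spaced points from 100 up to 8,000,000.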
    for i in range(num_point + 1):
        print("<<=====================>>", i, math.floor(active_flow))
        minimal_stateful_test(args.server, test_file, math.floor(active_flow))
        active_flow = active_flow * factor
    test_file.close()
|
StarcoderdataPython
|
1660494
|
<filename>tools/blender/md3_import.py
#!BPY
"""
Name: 'Quake3 (.md3)...'
Blender: 242
Group: 'Import'
Tooltip: 'Import from Quake3 file format. (.md3)'
"""
__author__ = "PhaethonH, <NAME>, Robert (Tr3B) Beckebans"
__url__ = ("http://xreal.sourceforge.net")
__version__ = "0.6 2006-11-12"
__bpydoc__ = """\
This script imports a Quake 3 model file (MD3), its textures,
and animations into Blender for editing. The loader is based on the MD3 loader
from www.gametutorials.com (thanks DigiBen!) and the
md3 blender loader by PhaethonH <<EMAIL>>
Supported:<br>
Surfaces and Materials
Missing:<br>
Animations, Tag rotations
Known issues:<br>
None
Notes:<br>
TODO
"""
import sys, os, os.path, struct, string, math
import Blender, Blender.Scene
from Blender import *
import types
import textwrap
import logging
reload(logging)
import md3
from md3 import *
import q_math
from q_math import *
# Our own logger class. It works just the same as a normal logger except
# that all info messages get shown.
class Logger(logging.Logger):
def __init__(self, name,level = logging.NOTSET):
logging.Logger.__init__(self, name, level)
self.has_warnings = False
self.has_errors = False
self.has_critical = False
def info(self, msg, *args, **kwargs):
apply(self._log,(logging.INFO, msg, args), kwargs)
def warning(self, msg, *args, **kwargs):
logging.Logger.warning(self, msg, *args, **kwargs)
self.has_warnings = True
def error(self, msg, *args, **kwargs):
logging.Logger.error(self, msg, *args, **kwargs)
self.has_errors = True
def critical(self, msg, *args, **kwargs):
logging.Logger.critical(self, msg, *args, **kwargs)
self.has_errors = True
# should be able to make this print to stdout in realtime and save MESSAGES
# as well. perhaps also have a log to file option
class LogHandler(logging.StreamHandler):
def __init__(self):
logging.StreamHandler.__init__(self, sys.stdout)
if "md3_import_log" not in Blender.Text.Get():
self.outtext = Blender.Text.New("md3_import_log")
else:
self.outtext = Blender.Text.Get('md3_import_log')
self.outtext.clear()
self.lastmsg = ''
def emit(self, record):
# print to stdout and to a new blender text object
msg = self.format(record)
if msg == self.lastmsg:
return
self.lastmsg = msg
self.outtext.write("%s\n" %msg)
logging.StreamHandler.emit(self, record)
logging.setLoggerClass(Logger)
log = logging.getLogger('md3_import')
handler = LogHandler()
formatter = logging.Formatter('%(levelname)s %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
# set this to minimum output level. eg. logging.DEBUG, logging.WARNING, logging.ERROR
# logging.CRITICAL. logging.INFO will make little difference as these always get
# output'd
log.setLevel(logging.WARNING)
class BlenderGui:
def __init__(self):
text = """A log has been written to a blender text window. Change this window type to
a text window and you will be able to select the file md3_import_log."""
text = textwrap.wrap(text,40)
text += ['']
if log.has_critical:
text += ['There were critical errors!!!!']
elif log.has_errors:
text += ['There were errors!']
elif log.has_warnings:
text += ['There were warnings']
# add any more text before here
text.reverse()
self.msg = text
Blender.Draw.Register(self.gui, self.event, self.button_event)
def gui(self,):
quitbutton = Blender.Draw.Button("Exit", 1, 0, 0, 100, 20, "Close Window")
y = 35
for line in self.msg:
BGL.glRasterPos2i(10,y)
Blender.Draw.Text(line)
y+=15
def event(self,evt, val):
if evt == Blender.Draw.ESCKEY:
Blender.Draw.Exit()
return
def button_event(self,evt):
if evt == 1:
Blender.Draw.Exit()
return
def Import(fileName):
log.info("Starting ...")
log.info("Importing MD3 model: %s", fileName)
pathName = StripGamePath(StripModel(fileName))
log.info("Shader path name: %s", pathName)
modelName = StripExtension(StripPath(fileName))
log.info("Model name: %s", modelName)
# read the file in
file = open(fileName,"rb")
md3 = md3Object()
md3.Load(file, log)
md3.Dump(log)
file.close()
scene = Scene.getCurrent()
for k in range(0, md3.numSurfaces):
surface = md3.surfaces[k]
# create a new mesh
mesh = Mesh.New(surface.name)
#uv = []
uvList = []
# make the verts
for i in range(0, surface.numVerts):
mesh.verts.extend( [surface.verts[i].xyz] )
# make the faces
for i in range(0, surface.numTriangles):
mesh.faces.extend( surface.triangles[i].indexes )
# use faceUV
mesh.faceUV=True
# extend ignores redundant faces
log.info("Redundant faces for %s: %i", surface.name , surface.numTriangles-len(mesh.faces) )
# make the UV list
for tex_coord in surface.uv:
u = tex_coord.u
v = tex_coord.v
uv = Mathutils.Vector([u,v])
uvList.append(uv)
# import uv
log.info("uv")
for i in range(0, len(mesh.faces)):
uvData = []
uvData.append(uvList[surface.triangles[i].indexes[0]])
uvData.append(uvList[surface.triangles[i].indexes[1]])
uvData.append(uvList[surface.triangles[i].indexes[2]])
mesh.faces[i].uv = uvData
# set smooth
mesh.faces[i].smooth=1
# add object
log.info("addobj")
meshObject = Object.New('Mesh',surface.name)
meshObject.link(mesh)
scene.link(meshObject)
# animate the verts through keyframe animation
log.info("anim")
for i in range(0, surface.numFrames):
# update the vertices
for j in range(0, surface.numVerts):
                # i*surface.numVerts+j is where in the surface vertex list the vert position for this frame is
#x = surface.verts[(i * surface.numVerts) + j].xyz[0]
#y = surface.verts[(i * surface.numVerts) + j].xyz[1]
#z = surface.verts[(i * surface.numVerts) + j].xyz[2]
# put the vertex in the right spot
#mesh.verts[j].co[0] = x
#mesh.verts[j].co[1] = y
#mesh.verts[j].co[2] = z
xyz = Mathutils.Vector(surface.verts[(i * surface.numVerts) + j].xyz)
mesh.verts[j].co = xyz
meshObject.insertShapeKey()
#select all and remove doubles
mesh.sel=1
mesh.remDoubles(0.0)
# create materials for surface
log.info("shade")
for i in range(0, surface.numShaders):
# create new material if necessary
matName = StripExtension(StripPath(surface.shaders[i].name))
if matName == "" :
matName = "no_texture"
try:
mat = Material.Get(matName)
except:
log.info("Creating new material: %s", matName)
mat = Material.New(matName)
# create new texture
texture = Texture.New(matName)
texture.setType('Image')
# try .tga by default
imageName = StripExtension(GAMEDIR + surface.shaders[i].name) + '.tga'
try:
image = Image.Load(imageName)
texture.image = image
except:
try:
imageName = StripExtension(imageName) + '.png'
image = Image.Load(imageName)
texture.image = image
except:
try:
imageName = StripExtension(imageName) + '.jpg'
image = Image.Load(imageName)
texture.image = image
except:
log.warning("Unable to load image: %s", imageName)
# texture to material
mat.setTexture(0, texture, Texture.TexCo.UV, Texture.MapTo.COL)
# append material to the mesh's list of materials
mesh.materials += [mat]
mesh.update()
for key in Key.Get() :
key.ipo = Ipo.New('Key', "bleh" + "_ipo")
index = 1
for curveName in key.ipo.curveConsts :
# print curveName
key.ipo.addCurve(curveName)
key.ipo[curveName].interpolation = IpoCurve.InterpTypes.CONST
key.ipo[curveName].addBezier((0, 0))
key.ipo[curveName].addBezier((index, 1))
key.ipo[curveName].addBezier((index + 1, 0))
index+=1
#for key in Key.Get() :
# print key.ipo.curveConsts
log.info("tags")
# create tags
for i in range(0, md3.numTags):
tag = md3.tags[i]
# this should be an Empty object
blenderTag = Object.New("Empty", tag.name);
# set ipo
ipo = Ipo.New('Object', tag.name + "_ipo")
locX = ipo.addCurve('LocX')
locY = ipo.addCurve('LocY')
locZ = ipo.addCurve('LocZ')
rotX = ipo.addCurve('RotX')
rotY = ipo.addCurve('RotY')
rotZ = ipo.addCurve('RotZ')
locX.interpolation=IpoCurve.InterpTypes.LINEAR
locY.interpolation=IpoCurve.InterpTypes.LINEAR
locZ.interpolation=IpoCurve.InterpTypes.LINEAR
rotX.interpolation=IpoCurve.InterpTypes.LINEAR
rotY.interpolation=IpoCurve.InterpTypes.LINEAR
rotZ.interpolation=IpoCurve.InterpTypes.LINEAR
#set ipo for tag
blenderTag.setIpo(ipo)
scene.link(blenderTag)
blenderTag.setLocation(tag.origin)
# FIXME this imports only the baseframe tags
for i in range(0, md3.numFrames):
for j in range(0, md3.numTags):
tag = md3.tags[i * md3.numTags + j]
#Blender.Set("curframe", i)
#tagName = tag.name# + '_' + str(i)
#blenderTag = Object.New("Empty", tagName);
#tags.append(blenderTag)
#scene.link(blenderTag)
#blenderTag = tags[j]
blenderTag = Object.Get(tag.name)
ipo = Ipo.Get(tag.name + "_ipo")
locX = ipo[Ipo.OB_LOCX]
locY = ipo[Ipo.OB_LOCY]
locZ = ipo[Ipo.OB_LOCZ]
rotX = ipo[Ipo.OB_ROTX]
rotY = ipo[Ipo.OB_ROTY]
rotZ = ipo[Ipo.OB_ROTZ]
# Note: Quake3 uses left-hand geometry
forward = [tag.axis[0], tag.axis[1], tag.axis[2]]
left = [tag.axis[3], tag.axis[4], tag.axis[5]]
up = [tag.axis[6], tag.axis[7], tag.axis[8]]
transform = MatrixSetupTransform(forward, left, up, tag.origin)
transform2 = Blender.Mathutils.Matrix(transform[0], transform[1], transform[2], transform[3])
rotation = Blender.Mathutils.Matrix(forward, left, up)
rot_Euler = rotation.toEuler()
#rot_Euler.unique()
#blenderTag.setMatrix(transform2)
#print "org: ", tag.origin
locX.addBezier((i+1, tag.origin[0]))
locY.addBezier((i+1, tag.origin[1]))
locZ.addBezier((i+1, tag.origin[2]))
rotX.addBezier((i+1, DEG2RAD(rot_Euler.x)))
rotY.addBezier((i+1, DEG2RAD(rot_Euler.y)))
rotZ.addBezier((i+1, DEG2RAD(rot_Euler.z)))
#blenderTag.setLocation(tag.origin)
#blenderTag.insertKey(i,"relative")
# locate the Object containing the mesh at the cursor location
if md3.numSurfaces:
cursorPos = Blender.Window.GetCursorPos()
#meshObject.setLocation(float(cursorPos[0]), float(cursorPos[1]), float(cursorPos[2]))
meshObject.setLocation(float(cursorPos[0]), float(cursorPos[1]), float(cursorPos[2]))
# not really necessary, but I like playing with the frame counter
#Blender.Set("staframe", 1)
#Blender.Set("curframe", md3.numFrames)
#Blender.Set("endframe", md3.numFrames)
def FileSelectorCallback(fileName):
Import(fileName)
BlenderGui()
Blender.Window.FileSelector(FileSelectorCallback, 'Import Quake3 MD3')
|
StarcoderdataPython
|
3357765
|
import pandas as pd
def row_to_tex(row):
if 'good_cand_ibata' in row.index:
flag = '' if pd.isnull(row['good_cand_ibata']) else row['good_cand_ibata']
else:
flag = ''
string = str(row['source_id']) + ' & '
string += "${0:.3f}$ & ".format(row['ra'])
string += "${0:.3f}$ & ".format(row['dec'])
string += "{0:.3f} & ".format(row['g0'])
string += "{0:.3f} & ".format(row['bp_rp0'])
string += "${0:.2f} \pm {1:.2f}$ & ".format(row['parallax'], row['parallax_error'])
string += "${0:.2f} \pm {1:.2f}$ & ".format(row['pmra'], row['pmra_error'])
string += "${0:.2f} \pm {1:.2f}$ & ".format(row['pmdec'], row['pmdec_error'])
if 'v_hel' in row.index:
if pd.isnull(row['v_hel']):
string += ''
else:
string += "${0:.2f} \pm {1:.2f}$".format(row['v_hel'], row['v_hel_error'])
if ~pd.isnull(row['other_id']) & ('ibata' in row['other_id']):
string += '*'
else:
string += ''
string += ' & ' + flag
string += ' \\\\\n'
return string
df = pd.read_csv('data/gaia-data/gjoll-plus-bright.csv')
filename = 'tables/candidates.tex'
df.sort_values('ra', inplace=True)
all = pd.read_csv('data/gaia-data/all-candidates.csv')
all = all[all['cmdflag'] == True]
all.sort_values('ra', inplace=True)
with open(filename, 'w') as f:
for name, row in df.iterrows():
string = row_to_tex(row)
f.write(string)
f.write('\\hline \n')
for name, row in all.iloc[:5].iterrows():
string = row_to_tex(row)
f.write(string)
string = '\\vdots & ' * 10
f.write(string[:-2] + "\\\\\n")
cols = ['source_id', 'ra', 'dec', 'g0', 'bp_rp0', 'parallax', 'parallax_error',
'pmra', 'pmra_error', 'pmdec', 'pmdec_error', 'v_hel', 'v_hel_error',
'good_cand_ibata']
table1 = pd.concat([df, all], ignore_index=True)[cols]
table1.drop_duplicates(subset=['source_id'], inplace=True)
table1.to_csv('tables/table1-long.csv', index=False)
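
# Illustrative helper (not part of the original script): shows what row_to_tex()
# produces for a single hypothetical row. Every value below is a made-up placeholder.
def _example_row_to_tex():
    example_row = pd.Series({
        'source_id': 123456789012345678,
        'ra': 150.123, 'dec': -45.678,
        'g0': 17.5, 'bp_rp0': 0.85,
        'parallax': 0.12, 'parallax_error': 0.05,
        'pmra': 24.3, 'pmra_error': 0.10,
        'pmdec': -10.2, 'pmdec_error': 0.10,
        'v_hel': 85.0, 'v_hel_error': 2.0,
        'other_id': 'ibata_candidate',
        'good_cand_ibata': 'y',
    })
    # Returns a single LaTeX table row string.
    return row_to_tex(example_row)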
|
StarcoderdataPython
|
3348417
|
_base_ = [
'../../_base_/models/deeppad_r50.py',
'../../_base_/datasets/cityscapes.py', '../../_base_/default_runtime.py',
'../../_base_/schedules/schedule_80k_warmup.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
backbone=dict(
norm_cfg=norm_cfg,
),
decode_head=dict(
type='BilinearPADHead_fast',
upsample_factor=8,
dyn_branch_ch=16,
mask_head_ch=16,
norm_cfg=norm_cfg,
),
auxiliary_head=dict(norm_cfg=norm_cfg)
)
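# Note (added for context, not part of the original config): configs like this are
# typically consumed by the mmsegmentation entry points rather than imported directly,
# e.g. `python tools/train.py <path/to/this_config>.py`; the path is a placeholder and
# depends on where the config lives in the repo.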
|
StarcoderdataPython
|
1652503
|
import pytest
from django.test import TestCase
@pytest.mark.builder_backend
class BuilderBackendTestCase(TestCase):
pass
@pytest.mark.builder
class BuilderTestCase(TestCase):
pass
|
StarcoderdataPython
|
1724563
|
import dateutil.parser
from dateutil import tz
from datetime import timedelta
import time
import requests
from requests.auth import HTTPBasicAuth
from xui.solar_winds.solar_winds_scripts import TEST_SERVER_CONNECTION
from common.methods import generate_string_from_template
from utilities.models import ConnectionInfo
from c2_wrapper import create_hook
class SolarWindsManager:
def __init__(self):
self.ci_for_server = ConnectionInfo.objects.filter(name__iexact='SolarWinds Connection Info').first()
self.ci_for_rest = ConnectionInfo.objects.filter(name__iexact='SolarWinds Connection Info Rest').first()
def get_connection_info(self, for_server=False):
if for_server:
return self.ci_for_server
return self.ci_for_rest
def execute_script(self, template, context):
ci = self.get_connection_info(for_server=True)
template = generate_string_from_template(template=template,
group=None, env=None,
os_build=None, context=context)
ci.protocol = "winrm"
result = ci.execute_script(script_contents=template)
return result
def install_windows_agent(self, template, context):
result = self.execute_script(template, context)
return result
def install_linux_agent(self, server, template, context):
linux_template = generate_string_from_template(template=template,
group=None, env=None,
os_build=None, context=context)
result = server.execute_script(script_contents=linux_template, run_with_sudo=True)
return result
def verify_rest_credentials(self, **kwargs):
if 'connection_info' in kwargs:
ci = kwargs.get('connection_info')
else:
ci = self.ci_for_rest
ip = ci.ip
port = ci.port
protocol = ci.protocol
username = ci.username
        password = ci.password  # redacted as <PASSWORD> in this copy; taken from the ConnectionInfo record
path = "/SolarWinds/InformationService/v3/Json/Query?query=SELECT+TOP+10+AccountID+FROM+Orion.Accounts"
url = f"{protocol}://{str(ip)}:{port}{path}"
try:
response = requests.get(url, auth=HTTPBasicAuth(
username, password), verify=False)
except Exception:
return False
return response.ok
def verify_server_connection(self, **kwargs):
if 'server_connection_info' in kwargs:
ci = kwargs.get('server_connection_info')
else:
ci = self.ci_for_server
template = generate_string_from_template(template=TEST_SERVER_CONNECTION,
group=None, env=None,
os_build=None, context={})
try:
ci.protocol = "winrm"
result = ci.execute_script(script_contents=template)
if result.split('\r\n'):
return (True, "Connection succesful")
else:
return False, "Could not connect to the server"
except Exception as error:
return False, error
def get_solar_winds_rest_ci(self):
return self.ci_for_rest
def get_solar_winds_server_ci(self):
return self.ci_for_server
def get_solar_winds_nodes(self):
ci = self.ci_for_rest
path = "/SolarWinds/InformationService/v3/Json/Query?query=SELECT+TOP+1000+NodeID,IPAddress,NodeName,Vendor+FROM+Orion.Nodes"
try:
url = f"{ci.protocol}://{str(ci.ip)}:{ci.port}{path}"
response = requests.get(url, auth=HTTPBasicAuth(
ci.username, ci.password), verify=False)
return response.json().get('results')
except Exception:
return []
def generate_url(self, path):
ci = self.get_connection_info()
url = ci.format_url() + f'/{path}'
return url
def get_auth(self):
# returns a HTTPBasicAuth object that has been authenticated
ci = self.get_connection_info()
return HTTPBasicAuth(ci.username, ci.password)
def is_agent_installed(self, server_ip):
response = self.send_get_requests(url='SolarWinds/InformationService/v3/Json/Query?query=SELECT+IpAddress+FROM+Orion.Nodes')
if response.ok:
ip_addresses = response.json().get('results')
exists = [address['IpAddress'] for address in ip_addresses if address['IpAddress'] == server_ip]
if not exists:
# Ip not found thus agent not installed
return False, 'Not Installed'
else:
return True, 'Installed'
return False, response.reason
def get_node_id(self, server_ip):
response = self.send_get_requests('SolarWinds/InformationService/v3/Json/Query?query=SELECT+IpAddress,NodeId+FROM+Orion.Nodes')
if response.ok:
nodes_ip_addresses = response.json().get('results')
node_id = [nodes_ip_address['NodeId'] for nodes_ip_address in nodes_ip_addresses if nodes_ip_address['IpAddress'] == server_ip]
if node_id:
return node_id[0]
def get_server_stat(self, server_ip):
node_id = self.get_node_id(server_ip)
if node_id:
response = self.send_get_requests(f'SolarWinds/InformationService/v3/Json/Query?query=SELECT+NodeId,CpuCount,TotalMemory, CpuLoad, MemoryUsed+FROM+Orion.NodesStats+WHERE+NodeID={node_id}')
if response.ok:
server_stats = response.json().get('results')[0]
return True, server_stats
return False, response.reason
else:
return False, 'Server not registered on the solarwinds server.'
def get_cpu_load_metrics(self, server_ip):
node_id = self.get_node_id(server_ip)
response = self.send_get_requests(f'SolarWinds/InformationService/v3/Json/Query?query=SELECT+NodeId,DateTime,MinLoad,MaxLoad,AvgLoad+FROM+Orion.CPULoad+WHERE+NodeID={node_id}')
if response.ok:
results = response.json().get('results')
data = [[self.convert_time_to_timestamp(load.get('DateTime')), load.get('AvgLoad')] for load in results]
return data
def get_memory_used_metrics(self, server_ip):
node_id = self.get_node_id(server_ip)
response = self.send_get_requests(f'SolarWinds/InformationService/v3/Json/Query?query=SELECT+NodeId,DateTime,AvgPercentMemoryUsed+FROM+Orion.CPULoad+WHERE+NodeID={node_id}')
if response.ok:
results = response.json().get('results')
data = [[self.convert_time_to_timestamp(load.get('DateTime')), load.get('AvgPercentMemoryUsed')] for load in results]
return data
def get_network_latency_metrics(self, server_ip):
node_id = self.get_node_id(server_ip)
response = self.send_get_requests(f'SolarWinds/InformationService/v3/Json/Query?query=SELECT+NodeId,DateTime,AvgResponseTime+FROM+Orion.ResponseTime+WHERE+NodeID={node_id}')
if response.ok:
results = response.json().get('results')
data = [[self.convert_time_to_timestamp(load.get('DateTime')), load.get('AvgResponseTime')] for load in results]
return data
def send_get_requests(self, url):
url = self.generate_url(url)
response = requests.get(url, auth=self.get_auth(), verify=False)
return response
def convert_time_to_timestamp(self, date):
date_time = dateutil.parser.parse(date)
# Time displayed on the Graph is 1 hour behind UTC
utc_zone = tz.tzutc()
utc_time = date_time.astimezone(utc_zone)
utc_time = utc_time - timedelta(hours=1)
return utc_time.timestamp()*1000
def setup_solar_winds_install_agent_action(self):
solar_winds_hook = {
'name': "Install SolarWinds Agent",
'description': "Installs SolarWinds Agent on a Server",
'hook_point': None,
'module': '/var/opt/cloudbolt/proserv/xui/solar_winds/install_agent.py',
}
create_hook(**solar_winds_hook)
def is_license_valid(self):
response = self.send_get_requests(f'SolarWinds/InformationService/v3/Json/Query?query=SELECT+LicenseExpiresOn+FROM+Orion.Licensing.Licenses')
if response.ok:
results = response.json().get('results')
for license in results:
                if self.convert_time_to_timestamp(license.get('LicenseExpiresOn')) > (time.time() * 1000):
return True
return False
raise Exception(response.reason)
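
# Illustrative usage sketch (not part of the original module). It assumes the two
# "SolarWinds Connection Info" ConnectionInfo records referenced in __init__ already
# exist; the server IP below is a placeholder.
def _example_solarwinds_usage():
    manager = SolarWindsManager()
    if not manager.verify_rest_credentials():
        print("SolarWinds REST credentials are invalid")
        return
    # List the nodes Orion already knows about.
    for node in manager.get_solar_winds_nodes():
        print(node.get('NodeName'), node.get('IPAddress'))
    # Check a single (hypothetical) server and pull its stats if the agent is present.
    installed, status = manager.is_agent_installed('10.0.0.15')
    print(status)
    if installed:
        ok, stats = manager.get_server_stat('10.0.0.15')
        if ok:
            print(stats)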
|
StarcoderdataPython
|
40978
|
from distutils.core import setup
from Cython.Build import cythonize
setup(
name = "tax",
ext_modules = cythonize('tax.pyx'),
script_name = 'setup.py',
script_args = ['build_ext', '--inplace']
)
import tax
import numpy as np
print(tax.tax(np.ones(10)))
|
StarcoderdataPython
|
1724620
|
import glob
import json
files = glob.glob("../docs/curation_bk/*.json")
for file in files:
with open(file) as f:
df = json.load(f)
print(file)
for selection in df["selections"]:
label = selection["within"]["label"]
attribution = "utokyo"
if "張交帖" in label:
attribution = "ndl"
selection["@id"] = selection["@id"].replace("lire", "kunshujo-i").replace("curation", "curation/automatic/"+attribution)
df["@id"] = df["@id"].replace(
"lire", "kunshujo-i").replace("curation", "curation/automatic/"+attribution)
with open(file.replace("curation_bk", "curation/automatic/"+attribution), 'w') as f:
json.dump(df, f, ensure_ascii=False, indent=4,
sort_keys=True, separators=(',', ': '))
|
StarcoderdataPython
|
3252005
|
# kickstart-menu/menu.py
#!/usr/bin/env python
# pylint: disable=too-many-ancestors
"""Menu system."""
import sys
import npyscreen
import classes
import datetime
import re
from kickstart import *
def str_ljust(_string):
"""Add padding to string."""
pad = 20
return str(_string.ljust(pad, ".") + ":")
def update_enabled_widget(widget):
"""Update."""
if widget.parent.enabled.value == [0]:
widget.parent.interface.editable = True
widget.parent.display()
else:
widget.parent.interface.editable = False
widget.parent.interface.value = None
widget.parent.display()
def update_bootproto_widget(widget):
"""Update."""
if widget.parent.bootproto.value == [1]:
widget.parent.ipaddress.editable = False
widget.parent.ipaddress.hidden = True
widget.parent.ipaddress.color = 'NO_EDIT'
widget.parent.netmask.editable = False
widget.parent.netmask.hidden = True
widget.parent.netmask.color = 'NO_EDIT'
widget.parent.display()
else:
widget.parent.ipaddress.editable = True
widget.parent.ipaddress.hidden = False
widget.parent.ipaddress.color = 'DEFAULT'
widget.parent.netmask.editable = True
widget.parent.netmask.hidden = False
widget.parent.netmask.color = 'DEFAULT'
widget.parent.display()
# pylint: disable=too-many-instance-attributes
class menuSystem(npyscreen.NPSAppManaged):
""" All Forms registered with an NPSAppManaged instance can access the
controlling application as self.parentApp.
"""
def calculate_menu_height(self):
"""Calculate menu height for wid2et."""
return max(2, len(self.host.interfaces))
# pylint: disable=attribute-defined-outside-init
def onStart(self):
"""Register all forms for application."""
self.begin_at = 25
self.bootproto = ["static", "dhcp"]
self.teaming = ['yes', 'no']
self.host = classes.Host()
self.network_pxe = classes.PXENetwork()
self.network_cluster = classes.ClusterNetwork()
self.network_trust = classes.Network()
self.network_untrust = classes.Network()
self.network_passive = classes.Network()
self.storage_os = classes.Storage(mountpoint="/")
self.storage_fast = classes.Storage(mountpoint="/var/EDCOP/fast")
self.storage_bulk = classes.Storage(mountpoint="/var/EDCOP/bulk")
self.storage_shared = classes.Storage(mountpoint="/var/EDCOP/shared")
self.addForm("MAIN", MainForm)
self.addForm("HOSTNAME", HostEditForm)
self.addForm("NETWORKSELECT", NetworkSelectForm)
self.addForm("NETWORKPXE", PXENetForm)
self.addForm("NETWORKCLUSTER", ClusterNetForm)
self.addForm("NETWORKTRUST", NetworkEditForm,
network=self.network_trust, name="Trust (LAN)")
self.addForm("NETWORKUNTRUST", NetworkEditForm,
network=self.network_untrust, name="Untrust (WAN)")
self.addForm("NETWORKPASSIVE", NetworkEditForm,
network=self.network_passive, name="Passive")
self.addForm("STORAGESELECT", StorageSelectForm)
self.addForm("STORAGEOS", StorageEditForm, storage=self.storage_os, name="EDCOP OS")
self.addForm("STORAGEFAST", StorageEditForm, storage=self.storage_fast, name="Fast")
self.addForm("STORAGEBULK", StorageEditForm, storage=self.storage_bulk, name="Bulk")
self.addForm("STORAGESHARED", StorageEditForm, storage=self.storage_shared, name="Shared")
# pylint: disable=too-many-instance-attributes
class MainMenuWidget(npyscreen.MultiLineAction):
"""Display main menu."""
def __init__(self, *args, **keywords):
"""Init."""
super(MainMenuWidget, self).__init__(*args, **keywords)
self.menu_hostname = "Set Hostname"
self.menu_network = "Configure Network"
self.menu_storage = "Configure Storage"
self.values = [self.menu_hostname,
self.menu_network,
self.menu_storage]
self.max_height = len(self.values) + 1
# pylint: disable=invalid-name
def actionHighlighted(self, act_on_this, key_press):
"""Select form."""
if act_on_this == self.menu_hostname:
self.parent.parentApp.switchForm("HOSTNAME")
if act_on_this == self.menu_network:
self.parent.parentApp.switchForm("NETWORKSELECT")
if act_on_this == self.menu_storage:
self.parent.parentApp.switchForm("STORAGESELECT")
# pylint: disable=too-many-instance-attributes
class NetworkMenuWidget(npyscreen.MultiLineAction):
"""Display main menu for networks """
def __init__(self, *args, **keywords):
""" Initalize form
Note: inline trust, inline untrust, and passive networks are currently disabled
"""
super(NetworkMenuWidget, self).__init__(*args, **keywords)
self.menu_pxe = "PXE Network"
self.menu_cluster = "Cluster Network"
# self.menu_trust = "Inline-Trust (LAN) Network"
# self.menu_untrust = "Inline-UnTrust (WAN) Network"
# self.menu_passive = "Passive Network"
self.values = [self.menu_pxe,
self.menu_cluster]
self.max_height = len(self.values) + 1
# pylint: disable=invalid-name
def actionHighlighted(self, act_on_this, key_press):
"""Select form."""
if act_on_this == self.menu_pxe:
self.parent.parentApp.switchForm("NETWORKPXE")
if act_on_this == self.menu_cluster:
self.parent.parentApp.switchForm("NETWORKCLUSTER")
#if act_on_this == self.menu_trust:
# self.parent.parentApp.switchForm("NETWORKTRUST")
#if act_on_this == self.menu_untrust:
# self.parent.parentApp.switchForm("NETWORKUNTRUST")
#if act_on_this == self.menu_passive:
# self.parent.parentApp.switchForm("NETWORKPASSIVE")
# pylint: disable=too-many-instance-attributes
class StorageMenuWidget(npyscreen.MultiLineAction):
"""Display main menu."""
def __init__(self, *args, **keywords):
"""Init."""
super(StorageMenuWidget, self).__init__(*args, **keywords)
self.menu_os = "EDCOP OS"
self.menu_fast = "Local-Fast"
self.menu_bulk = "Local-Bulk"
self.menu_shared = "Shared"
self.values = [self.menu_os,
self.menu_fast,
self.menu_bulk,
self.menu_shared]
self.max_height = len(self.values) + 1
# pylint: disable=invalid-name
def actionHighlighted(self, act_on_this, key_press):
"""Select form."""
if act_on_this == self.menu_os:
self.parent.parentApp.switchForm("STORAGEOS")
if act_on_this == self.menu_fast:
self.parent.parentApp.switchForm("STORAGEFAST")
if act_on_this == self.menu_bulk:
self.parent.parentApp.switchForm("STORAGEBULK")
if act_on_this == self.menu_shared:
self.parent.parentApp.switchForm("STORAGESHARED")
class MainForm(npyscreen.ActionFormMinimal):
"""Home Screen."""
def create(self):
"""Run at instantiation."""
self.name = "EDCOP"
self.add(MainMenuWidget)
def on_ok(self):
"""Next."""
# Validate all forms have the minimum required data
hostnameComplete = False
clusterNetworkComplete = False
pxeNetworkComplete = False
storageComplete = False
incompleteForms = ""
""" Hostname Validation """
if(KICKSTART_MENU.host.name!=""):
hostnameComplete = True
else:
incompleteForms += "\nHostname"
""" PXE Network Validation """
if((KICKSTART_MENU.network_pxe.ip_address != "") and (KICKSTART_MENU.network_pxe.netmask != "") and (KICKSTART_MENU.network_pxe.interface != None)):
if((KICKSTART_MENU.network_pxe.bootproto == "dhcp") and (KICKSTART_MENU.network_pxe.dhcp_start != "") and (KICKSTART_MENU.network_pxe.dhcp_end != "")):
pxeNetworkComplete = True
elif(KICKSTART_MENU.network_pxe.bootproto == "static"):
pxeNetworkComplete = True
else:
incompleteForms += "\nPXE Network"
""" Cluster Network Valdiation """
if((KICKSTART_MENU.network_cluster.ip_address != "") and (KICKSTART_MENU.network_cluster.netmask != "") and (KICKSTART_MENU.network_cluster.interface != None)):
clusterNetworkComplete = True
else:
incompleteForms += "\nCluster Network"
""" Storage Validation """
if((KICKSTART_MENU.storage_os.mountpoint != "") and (KICKSTART_MENU.storage_os.disk != None)):
storageComplete = True
else:
incompleteForms += "\nStorage (EDCOP OS)"
# Raise an error to the user if they are missing data in any mandatory form
if ((hostnameComplete==True) and (clusterNetworkComplete==True) and (pxeNetworkComplete==True) and (storageComplete==True)):
try:
self.editing = False
self.parentApp.setNextForm(None)
except:
npyscreen.notify_confirm("Something went wrong. Please try again.", title="Error")
else:
formMessage = "There appears to be missing data on the following forms: \n \n \n" + incompleteForms
npyscreen.notify_confirm(formMessage, title="Error")
def exit_application(self):
self.editing = False
self.parentApp.setNextForm(None)
class NetworkSelectForm(npyscreen.ActionFormMinimal):
# Class for the form that has options for PXE, cluster, passive, etc sub-menus
"""Form."""
def create(self):
"""Run at instantiation."""
self.name = "EDCOP > Network"
self.add(NetworkMenuWidget)
def on_ok(self):
"""Next."""
self.parentApp.setNextForm("MAIN")
class StorageSelectForm(npyscreen.ActionFormMinimal):
"""Form."""
def create(self):
"""Run at instantiation."""
self.name = "EDCOP > Storage"
self.add(StorageMenuWidget)
def on_ok(self):
"""Next."""
self.parentApp.setNextForm("MAIN")
class HostEditForm(npyscreen.ActionFormV2):
"""Edit Hostname."""
def create(self):
"""Create method is called by the Form constructor.
It does nothing by default - it is there for you to override in subclasses,
but it is the best place to set up all the widgets on a Form. Expect this
method to be full of self.add(...) method calls, then!
"""
self.name = "Host configuration:"
self.hostname = self.add(npyscreen.TitleText, name="Hostname")
# pylint: disable=invalid-name
def beforeEditing(self):
"""Call before form is edited."""
self.hostname.value = self.parentApp.host.name
# pylint: disable=invalid-name
def afterEditing(self):
"""Call when the form is exited."""
self.parentApp.host.name = self.hostname.value
self.parentApp.switchFormPrevious()
def on_ok(self):
if (self.hostname.value != ""):
try:
self.parentApp.host.name = self.hostname.value
except:
npyscreen.notify_confirm("Something went wrong. Please check your hostname", title="Error")
else:
npyscreen.notify_confirm("You must enter a hostname.", title="Error")
class NetForm(npyscreen.ActionFormV2):
# Base Network Form class.
def create(self):
"""Create method is called by the Form constructor."""
self.begin_at = self.parentApp.begin_at
self.bootproto = self.add(npyscreen.TitleSelectOne,
name=str_ljust("Bootproto"),
begin_entry_at=self.begin_at,
max_height=3,
scroll_exit=True)
self.teaming = self.add(npyscreen.TitleSelectOne,
name=str_ljust("NIC Teaming"),
begin_entry_at=self.begin_at,
max_height=3,
scroll_exit=True)
self.interface = self.add(npyscreen.TitleMultiSelect,
name=str_ljust("Interface"),
begin_entry_at=self.begin_at,
#max_height=self.parentApp.calculate_menu_height,
max_height=8,
scroll_exit=True)
self.ipaddress = self.add(npyscreen.TitleText,
name=str_ljust("IP Address"),
begin_entry_at=self.begin_at)
self.netmask = self.add(npyscreen.TitleText,
name=str_ljust("Netmask"),
begin_entry_at=self.begin_at)
self.dhcp_start = self.add(npyscreen.TitleText,
name=str_ljust("DHCP start"),
begin_entry_at=self.begin_at)
self.dhcp_end = self.add(npyscreen.TitleText,
name=str_ljust("DHCP end"),
begin_entry_at=self.begin_at)
self.dns1 = self.add(npyscreen.TitleText,
name=str_ljust("Primary DNS"),
begin_entry_at=self.begin_at)
self.dns2 = self.add(npyscreen.TitleText,
name=str_ljust("Secondary DNS"),
begin_entry_at=self.begin_at)
self.gateway = self.add(npyscreen.TitleText,
name=str_ljust("Gateway"),
begin_entry_at=self.begin_at)
self.dhcp_start.hidden = True
self.dhcp_end.hidden = True
self.dns1.hidden = True
self.dns2.hidden = True
self.gateway.hidden = True
self.bootproto.values = ['static', 'dhcp']
self.teaming.values = ['yes', 'no']
#self.bootproto.value = 0
self.bootproto.value_changed_callback = update_bootproto_widget
def on_cancel(self):
"""Next."""
self.parentApp.switchFormPrevious()
# pylint: disable=too-many-instance-attributes
class PXENetForm(NetForm):
# PXE Network Form. Extends the NetForm class
# pylint: disable=invalid-name
# pylint: disable=attribute-defined-outside-init
def beforeEditing(self):
"""Call before form is edited."""
self.name = "EDCOP > Network > PXE"
self.network = self.parentApp.network_pxe
# combine interface with current operation state
interfaceState = curOperstate(self.parentApp.host.interfaces)
state = ['']*len(self.parentApp.host.interfaces)
for idx in range(len(self.parentApp.host.interfaces)):
state[idx] = self.parentApp.host.interfaces[idx] + " (" + interfaceState[idx] + ")"
self.interface.values = state
self.ipaddress.value = self.network.ip_address
self.netmask.value = self.network.netmask
self.dhcp_start.value = self.network.dhcp_start
self.dhcp_end.value = self.network.dhcp_end
self.dhcp_start.hidden = False
self.dhcp_end.hidden = False
self.teaming.hidden = True
def on_ok(self):
"""Save network information to object."""
errors = ''
try:
self.network.bootproto = self.parentApp.bootproto[self.bootproto.value[0]]
self.network.interface = self.parentApp.host.interfaces[self.interface.value[0]]
if (validateIP(self.ipaddress.value) == True):
self.network.ip_address = self.ipaddress.value
else:
errors += '\nIP Address'
if (validateNetmask(self.netmask.value) == True):
self.network.netmask = self.netmask.value
else:
errors += '\nNetmask'
self.network.network = networkID(self.network.ip_address, self.network.netmask)
self.network.dhcp_start = self.dhcp_start.value
self.network.dhcp_end = self.dhcp_end.value
# If there are no issues, jump to parent form, otherwise, alert so user can fix
if (errors == ''):
self.parentApp.switchFormPrevious()
else:
formMessage = "There appears to be missing or invalid data on the following fields: \n \n \n" + errors
npyscreen.notify_confirm(formMessage, title="Error")
except IndexError:
npyscreen.notify_confirm("Please select a valid interface", title="Error")
class ClusterNetForm(NetForm):
# Cluster network form. Extends the NetForm class
# pylint: disable=invalid-name
# pylint: disable=attribute-defined-outside-init
def beforeEditing(self):
"""Update values."""
self.name = "EDCOP > Network > Cluster"
self.network = self.parentApp.network_cluster
self.ipaddress.value = self.network.ip_address
# combine interface with current operation state
interfaceState = curOperstate(self.parentApp.host.interfaces)
state = ['']*len(self.parentApp.host.interfaces)
for idx in range(len(self.parentApp.host.interfaces)):
state[idx] = self.parentApp.host.interfaces[idx] + " (" + interfaceState[idx] + ")"
self.interface.values = state
self.ipaddress.value = self.network.ip_address
self.netmask.value = self.network.netmask
self.dns1.value = self.network.dns1
self.dns2.value = self.network.dns2
self.gateway.value = self.network.gateway
self.dns1.hidden = False
self.dns2.hidden = False
self.gateway.hidden = False
self.teaming.value = self.network.teaming
def on_ok(self):
"""Save network information to object."""
errors = ''
try:
interfaceList = []
for index in range(len(self.interface.value)):
interfaceList.append(self.parentApp.host.interfaces[self.interface.value[index]])
self.network.interface = interfaceList
self.network.bootproto = self.parentApp.bootproto[self.bootproto.value[0]]
self.network.teaming = self.parentApp.teaming[self.teaming.value[0]]
if (validateIP(self.ipaddress.value) == True):
self.network.ip_address = self.ipaddress.value
else:
errors += '\nIP Address'
if (validateNetmask(self.netmask.value) == True):
self.network.netmask = self.netmask.value
else:
errors += '\nNetmask'
self.network.network = networkID(self.network.ip_address, self.network.netmask)
if (validateIP(self.dns1.value) == True):
self.network.dns1 = self.dns1.value
else:
errors += '\nDNS1'
if (validateIP(self.dns2.value) == True):
self.network.dns2 = self.dns2.value
else:
errors += '\nDNS2'
if (validateIP(self.gateway.value) == True):
self.network.gateway = self.gateway.value
else:
errors += '\nGateway'
# If there are no issues, jump to parent form, otherwise, alert so user can fix
if (errors == ''):
self.parentApp.switchFormPrevious()
else:
formMessage = "There appears to be missing or invalid data on the following fields: \n \n \n" + errors
npyscreen.notify_confirm(formMessage, title="Error")
except IndexError:
npyscreen.notify_confirm("Please select a valid interface", title="Error")
class NetworkEditForm(npyscreen.ActionFormV2):
"""Form."""
def __init__(self, network, name, *args, **keywords):
"""Init."""
super(NetworkEditForm, self).__init__()
self.network = network
self.name = "EDCOP > Network > " + name
def create(self):
"""Add."""
self.enabled = self.add(npyscreen.TitleSelectOne,
name=str_ljust("Enable Interface"),
max_height=3, scroll_exit=True,
begin_entry_at=25)
self.interface = self.add(npyscreen.TitleSelectOne,
name=str_ljust("Interface"),
scroll_exit=True,
begin_entry_at=25,
editable=True)
self.enabled.value_changed_callback = update_enabled_widget
# pylint: disable=invalid-name
def beforeEditing(self):
"""Refresh."""
self.enabled.values = ["Enabled",
"Disabled"]
self.enabled.value = [1]
self.interface.values = self.parentApp.host.interfaces
def on_ok(self):
"""Ok."""
self.parentApp.setNextForm("NETWORKSELECT")
if self.enabled.value == [0]:
try:
self.network.enabled = True
self.network.interface = self.parentApp.host.interfaces[self.interface.value[0]]
except IndexError:
npyscreen.notify_confirm("Please select a valid interface", title="Error")
def on_cancel(self):
"""Cancel."""
self.parentApp.setNextForm("NETWORKSELECT")
class StorageEditForm(npyscreen.ActionFormV2):
"""Form."""
def __init__(self, storage, name, *args, **keywords):
"""Init."""
super(StorageEditForm, self).__init__(*args, **keywords)
self.storage = storage
self.name = "EDCOP > Storage > " + name
def create(self):
"""Add."""
self.mount = self.add(npyscreen.TitleText, name="Mountpoint")
self.disk = self.add(npyscreen.TitleSelectOne, name="Disk", scroll_exit=True)
# pylint: disable=invalid-name
def beforeEditing(self):
"""Refresh."""
self.mount.value = self.storage.mountpoint
self.disk.values = self.parentApp.host.harddrives
def on_ok(self):
"""Ok."""
try:
self.storage.mountpoint = self.mount.value
self.storage.disk = self.parentApp.host.harddrives[self.disk.value[0]]
self.parentApp.setNextForm("STORAGESELECT")
except IndexError:
npyscreen.notify_confirm("Please select a valid storage drive", title="Error")
def on_cancel(self):
"""Cancel."""
self.parentApp.setNextForm("STORAGESELECT")
class EDCOPOSForm(StorageEditForm):
# Class for EDCOPOS Form. Extends Storage Form
def beforeEditing(self):
self.name = "EDCOP > Storage > EDCOP OS"
def on_ok(self):
pass
# Helper functions
def logData(KICKSTART_MENU):
    # Dump various data to a log file for troubleshooting purposes
outFile = open("/tmp/dev.log", "w")
dump = ""
now = datetime.datetime.now()
outFile.write(now.isoformat())
dump += now.isoformat() + "\n\n"
dump += "=================================" + "\n\n"
dump += KICKSTART_MENU.host.name + "\n"
dump += str(KICKSTART_MENU.host.__dict__) + "\n"
dump += str(KICKSTART_MENU.network_pxe.__dict__) + "\n"
dump += str(KICKSTART_MENU.network_cluster.__dict__) + "\n"
dump += str(KICKSTART_MENU.network_trust.__dict__) + "\n"
dump += str(KICKSTART_MENU.network_untrust.__dict__) + "\n"
dump += str(KICKSTART_MENU.network_passive.__dict__) + "\n"
dump += str(KICKSTART_MENU.storage_os.__dict__) + "\n"
dump += str(KICKSTART_MENU.storage_fast.__dict__) + "\n"
dump += str(KICKSTART_MENU.storage_bulk.__dict__) + "\n"
dump += str(KICKSTART_MENU.storage_shared.__dict__) + "\n"
outFile.write(dump)
outFile.close()
def networkID(ip, netmask):
# Get the network ID based on a given IP and netmask
ip_split = [0]*4
netmask_split = [0]*4
network = [0]*4
ip_split = ip.split('.')
netmask_split = netmask.split('.')
for ind in range(len(ip_split)):
network[ind] = int(ip_split[ind]) & int(netmask_split[ind])
return '.'.join(str(idx) for idx in network)
def curOperstate(interfaces):
# Get the operational status of every NIC
state = []
for name in os.listdir('/sys/class/net'):
if not name == 'lo':
state.append(open("/sys/class/net/"+name+"/operstate", "r").read().strip('\n'))
return state
def validateIP(IP):
# validate that a passed in IP is valid or not.
# Return: true if valid, false if not valid
# make sure user doesn't put in "potato" for an IP
octet = IP.split('.')
if len(octet) != 4:
return False
try:
isValid = all(0<=int(idx)<256 for idx in octet)
if isValid is False:
return False
except ValueError:
return False
# verify that the first octet isn't 0
firstOctet = re.search('^(0).*', IP)
try:
if (firstOctet.group(1) == '0'):
return False
except:
pass
return True
def validateNetmask(mask):
# validate that a passed in network mask is valid or not.
# Return: true if valid, false if not valid
maskGroup = re.search('^(\d{1,3})[.](\d{1,3})[.](\d{1,3})[.](\d{1,3})$', mask)
# See if anything matched
if (maskGroup is None):
return False
# Check to see if netmask is non-zero
if(maskGroup.group(1) == '0'):
return False
# validate mask is actually valid
count = 7
val = 0
maskValues = ['0']
while count >= 0:
val += 0b1 << count
maskValues.append(str(val))
count -= 1
for idx in range(1,5):
        if maskGroup.group(idx) not in maskValues:
return False
return True
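# Illustrative sanity checks for the helpers above (added for clarity; not called from
# the menu itself).
def _selftest_network_helpers():
    assert validateIP('192.168.1.10') is True
    assert validateIP('256.1.1.1') is False
    assert validateIP('0.1.2.3') is False           # a leading zero octet is rejected
    assert validateNetmask('255.255.255.0') is True
    assert validateNetmask('potato') is False
    assert networkID('192.168.1.10', '255.255.255.0') == '192.168.1.0'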
if __name__ == '__main__':
try:
KICKSTART_MENU = menuSystem()
KICKSTART_MENU.run()
logData(KICKSTART_MENU)
kickstartGenerator(master, KICKSTART_MENU)
kickstartGenerator(minion, KICKSTART_MENU)
sys.exit(0)
except KeyboardInterrupt:
logData(KICKSTART_MENU)
sys.exit(0)
|
StarcoderdataPython
|
1607035
|
#!/usr/bin/env python
#
# Copyright (c) 2016-2021 InSeven Limited
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import dateutil.parser
class TransformFailure(Exception):
pass
class Skip(Exception):
pass
class Key(object):
def __init__(self, key):
self.key = key
def __call__(self, dictionary):
if self.key in dictionary:
return dictionary[self.key]
raise TransformFailure()
class First(object):
def __init__(self, *transforms):
self.transforms = transforms
def __call__(self, data):
for transform in self.transforms:
try:
return transform(data)
except TransformFailure:
pass
raise TransformFailure()
class Default(object):
def __init__(self, value):
self.value = value
def __call__(self, data):
return self.value
class Empty(object):
def __call__(self, data):
raise Skip()
class Identity(object):
def __call__(self, data):
return data
class String(object):
def __init__(self, transform):
self.transform = transform
def __call__(self, data):
data = self.transform(data)
if data is not None:
return str(data)
return None
GPS_COORDINATE_EXPRESSION = re.compile(r"^(\d+\.\d+) ([NSEW])$")
class GPSCoordinate(object):
def __init__(self, transform):
self.transform = transform
def __call__(self, data):
data = self.transform(data)
match = GPS_COORDINATE_EXPRESSION.match(data)
if not match:
raise AssertionError(f"Invalid GPS coordinate '{data}'.")
value = float(match.group(1))
direction = match.group(2)
if direction == "W":
value = value * -1
return value
class EXIFDate(object):
def __init__(self, transform):
self.transform = transform
def __call__(self, data):
data = self.transform(data)
# ExifTool makes the odd decision to separate the date components with a colon, meaning that `dateutil` cannot
# parse it directly, so we fix it up.
# Skip zero dates.
if data == "0000:00:00 00:00:00":
raise TransformFailure()
value = dateutil.parser.parse(data.replace(":", "-", 2))
return value
class Dictionary(object):
def __init__(self, schema):
self.schema = schema
def __call__(self, data):
result = {}
for key, transform in self.schema.items():
try:
result[key] = transform(data)
except Skip:
pass
return result
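
# Illustrative usage sketch (not part of the original module): composing the transforms
# above into a schema and applying it to one EXIF-style dictionary. The field names and
# values below are hypothetical.
EXAMPLE_SCHEMA = Dictionary({
    "title": First(String(Key("Title")), Default("Untitled")),
    "date": First(EXIFDate(Key("DateTimeOriginal")), Default(None)),
    "latitude": First(GPSCoordinate(Key("GPSLatitude")), Default(None)),
})

if __name__ == "__main__":
    metadata = {
        "Title": "Harbour at dusk",
        "DateTimeOriginal": "2021:06:12 19:04:33",
        "GPSLatitude": "51.5074 N",
    }
    # Roughly: {'title': 'Harbour at dusk', 'date': datetime(2021, 6, 12, 19, 4, 33),
    #           'latitude': 51.5074}
    print(EXAMPLE_SCHEMA(metadata))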
|
StarcoderdataPython
|
3248849
|
#!/usr/bin/env python3
import logging

from benchpress.lib.hook import Hook
# NOTE: the util import location and the default output paths below are assumptions
# made to keep this snippet self-contained; adjust them to the real benchpress layout.
from benchpress.lib import util

logger = logging.getLogger(__name__)

DEFAULT_STDOUT_PATH = '/tmp/emon.out'
DEFAULT_STDERR_PATH = '/tmp/emon.err'


class Emon(Hook):
    """Emon hook allows the benchmark to collect CPU utilization data across
    the execution time of an application or system."""

    emon_proc = None

    def before_job(self, opts, job):
        self.emon_proc = self._start_background_emon(opts)

    def after_job(self, opts, job):
        if self.emon_proc is not None:
            util.kill_process(self.emon_proc)

    def _start_background_emon(self, opts):
        """Start emon data collection in the background while the
        application is executing."""
        if 'config_file' not in opts:
            raise KeyError('Config file not specified in opts arg')
        # Pass the config file as its own argv element.
        cmd = ['./emon', '-i', opts['config_file']]
        stdout_path = opts.get('stdout_path', DEFAULT_STDOUT_PATH)
        stderr_path = opts.get('stderr_path', DEFAULT_STDERR_PATH)
        return util.issue_background_command(
            cmd,
            stdout_path,
            stderr_path,
        )
|
StarcoderdataPython
|
1615357
|
import os
import json
import yaml
import logging
from .project import Project
from .plugin import PluginType, PluginInstall, PluginRef
from .plugin_discovery_service import PluginDiscoveryService
from .plugin.factory import plugin_factory
from .config_service import ConfigService
class PluginNotSupportedException(Exception):
pass
class PluginAlreadyAddedException(Exception):
def __init__(self, plugin: PluginRef):
self.plugin = plugin
super().__init__()
class MissingPluginException(Exception):
pass
class ProjectAddService:
def __init__(
self,
project: Project,
plugin_discovery_service: PluginDiscoveryService = None,
config_service: ConfigService = None,
):
self.project = project
self.discovery_service = plugin_discovery_service or PluginDiscoveryService(
project
)
self.config_service = config_service or ConfigService(project)
def add(self, plugin_type: PluginType, plugin_name: str, **kwargs) -> PluginInstall:
plugin = self.discovery_service.find_plugin(plugin_type, plugin_name)
installed = plugin.as_installed()
return self.config_service.add_to_file(installed)
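
# Illustrative usage sketch (not part of the original module). It assumes an existing
# Project instance; PluginType.EXTRACTORS and the plugin name "tap-gitlab" are examples
# and may not match the local discovery file.
def _example_add_extractor(project: Project) -> PluginInstall:
    service = ProjectAddService(project)
    return service.add(PluginType.EXTRACTORS, "tap-gitlab")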
|
StarcoderdataPython
|
3301225
|
import time
import datetime
import sys
import getopt, argparse
from collections import defaultdict
import json
import MySQLdb
import unicodecsv
import pprint
ITEM_MAP_VARCHAR_INSERT = "insert into item_map_varchar (item_id, attr_id, value) values ((select item_id from items where client_item_id = %(id)s),(select attr_id from item_attr where name = %(attr_name)s), %(value)s )"
ITEM_MAP_DOUBLE_INSERT = "insert into item_map_double (item_id, attr_id, value) values ((select item_id from items where client_item_id = %(id)s),(select attr_id from item_attr where name = %(attr_name)s), %(value)s )"
ITEM_MAP_DATETIME_INSERT = "insert into item_map_datetime (item_id, attr_id, value) values ((select item_id from items where client_item_id = %(id)s),(select attr_id from item_attr where name = %(attr_name)s), %(value)s )"
ITEM_MAP_ENUM_INSERT = "insert into item_map_enum (item_id, attr_id, value_id) values ((select item_id from items where client_item_id = %(id)s),(select attr_id from item_attr where name = %(attr_name)s), (select e.value_id from ITEM_ATTR_ENUM e, item_attr a where a.name = %(attr_name)s and e.value_name = %(value)s and a.attr_id = e.attr_id) )"
ITEM_MAP_TEXT_INSERT = "insert into item_map_text (item_id, attr_id, value) values ((select item_id from items where client_item_id = %(id)s),(select attr_id from item_attr where name = %(attr_name)s), %(value)s )"
ITEM_MAP_INT_INSERT = "insert into item_map_int (item_id, attr_id, value) values ((select item_id from items where client_item_id = %(id)s),(select attr_id from item_attr where name = %(attr_name)s), %(value)s )"
ITEM_MAP_BOOLEAN_INSERT = "insert into item_map_boolean (item_id, attr_id, value) values ((select item_id from items where client_item_id = %(id)s),(select attr_id from item_attr where name = %(attr_name)s), %(value)s )"
ITEM_INSERT = "INSERT INTO ITEMS (name, first_op, last_op, client_item_id, type) VALUES (%(name)s, NOW(), NOW(), %(id)s, 1)"
ITEM_INSERT_NO_AUTO_INCREMENT = "INSERT INTO ITEMS (item_id, name, first_op, last_op, client_item_id, type) VALUES (%(item_id)s, %(name)s, NOW(), NOW(), %(id)s, 1)"
DB_BATCH_SIZE = 1000
attr_insert_map = {
'ENUM': ITEM_MAP_ENUM_INSERT,
'BOOLEAN': ITEM_MAP_BOOLEAN_INSERT,
'VARCHAR': ITEM_MAP_VARCHAR_INSERT,
'TEXT': ITEM_MAP_TEXT_INSERT,
'DATETIME': ITEM_MAP_DATETIME_INSERT,
'INT': ITEM_MAP_INT_INSERT,
'DOUBLE': ITEM_MAP_DOUBLE_INSERT
}
available_attrs = dict()
available_enums = dict()
def pp(o):
p = pprint.PrettyPrinter(indent=4)
p.pprint(o)
def retrieveDbAttrs(db):
cur = db.cursor()
cur.execute("SELECT ATTR_ID, NAME, TYPE FROM ITEM_ATTR")
rows = cur.fetchall()
attrs = dict()
for row in rows:
attrs[row[1]]= (row[0], row[2])
return attrs
def retrieveDbEnums(db):
cur = db.cursor()
# enum structure:
# attr_id1:
# value_name1 : value_id1
# value_name2 :value_id2
cur.execute("SELECT ATTR_ID, VALUE_NAME, VALUE_ID FROM ITEM_ATTR_ENUM")
rows = cur.fetchall()
enums = defaultdict(dict)
for row in rows:
enums[row[0]][row[1]] = row[2]
return enums
def validateCSVAgainstDb(csv_file, db):
global available_attrs, available_enums
failed = False
attrs = retrieveDbAttrs(db)
available_attrs = attrs
enums = retrieveDbEnums(db)
available_enums = enums
with open(csv_file) as csvFile:
reader = unicodecsv.DictReader(csvFile,encoding='utf-8')
noOfFields = 0
for index, line in enumerate(reader, start=1):
            if index == 1:
noOfFields = len(line)
if not validateFieldsAgainstDbFields(set(line), attrs, db):
exit(1)
validateLine(index,line, noOfFields, attrs, enums)
if len(line) != noOfFields:
failLine(index, line)
failed = True
if failed:
exit(1)
def validateLine(index,line, noOfFields, attrs, enums):
if len(line) != noOfFields:
failLine(index, line)
failed = True
else:
for word in line:
if str(word) == 'id':
continue
if str(word) == 'name':
continue
value = line[word]
if str(attrs[word][1]) == 'ENUM':
if value not in enums[attrs[word][0]]:
print 'couldn\'t find enum value', value
exit(1)
def validateFieldsAgainstDbFields(fields,attrs, db):
failed = False
for field in fields:
if field not in attrs and field != 'id' and field != 'name':
failed = True
print 'Field \'',field,'\'not an attribute in the DB'
return not failed
def doItemInserts(csv_file, db):
with open(csv_file) as csvFile:
reader = unicodecsv.DictReader(csvFile,encoding='utf-8')
inserts = []
for line in reader:
client_id = line['id']
name = ''
if 'name' in line:
name = line['name']
inserts.append({'name':name,'id':client_id, 'item_id':client_id})
cur = db.cursor()
print "inserting items into the db"
###cur.executemany(ITEM_INSERT, inserts)
cur.executemany(ITEM_INSERT_NO_AUTO_INCREMENT, inserts)
db.commit()
print 'finished item inserts'
def doAttrInserts(csv_file, db):
inserts = defaultdict(list)
insertNum = 0
with open(csv_file) as csvFile:
reader = unicodecsv.DictReader(csvFile,encoding='utf-8')
for line in reader:
for field_name in line:
if field_name == 'id' or field_name== 'name':
continue
attr_type = available_attrs[str(field_name)][1]
inserts[attr_type].append({'attr_name': field_name, 'value': line[field_name], 'id': line['id']})
if len(inserts[attr_type]) > DB_BATCH_SIZE:
insertNum+=1
reallyDoInserts(inserts[attr_type], attr_insert_map[attr_type], insertNum, db)
del inserts[attr_type]
for index, insert_label in enumerate(inserts, start=1):
insertNum+=1
reallyDoInserts(inserts[insert_label], attr_insert_map[insert_label], insertNum, db)
db.commit()
print 'finished attribute inserts'
def reallyDoInserts(params, insertStatement, insertNum, db):
cur = db.cursor()
print "inserting attribute batch", insertNum,'into the db'
cur.executemany(insertStatement, params)
def failLine(lineNum, line):
print "line",lineNum,"failed as it only had",len(line),"fields"
def cleanUpDb(db):
dbc = db.cursor()
dbc.execute('truncate table items')
dbc.execute('truncate table item_map_varchar')
dbc.execute('truncate table item_map_double')
dbc.execute('truncate table item_map_datetime')
dbc.execute('truncate table item_map_int')
dbc.execute('truncate table item_map_boolean')
dbc.execute('truncate table item_map_enum')
dbc.execute('truncate table item_map_text')
db.commit()
def import_items(client_name, db_settings, data_file_fpath):
db = MySQLdb.connect(
host=db_settings['host'],
user=db_settings['user'],
passwd=db_settings['password'],
db=client_name
)
db.set_character_set('utf8')
dbc = db.cursor()
dbc.execute('SET NAMES utf8;')
dbc.execute('SET CHARACTER SET utf8;')
dbc.execute('SET character_set_connection=utf8;')
dbc.execute("SET GLOBAL max_allowed_packet=1073741824")
try:
validateCSVAgainstDb(data_file_fpath, db)
doItemInserts(data_file_fpath, db)
doAttrInserts(data_file_fpath,db)
except:
print 'Unexpected error ...', sys.exc_info()[0]
print 'Clearing DB of items and attributes'
try:
cleanUpDb(db)
except:
print 'couldn\'t clean up db'
raise
print "Successfully ran all inserts"
|
StarcoderdataPython
|
196012
|
# Domepo/icalGenerator
from icalendar import Calendar, Event, Alarm
from datetime import date, datetime, timedelta
class addICS:
def __init__(self, file):
self.file = file
self.cal = Calendar()
self.cal.add("version", "2.0")
self.cal.add("prodid", "Technik-Kalender")
def add(
self, dateStart, dateEnd, summary, location="", description="", categories=""
):
event = Event()
alarm = Alarm()
alarm.add("description","This is an event reminder")
alarm.add("action","DISPLAY")
alarm.add("trigger",timedelta(minutes=-10))
event.add("summary", summary)
event.add("location", location)
event.add("categories", categories, encode=0)
event.add("description", description)
event.add("dtstart", dateStart)
event.add("dtend", dateEnd)
event.add("dtstamp", datetime.now())
event.add("priority", 5)
event.add_component(alarm)
self.cal.add_component(event)
def save(self):
f = open(self.file, "wb")
f.write(self.cal.to_ical())
f.close()
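
# Illustrative usage sketch (not part of the original module): write a calendar file
# with a single one-hour event. The file name and event details are placeholders.
if __name__ == "__main__":
    start = datetime(2024, 5, 17, 10, 0)
    ics = addICS("technik-kalender.ics")
    ics.add(
        dateStart=start,
        dateEnd=start + timedelta(hours=1),
        summary="Sound check",
        location="Main hall",
        description="Set-up and sound check",
        categories="Technik",
    )
    ics.save()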
|
StarcoderdataPython
|
3334092
|
"""Constants for the SENZ WiFi integration."""
DOMAIN = "senz"
VERSION = "0.0.6"
SENZ_API = "https://api.senzthermostat.nvent.com/api/v1"
OAUTH2_AUTHORIZE = "https://id.senzthermostat.nvent.com/connect/authorize"
OAUTH2_TOKEN = "https://id.senzthermostat.nvent.com/connect/token"
|
StarcoderdataPython
|
1698436
|
# -*- coding: utf-8 -*-
import smtplib
import email
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.base import MIMEBase
from email.mime.application import MIMEApplication
from email.header import Header
import configs
class EmailToolkit(object):
smtp_host = configs.SMTP_HOST
smtp_port = configs.SMTP_PORT
sending_address = configs.SENDING_ADDRESS
smtp_password = configs.SMTP_PASSWORD
reply_to = configs.REPLY_TO
def send_activation_email(self, send_to, username, activate_url):
email_html = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>『Celery Soft 学术』激活账号</title>
</head>
<body>
<div style="font-size: 14px;">
<p>亲爱的{}:</p>
<p>您好,感谢注册『Celery Soft 学术』,只差最后一步,就能完成注册,在48小时内点击下面的链接即可:</p>
<p></p>
<div><a href="{}">{}</a></div>
<div>(如果链接无法点击,请将它拷贝到浏览器的地址栏中直接打开)</div>
<p>如果您最近没有进行注册,或者说您确信您错误地收到了这封邮件,还请忽视这封邮件,我们十分抱歉</p>
<p>此致,</p>
<p>Celery Soft 学术</p>
</div>
<div style="font-size: 13px; margin-top: 48px;">
<div>
顺便,我们觉得有必要让您知道,如果您自己没有在『Celery Soft 学术』进行注册的话,您收到这封邮件是因为有人在我们网站试图使用您的电子邮箱地址进行用户注册,如若没有您的同意,他是无法完成注册的,请您放心。
</div>
<div>
如果您想在『Celery Soft 学术』创建一个账号,请前往:
</div>
<div>
<a href="http://www.celerysoft.science/register/">http://www.celerysoft.science/register/</a>
</div>
<div>
如果还有任何疑问,可以完成注册之后再登录网站联系客服,谢谢。
</div>
</div>
</body>
</html>
'''
email_html = email_html.format(username, activate_url, activate_url)
title = '欢迎注册Celery Soft学术'
self.__send_email(send_to, title, email_html)
print('用户{}的验证电子邮箱地址的邮件已成功发送至{}'.format(username, send_to))
def send_activation_email_for_modifying_email_address(self, send_to, username, activate_url):
email_html = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>『Celery Soft 学术』更换电子邮箱地址</title>
</head>
<body>
<div style="font-size: 14px;">
<p>亲爱的{}:</p>
<p>您好,您申请更换在『Celery Soft 学术』的电子邮箱地址,只差最后一步,就能完成更换,在48小时内点击下面的链接即可:</p>
<p></p>
<div><a href="{}">{}</a></div>
<div>(如果链接无法点击,请将它拷贝到浏览器的地址栏中直接打开)</div>
<p>如果您不是『Celery Soft 学术』的会员,或者说您确信您错误地收到了这封邮件,还请忽视这封邮件,我们十分抱歉</p>
<p>此致,</p>
<p>Celery Soft 学术</p>
</div>
<div style="font-size: 13px; margin-top: 48px;">
<div>
顺便,我们觉得有必要让您知道,如果您自己没有在『Celery Soft 学术』进行过注册的话,您收到这封邮件是因为有人在我们网站试图将联系方式更改为您的电子邮箱地址,如若没有您的同意,他无法完成这个操作,请您放心。
</div>
<div>
如果您想在『Celery Soft 学术』创建一个账号,请前往:
</div>
<div>
<a href="http://www.celerysoft.science/register/">http://www.celerysoft.science/register/</a>
</div>
<div>
如果还有任何疑问,可以完成注册之后再登录网站联系客服,谢谢。
</div>
</div>
</body>
</html>
'''
email_html = email_html.format(username, activate_url, activate_url)
title = '修改在Celery Soft学术的电子邮箱地址'
self.__send_email(send_to, title, email_html)
print('用户{}修改电子邮箱地址的验证邮件已成功发送至{}'.format(username, send_to))
def send_reset_password_email(self, send_to, username, reset_password_url):
email_html = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>『Celery Soft 学术』重设密码</title>
</head>
<body>
<div style="font-size: 14px;">
<p>亲爱的{}:</p>
<p>您好,您申请重设在『Celery Soft 学术』的登录密码,只差最后一步,就能完成重设。在1小时内点击下面的链接即可:</p>
<p></p>
<div><a href="{}">{}</a></div>
<div>(如果链接无法点击,请将它拷贝到浏览器的地址栏中直接打开)</div>
<p>如果您没有在『Celery Soft 学术』申请重设登录密码,那我们严重怀疑您在『Celery Soft 学术』的密码已经泄漏,请您立即登录网站修改密码。</p>
<p>此致,</p>
<p>Celery Soft 学术</p>
</div>
</body>
</html>
'''
email_html = email_html.format(username, reset_password_url, reset_password_url)
title = '重设在Celery Soft学术的登录密码'
self.__send_email(send_to, title, email_html)
print('用户{}的重设密码邮件已成功发送至{}'.format(username, send_to))
def send_email(self, send_to, title, text):
self.__send_email(send_to, title, text)
print('邮件已成功发送至{}'.format(send_to))
def __send_email(self, send_to, title, text):
        # Build the multipart/alternative message structure
msg = MIMEMultipart('alternative')
msg['Subject'] = Header(title, charset='utf-8').encode()
msg['From'] = '%s <%s>' % (Header('Celery Soft学术', charset='utf-8').encode(), self.sending_address)
msg['To'] = send_to
msg['Reply-to'] = self.reply_to
msg['Message-id'] = email.utils.make_msgid()
msg['Date'] = email.utils.formatdate()
if text is not None:
text_to_attach = MIMEText(text, _subtype='html', _charset='UTF-8')
msg.attach(text_to_attach)
else:
            # Build the text/plain part of the alternative structure
textplain = MIMEText('自定义TEXT纯文本部分', _subtype='plain', _charset='UTF-8')
msg.attach(textplain)
            # Build the text/html part of the alternative structure
texthtml = MIMEText('自定义HTML超文本部分', _subtype='html', _charset='UTF-8')
msg.attach(texthtml)
        # Send the email
try:
client = smtplib.SMTP_SSL(self.smtp_host, self.smtp_port)
client.login(self.sending_address, self.smtp_password)
client.sendmail(self.sending_address, send_to, msg.as_string())
client.quit()
except smtplib.SMTPConnectError as e:
print('邮件发送失败,连接失败:', e.smtp_code, e.smtp_error)
except smtplib.SMTPAuthenticationError as e:
print('邮件发送失败,认证错误:', e.smtp_code, e.smtp_error)
except smtplib.SMTPSenderRefused as e:
print('邮件发送失败,发件人被拒绝:', e.smtp_code, e.smtp_error)
except smtplib.SMTPRecipientsRefused as e:
print('邮件发送失败,收件人被拒绝:', e.smtp_code, e.smtp_error)
except smtplib.SMTPDataError as e:
print('邮件发送失败,数据接收拒绝:', e.smtp_code, e.smtp_error)
except smtplib.SMTPException as e:
print('邮件发送失败, ', e.message)
except Exception as e:
print('邮件发送异常, ', str(e))
toolkit = EmailToolkit()
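
# Illustrative usage sketch (not part of the original module): the address, subject and
# HTML body are placeholders; real SMTP settings still come from configs.py. Kept in a
# helper so importing this module never sends mail.
def _example_send():
    toolkit.send_email(
        send_to='[email protected]',
        title='Test message',
        text='<p>Hello from EmailToolkit.</p>',
    )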
|
StarcoderdataPython
|
3398716
|
#coding: utf-8
import unittest
from test_search import JOURNAL
from search import search_for
from tomd import parse_boldrow, entry2md, result2md, swedate
class TestBoldRowRegex(unittest.TestCase):
def test_no_math(self):
self.assertEqual(None, parse_boldrow('Hejsan hej'))
def test_math(self):
self.assertEqual(('git', 'ett dvcs program'), parse_boldrow('git - ett dvcs program'))
class TestEntry2md(unittest.TestCase):
def test_simple_entry(self):
entry = 'git co -b branch - skapa och checka ut ny branch.'
expected_md = '**git co -b branch** - skapa och checka ut ny branch.'
self.assertEqual(expected_md, entry2md(entry))
def test_entry_with_code_example(self):
entry = """
def fn(x, y):
return 2"""
expected_md = entry
self.assertEqual(expected_md.strip(), entry2md(entry).strip())
class TestNiceSwedishJournalDateFormatting(unittest.TestCase):
def test_sunday24th_of_march_2019(self):
self.assertEqual(u"Söndag 24e mars 2019", swedate(2019, 3, 24))
def test_tuesday2nd_of_april_2019(self):
self.assertEqual(u"Tisdag 2e april 2019", swedate(2019, 4, 2))
def test_monday1st_of_april_2019(self):
self.assertEqual(u"Måndag 1a april 2019", swedate(2019, 4, 1))
def test_sunday31st_of_march_2019(self):
self.assertEqual(u"Söndag 31a mars 2019", swedate(2019, 3, 31))
class TestSearchResult2md(unittest.TestCase):
def test_single_entry_found(self):
expected = u"""
Hittade "2019" i följande anteckningar
======================================
Tisdag 5e mars 2019
-------------------
**git branch** - lista branches
"""
result = search_for(u'2019', JOURNAL)
import sys
print(sys.version)
print(type(u''))
print(type(''))
got = result2md(result, u'2019')
self.assertEqual(expected.strip(), got.strip())
|
StarcoderdataPython
|
3273178
|
# Misc/HiveMaker/solution_1.py
# The idea: unless we have fewer than 11 sticks (where the solution is trivial), we
# create as many pairs of hexagons as possible. Adding two pairs gives the best result
# and also returns 3 sticks to us. We repeat until no new pairs can be created. Then,
# with the fewer than 11 sticks that remain, we can add at most 2 new hexagons.
def HiveMaker(sticks):
#If less than 11 then only 1
if sticks < 11:
return [1, 6, sticks-6]
hive = 0
initial = sticks
while True:
#A hex pair has 11 sticks
pairs = sticks // 11
#The remaining sticks
sticks = sticks % 11
#If no pairs
if pairs == 0:
#With 4 sticks we have a new hex
hive += sticks//4
sticks %= 4
#But we cannot do anything else
break
#When we add two pairs we get 3 sticks
additions = pairs - 1
sticks += additions * 3
#Add to hive
hive += pairs * 2
return [hive, initial-sticks, sticks]
print(HiveMaker(7))
print(HiveMaker(19))
print(HiveMaker(23))
print(HiveMaker(54))
|
StarcoderdataPython
|
3203110
|
# noob20000405/casbin-cpp
# Copyright 2021 The casbin Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
relative_path = '../../'
basic_model_path = relative_path + 'examples/basic_model.conf'
basic_policy_path = relative_path + 'examples/basic_policy.csv'
rbac_model_path = relative_path + 'examples/rbac_model.conf'
rbac_policy_path = relative_path + 'examples/rbac_policy.csv'
rbac_with_resource_roles_model_path = relative_path + 'examples/rbac_with_resource_roles_model.conf'
rbac_with_resource_roles_policy_path = relative_path + 'examples/rbac_with_resource_roles_policy.csv'
rbac_with_domains_model_path = relative_path + 'examples/rbac_with_domains_model.conf'
rbac_with_domains_policy_path = relative_path + 'examples/rbac_with_domains_policy.csv'
keymatch_model_path = relative_path + 'examples/keymatch_model.conf'
keymatch_policy_path = relative_path + 'examples/keymatch_policy.csv'
rbac_with_deny_model_path = relative_path + 'examples/rbac_with_deny_model.conf'
rbac_with_deny_policy_path = relative_path + 'examples/rbac_with_deny_policy.csv'
priority_model_path = relative_path + 'examples/priority_model.conf'
priority_policy_path = relative_path + 'examples/priority_policy.csv'
basic_model_without_spaces_path = relative_path + 'examples/basic_model_without_spaces.conf'
testini_path = relative_path + 'casbin/config/testdata/testini.ini'
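# Usage sketch for these paths (assumes the pycasbin package, which is not part of
# this test-data module; shown only to illustrate how the files fit together):
#
#   import casbin
#
#   e = casbin.Enforcer(basic_model_path, basic_policy_path)
#   print(e.enforce('alice', 'data1', 'read'))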
|
StarcoderdataPython
|
54440
|
#!/usr/bin/env python
import aptx
import argparse
import textwrap
import os.path
parser = argparse.ArgumentParser(
description='Print schema version of .aptx files.',
epilog='example: aptx_schemaver.py *.aptx')
parser.add_argument('aptxfiles', nargs='+', help='aptx file specification')
args = parser.parse_args()
dict = {}
for aptxfile in args.aptxfiles:
basename = os.path.basename(aptxfile)
rootname = os.path.splitext(basename)[0]  # rstrip('.aptx') would strip characters, not the suffix
try:
version = aptx.Proposal(aptxfile).schemaversion
dict[rootname] = version
except Exception:
print('error reading schema version for {}'.format(aptxfile))
if len(dict) > 0:
versions = sorted(set(dict.values()))
for version in versions:
rootnames = [k for k,v in dict.items() if v == version]
out = 'schemaVersion=' + version + ': ' + ' '.join(rootnames)
for line in textwrap.wrap(out, width=78):
print(line)
|
StarcoderdataPython
|
131137
|
# coding=utf-8
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import warnings
import numpy as np
from deslib.base import BaseDS
from deslib.util.aggregation import majority_voting_rule
from deslib.util.diversity import Q_statistic, ratio_errors, \
negative_double_fault, compute_pairwise_diversity
from sklearn import metrics
from sklearn.base import ClusterMixin
from sklearn.cluster import KMeans
class DESClustering(BaseDS):
"""Dynamic ensemble selection-Clustering (DES-Clustering).
This method selects an ensemble of classifiers taking into account the
accuracy and diversity of the base classifiers. The K-means algorithm is
used to define the region of competence. For each cluster, the N most
accurate classifiers are first selected. Then, the J more diverse
classifiers from the N most accurate classifiers are selected to
compose the ensemble.
Parameters
----------
pool_classifiers : list of classifiers (Default = None)
The generated_pool of classifiers trained for the corresponding
classification problem. Each base classifier should support the method
"predict". If None, then the pool of classifiers is a bagging
classifier.
clustering : sklearn.cluster (Default = None)
The clustering model used to estimate the region of competence.
If None, a KMeans with K = 5 is used.
pct_accuracy : float (Default = 0.5)
Percentage of base classifiers selected based on accuracy
pct_diversity : float (Default = 0.33)
Percentage of base classifiers selected based on diversity
more_diverse : Boolean (Default = True)
Whether we select the most or the least diverse classifiers
to add to the pre-selected ensemble
metric_diversity : String (Default = 'df')
Metric used to estimate the diversity of the base classifiers. Can be
either the double fault (df), Q-statistics (Q), or error correlation.
metric_performance : String (Default = 'accuracy_score')
Metric used to estimate the performance of a base classifier on a
cluster. Can be any metric from sklearn.metrics.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
DSEL_perc : float (Default = 0.5)
Percentage of the input data used to fit DSEL.
Note: This parameter is only used if the pool of classifiers is None or
unfitted.
n_jobs : int, default=-1
The number of parallel jobs to run. None means 1 unless in
a joblib.parallel_backend context. -1 means using all processors.
Doesn’t affect fit method.
References
----------
<NAME>., <NAME>., <NAME>., & <NAME>.
"Using accuracy and more_diverse to select classifiers to build ensembles."
International Joint Conference on Neural Networks (IJCNN)., 2006.
<NAME>., <NAME>, and <NAME>. "Dynamic selection
of classifiers—a comprehensive review."
Pattern Recognition 47.11 (2014): 3665-3680.
<NAME>, <NAME>, and <NAME>, “Dynamic classifier
selection: Recent advances and perspectives,”
Information Fusion, vol. 41, pp. 195 – 216, 2018.
"""
def __init__(self, pool_classifiers=None, clustering=None, with_IH=False,
safe_k=None, IH_rate=0.30, pct_accuracy=0.5,
pct_diversity=0.33, more_diverse=True, metric_diversity='DF',
metric_performance='accuracy_score', n_clusters=5,
random_state=None, DSEL_perc=0.5, n_jobs=-1):
super(DESClustering, self).__init__(pool_classifiers=pool_classifiers,
with_IH=with_IH,
safe_k=safe_k,
IH_rate=IH_rate,
random_state=random_state,
DSEL_perc=DSEL_perc,
n_jobs=n_jobs)
self.metric_diversity = metric_diversity
self.metric_performance = metric_performance
self.clustering = clustering
self.pct_accuracy = pct_accuracy
self.pct_diversity = pct_diversity
self.more_diverse = more_diverse
self.n_clusters = n_clusters
def fit(self, X, y):
""" Train the DS model by setting the Clustering algorithm and
pre-processing the information required to apply the DS
methods.
First the data is divided into K clusters. Then, for each cluster,
the N most accurate classifiers are first selected. Then, the J more
diverse classifiers from the N most accurate classifiers are selected
to compose the ensemble of the corresponding cluster. An ensemble of
classifiers is assigned to each of the K clusters.
Parameters
----------
X : array of shape (n_samples, n_features)
Data used to fit the model.
y : array of shape (n_samples)
class labels of each example in X.
Returns
-------
self
"""
super(DESClustering, self).fit(X, y)
self.N_ = int(self.n_classifiers_ * self.pct_accuracy)
self.J_ = int(np.ceil(self.n_classifiers_ * self.pct_diversity))
self._check_parameters()
self.metric_classifier_ = getattr(metrics, self.metric_performance)
if self.clustering is None:
if self.n_samples_ >= self.n_clusters:
self.clustering_ = KMeans(n_clusters=self.n_clusters,
random_state=self.random_state,
n_jobs=self.n_jobs)
else:
warnings.warn("n_clusters is bigger than DSEL size. "
"Using All DSEL examples as cluster centroids.",
category=RuntimeWarning)
self.clustering_ = KMeans(n_clusters=self.n_samples_,
random_state=self.random_state)
self.clustering_.fit(self.DSEL_data_)
else:
self.clustering_ = self.clustering.fit(self.DSEL_data_)
# set the diversity metric used
self._set_diversity_func()
# Since the clusters are fixed, we can pre-compute the accuracy and
# diversity of each cluster as well as the selected classifiers
# (indices) for each one. These pre-computed information will be kept
# on those three variables:
self.performance_cluster_ = np.zeros(
(self.clustering_.n_clusters, self.n_classifiers_))
self.diversity_cluster_ = np.zeros(
(self.clustering_.n_clusters, self.n_classifiers_))
self.indices_ = np.zeros((self.clustering_.n_clusters, self.J_),
dtype=int)
self._preprocess_clusters()
return self
def _preprocess_clusters(self):
"""Preprocess the competence as well as the average diversity of each
base classifier for each specific cluster.
This process makes the test routines faster, since the ensemble of
classifiers of each cluster is already predefined.
The class attributes performance_cluster_ and diversity_cluster_ store
the accuracy and diversity information, respectively, of each base
classifier for each cluster. The attribute indices_ stores the
pre-selected base classifiers for each cluster.
"""
labels = self.clustering_.predict(self.DSEL_data_)
for cluster_index in range(self.clustering_.n_clusters):
# Get the indices_ of the samples in the corresponding cluster.
sample_indices = np.where(labels == cluster_index)[0]
# Compute performance metric of each classifier in this cluster
score_classifier = self.get_scores_(sample_indices)
self.performance_cluster_[cluster_index, :] = score_classifier
# Get the N_ most accurate classifiers in the cluster
performance_indices = np.argsort(score_classifier)[::-1][0:self.N_]
# Get the target labels for the samples in the corresponding
# cluster for the diversity calculation.
targets = self.DSEL_target_[sample_indices]
self.diversity_cluster_[cluster_index, :] = \
compute_pairwise_diversity(targets,
self.BKS_DSEL_[sample_indices, :],
self.diversity_func_)
diversity_of_selected = self.diversity_cluster_[
cluster_index, performance_indices]
if self.more_diverse:
diversity_indices = np.argsort(diversity_of_selected)[::-1][
0:self.J_]
else:
diversity_indices = np.argsort(diversity_of_selected)[
0:self.J_]
self.indices_[cluster_index, :] = performance_indices[
diversity_indices]
def estimate_competence(self, query, predictions=None):
"""Get the competence estimates of each base classifier :math:`c_{i}`
for the classification of the query sample.
In this case, the competences were already pre-calculated for each
cluster. So this method computes the nearest cluster and get the
pre-calculated competences of the base classifiers for the
corresponding cluster.
Parameters
----------
query : array of shape (n_samples, n_features)
The query sample.
predictions : array of shape (n_samples, n_classifiers)
Predictions of the base classifiers for all test examples.
Returns
-------
competences : array = [n_samples, n_classifiers]
The competence level estimated for each base classifier.
"""
cluster_index = self.clustering_.predict(query)
competences = self.performance_cluster_[cluster_index][:]
return competences
def select(self, query):
"""Select an ensemble with the most accurate and most diverse
classifier for the classification of the query.
The ensemble for each cluster was already pre-calculated in the fit
method. So, this method calculates the closest cluster, and returns
the ensemble associated to this cluster.
Parameters
----------
query : array of shape (n_samples, n_features)
The test examples.
Returns
-------
selected_classifiers : array of shape = [n_samples, self.k]
Indices of the selected base classifier for each test example.
"""
cluster_index = self.clustering_.predict(query)
selected_classifiers = self.indices_[cluster_index, :]
return selected_classifiers
def classify_with_ds(self, query, predictions, probabilities=None,
neighbors=None, distances=None, DFP_mask=None):
"""Predicts the label of the corresponding query sample.
Parameters
----------
query : array of shape = [n_features]
The test sample.
predictions : array of shape (n_samples, n_classifiers)
Predictions of the base classifiers for all test examples.
probabilities : array of shape (n_samples, n_classifiers, n_classes)
Probabilities estimates of each base classifier for all test
examples.
neighbors : array of shape (n_samples, n_neighbors)
Indices of the k nearest neighbors for each test sample.
distances : array of shape (n_samples, n_neighbors)
Distances of the k nearest neighbors for each test
sample.
DFP_mask : array of shape (n_samples, n_classifiers)
Mask containing 1 for the selected base classifier and 0 otherwise.
Returns
-------
predicted_label : array of shape (n_samples)
Predicted class label for each test example.
"""
if query.ndim < 2:
query = query.reshape(1, -1)
if predictions.ndim < 2:
predictions = predictions.reshape(1, -1)
if query.shape[0] != predictions.shape[0]:
raise ValueError(
'The arrays query and predictions must have the same number'
' of samples. query.shape is {}'
'and predictions.shape is {}'.format(query.shape,
predictions.shape))
selected_classifiers = self.select(query)
votes = predictions[
np.arange(predictions.shape[0])[:, None], selected_classifiers]
predicted_label = majority_voting_rule(votes)
return predicted_label
def predict_proba_with_ds(self, query, predictions, probabilities,
neighbors=None, distances=None, DFP_mask=None):
"""Predicts the label of the corresponding query sample.
Parameters
----------
query : array of shape (n_samples, n_features)
The test examples.
predictions : array of shape (n_samples, n_classifiers)
Predictions of the base classifiers for all test examples.
probabilities : array of shape (n_samples, n_classifiers, n_classes)
Probabilities estimates of each base classifier for all test
examples.
neighbors : array of shape (n_samples, n_neighbors)
Indices of the k nearest neighbors for each test sample.
distances : array of shape (n_samples, n_neighbors)
Distances of the k nearest neighbors for each test sample.
DFP_mask : array of shape (n_samples, n_classifiers)
Mask containing 1 for the selected base classifier and 0 otherwise.
Returns
-------
predicted_proba : array of shape (n_samples, n_classes)
Posterior probabilities estimates for each test example.
"""
if query.shape[0] != probabilities.shape[0]:
raise ValueError(
'The arrays query and predictions must have the same number of'
' samples. query.shape is {}'
'and predictions.shape is {}'.format(query.shape,
predictions.shape))
selected_classifiers = self.select(query)
ensemble_proba = probabilities[
np.arange(probabilities.shape[0])[:, None],
selected_classifiers, :]
predicted_proba = np.mean(ensemble_proba, axis=1)
return predicted_proba
def _check_parameters(self):
"""Check if the parameters passed as argument are correct.
Raises
------
ValueError
If the hyper-parameters are incorrect.
"""
if self.metric_diversity not in ['DF', 'Q', 'ratio']:
raise ValueError(
'Diversity metric must be one of the following values:'
' "DF", "Q" or "Ratio"')
try:
getattr(metrics, self.metric_performance)
except AttributeError:
raise ValueError(
"Parameter metric_performance must be a sklearn metrics")
if self.N_ <= 0 or self.J_ <= 0:
raise ValueError("The values of N_ and J_ should be higher than 0"
"N_ = {}, J_= {} ".format(self.N_, self.J_))
if self.N_ < self.J_:
raise ValueError(
"The value of N_ should be greater or equals than J_"
"N_ = {}, J_= {} ".format(self.N_, self.J_))
if self.clustering is not None:
if not isinstance(self.clustering, ClusterMixin):
raise ValueError(
"Parameter clustering must be a sklearn"
" cluster estimator.")
def get_scores_(self, sample_indices):
def precision_function(label_predicted):
targets = self.DSEL_target_[sample_indices]
return self.metric_classifier_(targets, label_predicted)
label_predicted = self.BKS_DSEL_[sample_indices, :]
score_classifier = np.apply_along_axis(
precision_function, 0, label_predicted)
return score_classifier
def _set_diversity_func(self):
"""Set the diversity function to be used according to the
hyper-parameter metric_diversity
The diversity_func_ can be either the Double Fault, Q-Statistics
or Ratio of errors.
"""
if self.metric_diversity == 'DF':
self.diversity_func_ = negative_double_fault
elif self.metric_diversity == 'Q':
self.diversity_func_ = Q_statistic
else:
self.diversity_func_ = ratio_errors
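# Usage sketch (illustrative only, not part of deslib; assumes scikit-learn is installed):
#
#   from sklearn.datasets import make_classification
#   from sklearn.ensemble import BaggingClassifier
#   from sklearn.model_selection import train_test_split
#
#   X, y = make_classification(n_samples=1000, random_state=42)
#   X_train, X_dsel, y_train, y_dsel = train_test_split(X, y, test_size=0.5,
#                                                       random_state=42)
#   pool = BaggingClassifier(n_estimators=10, random_state=42).fit(X_train, y_train)
#   des = DESClustering(pool_classifiers=pool, random_state=42).fit(X_dsel, y_dsel)
#   print(des.score(X_dsel, y_dsel))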
|
StarcoderdataPython
|
152506
|
<filename>rss_reader/database/redis/__init__.py
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
from .base_set import MySettings
|
StarcoderdataPython
|
1784334
|
<gh_stars>0
# A lot of this code exists to deal w/ the broken ECS connect_to_region
# function, and will be removed once this pull request is accepted:
# https://github.com/boto/boto/pull/3143
import logging
logger = logging.getLogger(__name__)
from boto.regioninfo import get_regions
from boto.ec2containerservice.layer1 import EC2ContainerServiceConnection
def regions():
return get_regions("ec2containerservice",
connection_cls=EC2ContainerServiceConnection)
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
def create_clusters(region, namespace, mappings, parameters, **kwargs):
"""Creates ECS clusters.
Expects a "clusters" argument, which should contain a list of cluster
names to create.
"""
conn = connect_to_region(region)
try:
clusters = kwargs["clusters"]
except KeyError:
logger.error("setup_clusters hook missing \"clusters\" argument")
return False
if isinstance(clusters, basestring):
clusters = [clusters]
for cluster in clusters:
logger.debug("Creating ECS cluster: %s", cluster)
conn.create_cluster(cluster)
return True
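# Usage sketch (hypothetical values; the signature matches a stacker-style hook, so this
# would normally be invoked by the deployment tool rather than called directly):
#
#   create_clusters('us-east-1', namespace=None, mappings=None, parameters=None,
#                   clusters=['my-ecs-cluster'])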
|
StarcoderdataPython
|
69273
|
import pytest
from .context import mock, builtin_str
@pytest.fixture(scope='function')
def func():
def dummy(*args, **kwargs):
pass
f = mock.create_autospec(spec=dummy, name='fixture_function_to_decorate')
return f
@pytest.fixture(scope='function', autouse=True)
@mock.patch('{builtin}.open'.format(builtin=builtin_str))
def mocked_open(mock_open):
# mock_file = mock.Mock()
# mock_file.write.return_value = None
# mock_file.read.return_value = None
# mock_open.return_value = mock_file
return mock_open
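# Example of how a test might consume these fixtures (a sketch, not part of the suite;
# mocked_open is autouse, so open() is already patched inside every test):
#
#   def test_calls_are_recorded(func):
#       func(1, key='value')
#       func.assert_called_once_with(1, key='value')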
|
StarcoderdataPython
|
10018
|
<reponame>Karoline0097/University-of-Michigan-Python-for-Everybody
## Problem 5: Extracting Data from JSON
# Example: http://py4e-data.dr-chuck.net/comments_42.json
# data consists of a number of names and comment counts in JSON
# {
# comments: [
# {
# name: "Matthias"
# count: 97
# },
# {
# name: "Geomer"
# count: 97
# }
# ...
# ]
# }
import urllib.request, urllib.parse, urllib.error
import json
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# prompt for a URL
url = input('Enter URL: ')
# handle for data
data_handle = urllib.request.urlopen(url, context=ctx)
# read the JSON data from that URL using urllib
# decode UTF 8 byte array to Unicode string
data = data_handle.read().decode()
# parse string containing json into structured object (-> JSON object / Python dictionary)
# data_js is dictionary
data_js = json.loads(data)
# compute the sum of the numbers in the file
number_sum = 0
# parse and extract the comment counts from the JSON data,
# data_js['comments'] is list of dictionaries
# print(data_js['comments'])
for user in data_js['comments']:
print('Name:', user['name'])
print('Count:', user['count'])
number_sum = number_sum + user['count']
# Example: Total count 2553
print('Total Count:', number_sum)
|
StarcoderdataPython
|
3299646
|
import xml.etree.ElementTree as ET
import pandas as pd
import datetime
import re
import pymongo
import csv
from time import time
import sys
import os
# This script parses the raw Stack Overflow data from a single giant >70 GB XML file,
# down into a MongoDB collection and a csv with the metadata.
def xml_iterator(filename):
"""Iterates through an XMLfile too big to fit into memory, returning a dictionary for each element."""
is_first = True
for event, elem in ET.iterparse(filename, events=("start", "end")):
if is_first:
# Get the root element. We need to clear this after every iteration to avoid memory leak.
root = elem
is_first = False
if event == 'start':
# We are only interested in 'end' events (i.e., after a chunk of data has been read)
continue
if elem.attrib:
yield elem.attrib
# Clear the data manually to avoid memory leak.
elem.clear()
root.clear()
def clean_text(text):
"""
Remove code blocks, urls, and html tags.
"""
text = re.sub(r'<code[^>]*>(.+?)</code\s*>', '', text, flags=re.DOTALL | re.MULTILINE)
text = re.sub(r'<div[^>]*>(.+?)</div\s*>', '', text, flags=re.DOTALL | re.MULTILINE)
text = re.sub(r'<blockquote[^>]*>(.+?)</blockquote\s*>', '', text, flags=re.DOTALL | re.MULTILINE)
text = re.sub('<.*?>', '', text)
text = text.replace('"', '"')
text = re.sub(r'http\S+', '', text)
text = re.sub(r'www.\S+', '', text)
return text
def convert_time(s):
return datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%S.%f")
def split_tags(s):
tags = s.split('><')
tags[0] = tags[0][1:]
tags[-1] = tags[-1][:-1]
return tags
def prune_document(doc):
"""
Takes the dictonary of a post read from XML and takes only certain fields,
to be placed into the MongoDB.
"""
return {'_id': int(doc['Id']),
'CreationDate': convert_time(doc['CreationDate']),
'Score': int(doc['Score']),
'ViewCount': int(doc['ViewCount']),
'Body': clean_text(doc['Body']),
'Title': doc['Title'],
'Tags': split_tags(doc['Tags']),
'AnswerCount': int(doc['AnswerCount']),
'CommentCount': int(doc['CommentCount']),
'HasAcceptedAnswer': ('AcceptedAnswerId' in doc),
'Closed': ('ClosedDate' in doc)
}
# Name of the forum is passed as a command line argument.
forum = sys.argv[1]
print(f'Forum: {forum}')
os.chdir(forum)
# Batch for loading data in chunks
batch_size = 1000
# Metadata
metadata_cols = ['_id', 'CreationDate', 'Score', 'ViewCount', 'AnswerCount',
'CommentCount', 'HasAcceptedAnswer', 'Closed']
# Access the MongoDB to load the data into.
client = pymongo.MongoClient()
db = client.titlewave
mongo_posts = db[f'{forum}.posts']
# Clear any existing collection.
mongo_posts.drop()
# Parse the xml file into a MongoDB collection and a csv
filename = 'Posts.xml'
csv_filename = 'posts.csv'
print(os.getcwd())
dates = []
start_time = time()
i = 0
i_prev = 0
batch_dicts = []
with open(csv_filename, 'w', newline='') as csv_file:
csv_writer = csv.DictWriter(csv_file, metadata_cols, extrasaction='ignore')
csv_writer.writeheader()
for attrib in xml_iterator(filename):
if attrib['PostTypeId'] != '1':
# If the post isn't a question, skip it.
continue
# Preprocess the features in the dictionary.
attrib = prune_document(attrib)
batch_dicts.append(attrib)
i += 1
if i % batch_size == 0:
# Add to MongoDB
mongo_posts.insert_many(batch_dicts)
# Write to a csv (just the metadata features)
csv_writer.writerows(batch_dicts)
# Delete the batch to free up memory.
batch_dicts = []
# Whenever we get to a new year, print it to indicate progress.
d = attrib['CreationDate'].year
if not (d in dates):
posts_loaded = i - i_prev
duration = time() - start_time
rate = 10000 * duration / posts_loaded
print(f'{dates[-1] if dates else d}: loaded {posts_loaded} posts in {duration:.2f} s ({rate:.2f} s per 10,000 posts)')
dates.append(d)
i_prev = i
start_time = time()
# Write the remaining data
if batch_dicts:
mongo_posts.insert_many(batch_dicts)
csv_writer.writerows(batch_dicts)
print('Creating indices...')
start_time = time()
# Index the MongoDB collection by creation date.
mongo_posts.create_index('CreationDate')
mongo_posts.create_index([('Tags', pymongo.TEXT)])
print(f'Duration: {time() - start_time:.2f} s')
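# Example invocation (assumes a ./<forum>/Posts.xml dump exists, e.g. ./stackoverflow/Posts.xml,
# and a local MongoDB instance is running):
#   python <this_script>.py stackoverflow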
|
StarcoderdataPython
|
3286744
|
"""
A simple script to encode all the images the XRCed needs into a Python module
"""
import sys, os, glob
from wx.tools import img2py
def main(filemask, output):
# get the list of PNG files
files = glob.glob(filemask)
files.sort()
# Truncate the images module
open(output, 'w')
# call img2py on each file
for file in files:
# extract the basename to be used as the image name
name = os.path.splitext(os.path.basename(file))[0]
# encode it
if file == files[0]:
cmd = "-F -u -n %s %s %s" % (name, file, output)
else:
cmd = "-a -F -u -n %s %s %s" % (name, file, output)
img2py.main(cmd.split())
# Encode icons
files = glob.glob('src-images/*.ico')
files.sort()
for file in files:
# extract the basename to be used as the image name
name = os.path.splitext(os.path.basename(file))[0]
# encode it
cmd = "-a -F -i -u -n %s %s %s" % (name, file, output)
img2py.main(cmd.split())
if __name__ == "__main__":
main('src-images/*.png', 'images.py')
main('src-images/32x32/*.png', 'images_32x32.py')
|
StarcoderdataPython
|
3280067
|
<filename>scripts/dpdk_setup_ports.py
#! /bin/bash
"source" "find_python.sh" "--local"
"exec" "$PYTHON" "$0" "$@"
# hhaim
import sys
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
import os
python_ver = 'python%s' % sys.version_info[0]
yaml_path = os.path.join('external_libs', 'pyyaml-3.11', python_ver)
if yaml_path not in sys.path:
sys.path.append(yaml_path)
import yaml
import dpdk_nic_bind
import re
import argparse
import copy
import shlex
import traceback
from collections import defaultdict, OrderedDict
import subprocess
import platform
import stat
import time
import shutil
import signal
import glob
from dpdk_nic_bind import is_napatech
march = os.uname()[4]
# exit code is important; it should be one of:
# -1 : don't continue
# 0 : no errors - no need to load mlx share object
# 32 : no errors - mlx share object should be loaded
# 64 : no errors - napatech 3GD should be running
MLX_EXIT_CODE = 32
NTACC_EXIT_CODE = 64
class VFIOBindErr(Exception): pass
PATH_ARR = os.getenv('PATH', '').split(':')
for path in ['/usr/local/sbin', '/usr/sbin', '/sbin']:
if path not in PATH_ARR:
PATH_ARR.append(path)
os.environ['PATH'] = ':'.join(PATH_ARR)
def if_list_remove_sub_if(if_list):
return if_list
class ConfigCreator(object):
mandatory_interface_fields = ['Slot_str', 'Device_str', 'NUMA']
_2hex_re = '[\da-fA-F]{2}'
mac_re = re.compile('^({0}:){{5}}{0}$'.format(_2hex_re))
if march == 'ppc64le':
MAX_LCORE_NUM = 159
else:
MAX_LCORE_NUM = 63
# cpu_topology - dict: physical processor -> physical core -> logical processing unit (thread)
# interfaces - array of dicts per interface, should include "mandatory_interface_fields" values
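# e.g. (illustrative only):
#   cpu_topology = {0: {0: [0, 8], 1: [1, 9]}, 1: {2: [2, 10], 3: [3, 11]}}
#   interfaces   = [{'Slot_str': '03:00.0', 'Device_str': 'X710 10GbE', 'NUMA': 0}, ...]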
def __init__(self, cpu_topology, interfaces, include_lcores = [], exclude_lcores = [], only_first_thread = False, zmq_rpc_port = None, zmq_pub_port = None, prefix = None, ignore_numa = False):
self.cpu_topology = copy.deepcopy(cpu_topology)
self.interfaces = copy.deepcopy(interfaces)
del cpu_topology
del interfaces
assert isinstance(self.cpu_topology, dict), 'Type of cpu_topology should be dict, got: %s' % type(self.cpu_topology)
assert len(self.cpu_topology.keys()) > 0, 'cpu_topology should contain at least one processor'
assert isinstance(self.interfaces, list), 'Type of interfaces should be list, got: %s' % type(list)
assert len(self.interfaces) % 2 == 0, 'Should be even number of interfaces, got: %s' % len(self.interfaces)
assert len(self.interfaces) >= 2, 'Should be at least two interfaces, got: %s' % len(self.interfaces)
assert isinstance(include_lcores, list), 'include_lcores should be list, got: %s' % type(include_lcores)
assert isinstance(exclude_lcores, list), 'exclude_lcores should be list, got: %s' % type(exclude_lcores)
assert len(self.interfaces) >= 2, 'Should be at least two interfaces, got: %s' % len(self.interfaces)
if only_first_thread:
for cores in self.cpu_topology.values():
for core in cores.keys():
cores[core] = cores[core][:1]
include_lcores = [int(x) for x in include_lcores]
exclude_lcores = [int(x) for x in exclude_lcores]
self.has_zero_lcore = False
self.lcores_per_numa = {}
total_lcores = 0
for numa, cores in self.cpu_topology.items():
self.lcores_per_numa[numa] = {'main': [], 'siblings': [], 'all': []}
for core, lcores in cores.items():
total_lcores += len(lcores)
for lcore in list(lcores):
if include_lcores and lcore not in include_lcores:
cores[core].remove(lcore)
if exclude_lcores and lcore in exclude_lcores:
cores[core].remove(lcore)
if lcore > self.MAX_LCORE_NUM:
cores[core].remove(lcore)
if 0 in lcores:
self.has_zero_lcore = True
lcores.remove(0)
self.lcores_per_numa[numa]['siblings'].extend(lcores)
else:
self.lcores_per_numa[numa]['main'].extend(lcores[:1])
self.lcores_per_numa[numa]['siblings'].extend(lcores[1:])
self.lcores_per_numa[numa]['all'].extend(lcores)
for interface in self.interfaces:
for mandatory_interface_field in ConfigCreator.mandatory_interface_fields:
if mandatory_interface_field not in interface:
raise DpdkSetup("Expected '%s' field in interface dictionary, got: %s" % (mandatory_interface_field, interface))
Device_str = self._verify_devices_same_type(self.interfaces)
if '100Gb' in Device_str:
self.speed = 100
elif '50Gb' in Device_str:
self.speed = 50
elif '40Gb' in Device_str:
self.speed = 40
elif '25Gb' in Device_str:
self.speed = 25
elif '20Gb' in Device_str:
self.speed = 20
else:
self.speed = 10
minimum_required_lcores = len(self.interfaces) // 2 + 2
if total_lcores < minimum_required_lcores:
raise DpdkSetup('Your system should have at least %s cores for %s interfaces, and it has: %s.' %
(minimum_required_lcores, len(self.interfaces), total_lcores))
interfaces_per_numa = defaultdict(int)
for i in range(0, len(self.interfaces), 2):
if self.interfaces[i]['Slot_str'] == 'dummy':
numa = self.interfaces[i+1]['NUMA']
other_if_numa = self.interfaces[i]['NUMA']
else:
numa = self.interfaces[i]['NUMA']
other_if_numa = self.interfaces[i+1]['NUMA']
if numa != other_if_numa and not ignore_numa and self.interfaces[i]['Slot_str'] != 'dummy' and self.interfaces[i+1]['Slot_str'] != 'dummy':
raise DpdkSetup('NUMA of each pair of interfaces should be the same. Got NUMA %s for client interface %s, NUMA %s for server interface %s' %
(numa, self.interfaces[i]['Slot_str'], self.interfaces[i+1]['NUMA'], self.interfaces[i+1]['Slot_str']))
interfaces_per_numa[numa] += 2
self.interfaces_per_numa = interfaces_per_numa
self.prefix = prefix
self.zmq_pub_port = zmq_pub_port
self.zmq_rpc_port = zmq_rpc_port
self.ignore_numa = ignore_numa
@staticmethod
def verify_mac(mac_string):
if not ConfigCreator.mac_re.match(mac_string):
raise DpdkSetup('MAC address should be in format of 12:34:56:78:9a:bc, got: %s' % mac_string)
return mac_string.lower()
@staticmethod
def _exit_if_bad_ip(ip):
if not ConfigCreator._verify_ip(ip):
raise DpdkSetup("Got bad IP %s" % ip)
@staticmethod
def _verify_ip(ip):
a = ip.split('.')
if len(a) != 4:
return False
for x in a:
if not x.isdigit():
return False
i = int(x)
if i < 0 or i > 255:
return False
return True
@staticmethod
def _verify_devices_same_type(interfaces_list):
Device_str = interfaces_list[0]['Device_str']
if Device_str == 'dummy':
return Device_str
for interface in interfaces_list:
if interface['Device_str'] == 'dummy':
continue
if Device_str != interface['Device_str']:
raise DpdkSetup('Interfaces should be of same type, got:\n\t* %s\n\t* %s' % (Device_str, interface['Device_str']))
return Device_str
def create_config(self, filename = None, print_config = False):
config_str = '### Config file generated by dpdk_setup_ports.py ###\n\n'
config_str += '- version: 2\n'
config_str += " interfaces: ['%s']\n" % "', '".join([interface['Slot_str'] + interface.get("sub_interface", "") for interface in self.interfaces])
if self.speed > 10:
config_str += ' port_bandwidth_gb: %s\n' % self.speed
if self.prefix:
config_str += ' prefix: %s\n' % self.prefix
if self.zmq_pub_port:
config_str += ' zmq_pub_port: %s\n' % self.zmq_pub_port
if self.zmq_rpc_port:
config_str += ' zmq_rpc_port: %s\n' % self.zmq_rpc_port
config_str += ' port_info:\n'
for index, interface in enumerate(self.interfaces):
if 'ip' in interface:
self._exit_if_bad_ip(interface['ip'])
self._exit_if_bad_ip(interface['def_gw'])
config_str += ' '*6 + '- ip: %s\n' % interface['ip']
config_str += ' '*8 + 'default_gw: %s\n' % interface['def_gw']
else:
config_str += ' '*6 + '- dest_mac: %s' % self.verify_mac(interface['dest_mac'])
if interface.get('loopback_dest'):
config_str += " # MAC OF LOOPBACK TO IT'S DUAL INTERFACE\n"
else:
config_str += '\n'
config_str += ' '*8 + 'src_mac: %s\n' % self.verify_mac(interface['src_mac'])
if index % 2:
config_str += '\n' # dual if barrier
if not self.ignore_numa:
config_str += ' platform:\n'
if len(self.interfaces_per_numa.keys()) == 1 and -1 in self.interfaces_per_numa: # VM, use any cores
lcores_pool = sorted([lcore for lcores in self.lcores_per_numa.values() for lcore in lcores['all']])
config_str += ' '*6 + 'master_thread_id: %s\n' % (0 if self.has_zero_lcore else lcores_pool.pop(0))
config_str += ' '*6 + 'latency_thread_id: %s\n' % lcores_pool.pop(0)
lcores_per_dual_if = int(len(lcores_pool) * 2 / len(self.interfaces))
config_str += ' '*6 + 'dual_if:\n'
for i in range(0, len(self.interfaces), 2):
lcores_for_this_dual_if = list(map(str, sorted(lcores_pool[:lcores_per_dual_if])))
lcores_pool = lcores_pool[lcores_per_dual_if:]
if not lcores_for_this_dual_if:
raise DpdkSetup('lcores_for_this_dual_if is empty (internal bug, please report with details of setup)')
config_str += ' '*8 + '- socket: 0\n'
config_str += ' '*10 + 'threads: [%s]\n\n' % ','.join(lcores_for_this_dual_if)
else:
# we will take common minimum among all NUMAs, to satisfy all
lcores_per_dual_if = 99
extra_lcores = 1 if self.has_zero_lcore else 2
# worst case 3 iterations, to ensure master and "rx" have cores left
while (lcores_per_dual_if * sum(self.interfaces_per_numa.values()) / 2) + extra_lcores > sum([len(lcores['all']) for lcores in self.lcores_per_numa.values()]):
lcores_per_dual_if -= 1
for numa, lcores_dict in self.lcores_per_numa.items():
if not self.interfaces_per_numa[numa]:
continue
lcores_per_dual_if = min(lcores_per_dual_if, int(2 * len(lcores_dict['all']) / self.interfaces_per_numa[numa]))
lcores_pool = copy.deepcopy(self.lcores_per_numa)
# first, allocate lcores for dual_if section
dual_if_section = ' '*6 + 'dual_if:\n'
for i in range(0, len(self.interfaces), 2):
if self.interfaces[i]['Device_str'] == 'dummy':
numa = self.interfaces[i+1]['NUMA']
else:
numa = self.interfaces[i]['NUMA']
dual_if_section += ' '*8 + '- socket: %s\n' % numa
lcores_for_this_dual_if = lcores_pool[numa]['all'][:lcores_per_dual_if]
lcores_pool[numa]['all'] = lcores_pool[numa]['all'][lcores_per_dual_if:]
for lcore in lcores_for_this_dual_if:
if lcore in lcores_pool[numa]['main']:
lcores_pool[numa]['main'].remove(lcore)
elif lcore in lcores_pool[numa]['siblings']:
lcores_pool[numa]['siblings'].remove(lcore)
else:
raise DpdkSetup('lcore not in main nor in siblings list (internal bug, please report with details of setup)')
if not lcores_for_this_dual_if:
raise DpdkSetup('Not enough cores at NUMA %s. This NUMA has %s processing units and %s interfaces.' % (numa, len(self.lcores_per_numa[numa]), self.interfaces_per_numa[numa]))
dual_if_section += ' '*10 + 'threads: [%s]\n\n' % ','.join(list(map(str, sorted(lcores_for_this_dual_if))))
# take the cores left to master and rx
mains_left = [lcore for lcores in lcores_pool.values() for lcore in lcores['main']]
siblings_left = [lcore for lcores in lcores_pool.values() for lcore in lcores['siblings']]
if mains_left:
rx_core = mains_left.pop(0)
else:
rx_core = siblings_left.pop(0)
if self.has_zero_lcore:
master_core = 0
elif mains_left:
master_core = mains_left.pop(0)
else:
master_core = siblings_left.pop(0)
config_str += ' '*6 + 'master_thread_id: %s\n' % master_core
config_str += ' '*6 + 'latency_thread_id: %s\n' % rx_core
# add the dual_if section
config_str += dual_if_section
# verify config is correct YAML format
try:
yaml.safe_load(config_str)
except Exception as e:
raise DpdkSetup('Could not create correct yaml config.\nGenerated YAML:\n%s\nEncountered error:\n%s' % (config_str, e))
if print_config:
print(config_str)
if filename:
if os.path.exists(filename):
if not dpdk_nic_bind.confirm('File %s already exist, overwrite? (y/N)' % filename):
print('Skipping.')
return config_str
with open(filename, 'w') as f:
f.write(config_str)
print('Saved to %s.' % filename)
return config_str
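# For reference, the YAML produced above has roughly this shape (values illustrative):
#
#   - version: 2
#     interfaces: ['03:00.0', '03:00.1']
#     port_info:
#       - dest_mac: 12:34:56:78:9a:bc
#         src_mac: 12:34:56:78:9a:bd
#     platform:
#       master_thread_id: 0
#       latency_thread_id: 1
#       dual_if:
#         - socket: 0
#           threads: [2,3,4]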
# only load igb_uio if it's available
def load_igb_uio():
loaded_mods = dpdk_nic_bind.get_loaded_modules()
if 'igb_uio' in loaded_mods:
return True
if 'uio' not in loaded_mods:
ret = os.system('modprobe uio')
if ret:
return False
km = './ko/%s/igb_uio.ko' % dpdk_nic_bind.kernel_ver
if os.path.exists(km):
return os.system('insmod %s' % km) == 0
# try to compile igb_uio if it's missing
def compile_and_load_igb_uio():
loaded_mods = dpdk_nic_bind.get_loaded_modules()
if 'igb_uio' in loaded_mods:
return
if 'uio' not in loaded_mods:
ret = os.system('modprobe uio')
if ret:
print('Failed inserting uio module, please check if it is installed')
sys.exit(-1)
km = './ko/%s/igb_uio.ko' % dpdk_nic_bind.kernel_ver
if not os.path.exists(km):
print("ERROR: We don't have precompiled igb_uio.ko module for your kernel version")
print('Will try compiling automatically...')
build_path = '/tmp/trex-ko'
ret = os.system('mkdir -p %s' % build_path)
assert not ret, 'Makedirs failed'
ret = os.system('chmod -R 755 %s' % build_path)
assert not ret, 'chmod failed'
build_src_path = build_path + '/src'
shutil.rmtree(build_src_path, ignore_errors = True)
shutil.copytree('./ko/src', build_src_path)
try:
subprocess.check_output('make', cwd = build_src_path, stderr = subprocess.STDOUT, universal_newlines = True)
subprocess.check_output(['make', 'install'], cwd = build_src_path, stderr = subprocess.STDOUT, universal_newlines = True)
print('Success.\n')
except subprocess.CalledProcessError as e:
print('\n ERROR: Automatic compilation failed (return code: %s)' % e.returncode)
print(' Output:\n %s' % '\n '.join(e.output.splitlines()))
print('\nYou can try compiling yourself, using the following commands:')
print(' $mkdir -p /tmp/trex-ko')
print(' $cp -r ./ko/src /tmp/trex-ko')
print(' $cd /tmp/trex-ko/src')
print(' $make')
print(' $make install')
print(' $cd -')
print('Then, try to run TRex again.')
print('Note: you might need additional Linux packages for that:')
print(' * yum based (Fedora, CentOS, RedHat):')
print(' sudo yum install kernel-devel-`uname -r`')
print(' sudo yum group install "Development tools"')
print(' * apt based (Ubuntu):')
print(' sudo apt install linux-headers-`uname -r` build-essential')
sys.exit(-1)
km = os.path.join(build_path, dpdk_nic_bind.kernel_ver, 'igb_uio.ko')
ret = os.system('insmod %s' % km)
if ret:
print('Failed inserting igb_uio module')
sys.exit(-1)
class map_driver(object):
args=None;
cfg_file='/etc/trex_cfg.yaml'
parent_args = None
def pa():
return map_driver.parent_args
class DpdkSetup(Exception):
pass
class CIfMap:
def __init__(self, cfg_file):
self.m_cfg_file =cfg_file;
self.m_cfg_dict={};
self.m_devices={};
self.m_is_mellanox_mode=False;
def dump_error (self,err):
s="""%s
From this TRex version a configuration file must exist in /etc/ folder "
The name of the configuration file should be /etc/trex_cfg.yaml "
The minimum configuration file should include something like this
- version : 2 # version 2 of the configuration file
interfaces : ["03:00.0","03:00.1","13:00.1","13:00.0"] # list of the interfaces to bind run ./dpdk_nic_bind.py --status to see the list
port_limit : 2 # number of ports to use valid is 2,4,6,8,10,12
example of already bind devices
$ ./dpdk_nic_bind.py --status
Network devices using DPDK-compatible driver
============================================
0000:03:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv=igb_uio unused=
0000:03:00.1 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv=igb_uio unused=
0000:13:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv=igb_uio unused=
0000:13:00.1 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv=igb_uio unused=
Network devices using kernel driver
===================================
0000:02:00.0 '82545EM Gigabit Ethernet Controller (Copper)' if=eth2 drv=e1000 unused=igb_uio *Active*
Other network devices
=====================
""" % (err);
return s;
def raise_error (self,err):
s= self.dump_error (err)
raise DpdkSetup(s)
def set_only_mellanox_nics(self):
self.m_is_mellanox_mode=True;
def get_only_mellanox_nics(self):
return self.m_is_mellanox_mode
def read_pci (self,pci_id,reg_id):
out=subprocess.check_output(['setpci', '-s',pci_id, '%s.w' %(reg_id)])
out=out.decode(errors='replace');
return (out.strip());
def write_pci (self,pci_id,reg_id,val):
out=subprocess.check_output(['setpci','-s',pci_id, '%s.w=%s' %(reg_id,val)])
out=out.decode(errors='replace');
return (out.strip());
def tune_mlx_device (self,pci_id):
# set PCIe Read to 4K and not 512 ... need to add it to startup s
val=self.read_pci (pci_id,68)
if val[0]=='0':
#hypervisor does not give the right to write to this register
return;
if val[0]!='5':
val='5'+val[1:]
self.write_pci (pci_id,68,val)
assert(self.read_pci (pci_id,68)==val);
def get_mtu_mlx (self,dev_id):
if len(dev_id)>0:
try:
out=subprocess.check_output(['ifconfig', dev_id])
except Exception as e:
raise DpdkSetup(' "ifconfig %s" utility does not works, try to install it using "$yum install net-tools -y" on CentOS system' %(dev_id) )
out=out.decode(errors='replace');
obj=re.search(r'MTU:(\d+)',out,flags=re.MULTILINE|re.DOTALL);
if obj:
return int(obj.group(1));
else:
obj=re.search(r'mtu (\d+)',out,flags=re.MULTILINE|re.DOTALL);
if obj:
return int(obj.group(1));
else:
return -1
def set_mtu_mlx (self,dev_id,new_mtu):
if len(dev_id)>0:
out=subprocess.check_output(['ifconfig', dev_id,'mtu',str(new_mtu)])
out=out.decode(errors='replace');
def set_max_mtu_mlx_device(self,dev_id):
mtu=9*1024+22
dev_mtu=self.get_mtu_mlx (dev_id);
if (dev_mtu>0) and (dev_mtu!=mtu):
self.set_mtu_mlx(dev_id,mtu);
if self.get_mtu_mlx(dev_id) != mtu:
print("Could not set MTU to %d" % mtu)
sys.exit(-1);
def disable_flow_control_mlx_device (self,dev_id):
if len(dev_id)>0:
my_stderr = open("/dev/null","wb")
cmd ='ethtool -A '+dev_id + ' rx off tx off '
subprocess.call(cmd, stdout=my_stderr,stderr=my_stderr, shell=True)
my_stderr.close();
def check_ofed_version (self):
ofed_info='/usr/bin/ofed_info'
ofed_ver_re = re.compile('.*[-](\d)[.](\d)[-].*')
ofed_ver = 46
ofed_ver_show = '4.6'
if not os.path.isfile(ofed_info):
print("OFED %s is not installed on this setup" % ofed_info)
sys.exit(-1);
try:
out = subprocess.check_output([ofed_info])
except Exception as e:
print("OFED %s can't run " % (ofed_info))
sys.exit(-1);
lines=out.splitlines();
if len(lines)>1:
m= ofed_ver_re.match(str(lines[0]))
if m:
ver=int(m.group(1))*10+int(m.group(2))
if ver < ofed_ver:
print("installed OFED version is '%s' should be at least '%s' and up" % (lines[0],ofed_ver_show))
sys.exit(-1);
else:
print("not found valid OFED version '%s' " % (lines[0]))
sys.exit(-1);
def verify_ofed_os(self):
err_msg = 'Warning: Mellanox NICs were tested only with RedHat/CentOS 7.6\n'
err_msg += 'Correct usage with other Linux distributions is not guaranteed.'
try:
dist = platform.dist()
if dist[0] not in ('redhat', 'centos') or not dist[1].startswith('7.6'):
print(err_msg)
except Exception as e:
print('Error while determining OS type: %s' % e)
def load_config_file (self):
fcfg=self.m_cfg_file
if not os.path.isfile(fcfg) :
self.raise_error ("There is no valid configuration file %s\n" % fcfg)
try:
stream = open(fcfg, 'r')
self.m_cfg_dict= yaml.safe_load(stream)
except Exception as e:
print(e);
raise e
stream.close();
cfg_dict = self.m_cfg_dict[0]
if 'version' not in cfg_dict:
raise DpdkSetup("Configuration file %s is old, it should include version field\n" % fcfg )
if int(cfg_dict['version'])<2 :
raise DpdkSetup("Configuration file %s is old, expected version 2, got: %s\n" % (fcfg, cfg_dict['version']))
if 'interfaces' not in self.m_cfg_dict[0]:
raise DpdkSetup("Configuration file %s is old, it should include interfaces field with even number of elements" % fcfg)
if_list= if_list_remove_sub_if(self.m_cfg_dict[0]['interfaces']);
l=len(if_list);
if l > 24:
raise DpdkSetup("Configuration file %s should include interfaces field with maximum 24 elements, got: %s." % (fcfg,l))
if l % 2:
raise DpdkSetup("Configuration file %s should include even number of interfaces, got: %s" % (fcfg,l))
if 'port_limit' in cfg_dict:
if cfg_dict['port_limit'] > len(if_list):
raise DpdkSetup('Error: port_limit should not be higher than number of interfaces in config file: %s\n' % fcfg)
if cfg_dict['port_limit'] % 2:
raise DpdkSetup('Error: port_limit in config file must be even number, got: %s\n' % cfg_dict['port_limit'])
if cfg_dict['port_limit'] <= 0:
raise DpdkSetup('Error: port_limit in config file must be positive number, got: %s\n' % cfg_dict['port_limit'])
if pa() and pa().limit_ports is not None and pa().limit_ports > len(if_list):
raise DpdkSetup('Error: --limit-ports CLI argument (%s) must not be higher than number of interfaces (%s) in config file: %s\n' % (pa().limit_ports, len(if_list), fcfg))
def do_bind_all(self, drv, pci, force = False):
assert type(pci) is list
cmd = '{ptn} dpdk_nic_bind.py --bind={drv} {pci} {frc}'.format(
ptn = sys.executable,
drv = drv,
pci = ' '.join(pci),
frc = '--force' if force else '')
print(cmd)
return os.system(cmd)
# pros: no need to compile .ko per Kernel version
# cons: need special config/hw (not always works)
def try_bind_to_vfio_pci(self, to_bind_list):
krnl_params_file = '/proc/cmdline'
if not os.path.exists(krnl_params_file):
raise VFIOBindErr('Could not find file with Kernel boot parameters: %s' % krnl_params_file)
with open(krnl_params_file) as f:
krnl_params = f.read()
# IOMMU is always enabled on Power systems
if march != 'ppc64le' and 'iommu=' not in krnl_params:
raise VFIOBindErr('vfio-pci is not an option here')
if 'vfio_pci' not in dpdk_nic_bind.get_loaded_modules():
ret = os.system('modprobe vfio_pci')
if ret:
raise VFIOBindErr('Could not load vfio_pci')
ret = self.do_bind_all('vfio-pci', to_bind_list)
if ret:
raise VFIOBindErr('Binding to vfio_pci failed')
def pci_name_to_full_name (self,pci_name):
if pci_name == 'dummy':
return pci_name
c='[0-9A-Fa-f]';
sp='[:]'
s_short=c+c+sp+c+c+'[.]'+c;
s_full=c+c+c+c+sp+s_short
re_full = re.compile(s_full)
re_short = re.compile(s_short)
if re_short.match(pci_name):
return '0000:'+pci_name
if re_full.match(pci_name):
return pci_name
err=" %s is not a valid pci address \n" %pci_name;
raise DpdkSetup(err)
def run_dpdk_lspci (self):
dpdk_nic_bind.get_nic_details()
self.m_devices= dpdk_nic_bind.devices
def get_prefix(self):
if pa().prefix:
return pa().prefix
return self.m_cfg_dict[0].get('prefix', '')
def preprocess_astf_file_if_needed(self):
""" check if we are in astf batch mode, in case we are convert the profile to json in tmp"""
if not pa() or not pa().astf or pa().interactive:
return
input_file = pa().file
if not input_file:
return
instance_name = ''
prefix = self.get_prefix()
if prefix:
instance_name = '-' + prefix
dst_json_file = "/tmp/astf{instance}.json".format(instance=instance_name)
extension = os.path.splitext(input_file)[1]
if extension == '.json':
shutil.copyfile(input_file, dst_json_file)
os.chmod(dst_json_file, 0o777)
return
elif extension != '.py':
raise DpdkSetup('ERROR when running with --astf mode, you need to have a new Python profile format (.py) and not YAML')
print('converting astf profile %s to json %s' % (input_file, dst_json_file))
# imports from trex.astf
cur_path = os.path.abspath(os.path.dirname(__file__))
trex_path = os.path.join(cur_path, 'automation', 'trex_control_plane', 'interactive')
if trex_path not in sys.path:
sys.path.insert(1, trex_path)
from trex.astf.trex_astf_profile import ASTFProfile
from trex.astf.sim import decode_tunables
tunables = {}
if pa().tunable:
tunables = decode_tunables(pa().tunable)
try:
profile = ASTFProfile.load(input_file, **tunables)
json_content = profile.to_json_str()
except Exception as e:
raise DpdkSetup('ERROR: Could not convert astf profile to JSON:\n%s' % e)
with open(dst_json_file, 'w') as f:
f.write(json_content)
os.chmod(dst_json_file, 0o777)
def verify_stf_file(self):
""" check the input file of STF """
if not pa() or not pa().file or pa().astf:
return
extension = os.path.splitext(pa().file)[1]
if extension == '.py':
raise DpdkSetup('ERROR: Python files can not be used with STF mode, did you forget "--astf" flag?')
elif extension != '.yaml':
pass # should we fail here?
def is_hugepage_file_exits(self,socket_id):
t = ['2048','1048576']
for obj in t:
filename = '/sys/devices/system/node/node{}/hugepages/hugepages-{}kB/nr_hugepages'.format(socket_id,obj)
if os.path.isfile(filename):
return (True,filename,int(obj))
return (False,None,None)
def config_hugepages(self, wanted_count = None):
mount_output = subprocess.check_output('mount', stderr = subprocess.STDOUT).decode(errors='replace')
if 'hugetlbfs' not in mount_output:
huge_mnt_dir = '/mnt/huge'
if not os.path.isdir(huge_mnt_dir):
print("Creating huge node")
os.makedirs(huge_mnt_dir)
os.system('mount -t hugetlbfs nodev %s' % huge_mnt_dir)
for socket_id in range(2):
r = self.is_hugepage_file_exits(socket_id)
if not r[0]:
if socket_id == 0:
print('WARNING: hugepages config file does not exist!')
continue
if wanted_count is None:
if self.m_cfg_dict[0].get('low_end', False):
if socket_id == 0:
if pa() and pa().limit_ports:
if_count = pa().limit_ports
else:
if_count = self.m_cfg_dict[0].get('port_limit', len(self.m_cfg_dict[0]['interfaces']))
wanted_count = 20 + 40 * if_count
else:
wanted_count = 1 # otherwise, DPDK will not be able to see the device
else:
wanted_count = 2048
if r[2] > 2048:
wanted_count = wanted_count / 1024
if wanted_count < 1 :
wanted_count = 1
filename = r[1]
with open(filename) as f:
configured_hugepages = int(f.read())
if configured_hugepages < wanted_count:
os.system('echo %d > %s' % (wanted_count, filename))
time.sleep(0.1)
with open(filename) as f: # verify
configured_hugepages = int(f.read())
if configured_hugepages < wanted_count:
print('WARNING: tried to configure %d hugepages for socket %d, but result is: %d' % (wanted_count, socket_id, configured_hugepages))
def run_servers(self):
''' Run both scapy & bird server according to pa'''
if not pa():
return
try:
master_core = self.m_cfg_dict[0]['platform']['master_thread_id']
except:
master_core = 0
if should_scapy_server_run():
ret = os.system('{sys_exe} general_daemon_server restart -c {cores} -n {name} --py -e "{exe}" -r -d -i'.format(sys_exe=sys.executable,
cores=master_core,
name='Scapy',
exe='-m trex.scapy_server.scapy_zmq_server'))
if ret:
print("Could not start scapy daemon server, which is needed by GUI to create packets.\nIf you don't need it, use --no-scapy-server flag.")
sys.exit(-1)
if pa().bird_server:
ret = os.system('{sys_exe} general_daemon_server restart -n {name} --py -e "{exe}" -i'.format(sys_exe=sys.executable,
name='PyBird',
exe='-m trex.pybird_server.pybird_zmq_server'))
if ret:
print("Could not start bird server\nIf you don't need it, don't use --bird-server flag.")
sys.exit(-1)
if pa().emu:
emu_zmq_tcp_flag = '--emu-zmq-tcp' if pa().emu_zmq_tcp else ''
exe = './trex-emu {emu_zmq_tcp}'.format(emu_zmq_tcp = emu_zmq_tcp_flag)
ret = os.system('{sys_exe} general_daemon_server restart -n {name} --sudo -e "{exe}"'.format(sys_exe=sys.executable,
name='Emu',
exe=exe))
if ret:
print("Could not start emu service\nIf you don't need it, don't use -emu flag.")
sys.exit(-1)
# check vdev Linux interfaces status
# return True if interfaces are vdev
def check_vdev(self, if_list):
if not if_list:
return
af_names = []
ifname_re = re.compile('iface\s*=\s*([^\s,]+)')
found_vdev = False
found_pdev = False
for iface in if_list:
if iface == 'dummy':
continue
elif '--vdev' in iface:
found_vdev = True
if 'net_af_packet' in iface:
res = ifname_re.search(iface)
if res:
af_names.append(res.group(1))
elif ':' not in iface: # no PCI => assume af_packet
found_vdev = True
af_names.append(iface)
else:
found_pdev = True
if found_vdev:
if found_pdev:
raise DpdkSetup('You have mix of vdev and pdev interfaces in config file!')
for name in af_names:
if not os.path.exists('/sys/class/net/%s' % name):
raise DpdkSetup('ERROR: Could not find Linux interface %s.' % name)
oper_state = '/sys/class/net/%s/operstate' % name
if os.path.exists(oper_state):
with open(oper_state) as f:
f_cont = f.read().strip()
if f_cont in ('down', 'DOWN'):
raise DpdkSetup('ERROR: Requested Linux interface %s is DOWN.' % name)
return found_vdev
def check_trex_running(self, if_list):
if if_list and map_driver.args.parent and self.m_cfg_dict[0].get('enable_zmq_pub', True):
publisher_port = self.m_cfg_dict[0].get('zmq_pub_port', 4500)
pid = dpdk_nic_bind.get_tcp_port_usage(publisher_port)
if pid:
cmdline = dpdk_nic_bind.read_pid_cmdline(pid)
print('ZMQ port is used by following process:\npid: %s, cmd: %s' % (pid, cmdline))
sys.exit(-1)
# verify that all interfaces of i40e NIC are in use by current instance of TRex
def check_i40e_binds(self, if_list):
# i40e device IDs taken from dpdk/drivers/net/i40e/base/i40e_devids.h
i40e_device_ids = [0x1572, 0x1574, 0x1580, 0x1581, 0x1583, 0x1584, 0x1585, 0x1586, 0x1587, 0x1588, 0x1589, 0x158A, 0x158B]
iface_without_slash = set()
for iface in if_list:
iface_without_slash.add(self.split_pci_key(iface))
show_warning_devices = set()
unbind_devices = set()
for iface in iface_without_slash:
if iface == 'dummy':
continue
iface = self.split_pci_key(iface)
if self.m_devices[iface]['Device'] not in i40e_device_ids: # not i40e
return
iface_pci = iface.split('.')[0]
for device in self.m_devices.values():
if device['Slot'] in iface_without_slash: # we use it
continue
if iface_pci == device['Slot'].split('.')[0]:
if device.get('Driver_str') == 'i40e':
if pa() and pa().unbind_unused_ports:
# if --unbind-unused-ports is set we unbind ports that are not
# used by TRex
unbind_devices.add(device['Slot'])
else:
print('ERROR: i40e interface %s is under Linux and will interfere with TRex interface %s' % (device['Slot'], iface))
print('See following link for more information: https://trex-tgn.cisco.com/youtrack/issue/trex-528')
print('Unbind the interface from Linux with following command:')
print(' sudo ./dpdk_nic_bind.py -u %s' % device['Slot'])
print('')
sys.exit(-1)
if device.get('Driver_str') in dpdk_nic_bind.dpdk_drivers:
show_warning_devices.add(device['Slot'])
for dev in show_warning_devices:
print('WARNING: i40e interface %s is under DPDK driver and might interfere with current TRex interfaces.' % dev)
if unbind_devices:
print('Unbinding unused i40e interfaces: %s' % unbind_devices)
dpdk_nic_bind.unbind_all(unbind_devices, force=True)
def do_run (self, only_check_all_mlx=False):
""" returns code that specifies if interfaces are Mellanox/Napatech etc. """
self.load_config_file()
self.preprocess_astf_file_if_needed()
self.verify_stf_file()
if not pa() or pa().dump_interfaces is None or (pa().dump_interfaces == [] and pa().cfg):
if_list = if_list_remove_sub_if(self.m_cfg_dict[0]['interfaces'])
else:
if_list = pa().dump_interfaces
if not if_list:
self.run_dpdk_lspci()
for dev in self.m_devices.values():
if dev.get('Driver_str') in dpdk_nic_bind.dpdk_drivers + dpdk_nic_bind.dpdk_and_kernel:
if_list.append(dev['Slot'])
if self.check_vdev(if_list):
self.check_trex_running(if_list)
self.run_servers()
# no need to config hugepages
return
self.run_dpdk_lspci()
if_list = list(map(self.pci_name_to_full_name, if_list))
Broadcom_cnt=0;
# check how many mellanox cards we have
Mellanox_cnt=0;
dummy_cnt=0
for key in if_list:
if key == 'dummy':
dummy_cnt += 1
continue
key = self.split_pci_key(key)
if key not in self.m_devices:
err=" %s does not exist " %key;
raise DpdkSetup(err)
if 'Vendor_str' not in self.m_devices[key]:
err=" %s does not have Vendor_str " %key;
raise DpdkSetup(err)
if 'Mellanox' in self.m_devices[key]['Vendor_str']:
Mellanox_cnt += 1
if 'Broadcom' in self.m_devices[key]['Vendor_str']:
Broadcom_cnt += 1
if not (pa() and pa().dump_interfaces):
if (Mellanox_cnt > 0) and ((Mellanox_cnt + dummy_cnt) != len(if_list)):
err = "All driver should be from one vendor. You have at least one driver from Mellanox but not all."
raise DpdkSetup(err)
if Mellanox_cnt > 0:
self.set_only_mellanox_nics()
if self.get_only_mellanox_nics():
if not pa().no_ofed_check:
self.verify_ofed_os()
self.check_ofed_version()
for key in if_list:
if key == 'dummy':
continue
if pa().no_ofed_check: # in case of no-ofed don't optimized for Azure
continue
key = self.split_pci_key(key)
if 'Virtual' not in self.m_devices[key]['Device_str']:
pci_id = self.m_devices[key]['Slot_str']
self.tune_mlx_device(pci_id)
if 'Interface' in self.m_devices[key]:
dev_ids = self.m_devices[key]['Interface'].split(",")
for dev_id in dev_ids:
self.disable_flow_control_mlx_device (dev_id)
self.set_max_mtu_mlx_device(dev_id)
if only_check_all_mlx:
if Mellanox_cnt > 0:
sys.exit(MLX_EXIT_CODE);
else:
sys.exit(0);
self.check_i40e_binds(if_list)
self.check_trex_running(if_list)
self.config_hugepages() # should be after check of running TRex
self.run_servers()
Napatech_cnt=0;
to_bind_list = []
for key in if_list:
if key == 'dummy':
continue
key = self.split_pci_key(key)
if key not in self.m_devices:
err=" %s does not exist " %key;
raise DpdkSetup(err)
if (is_napatech(self.m_devices[key])):
# These adapters don't need binding
Napatech_cnt += 1
continue
if self.m_devices[key].get('Driver_str') not in (dpdk_nic_bind.dpdk_drivers + dpdk_nic_bind.dpdk_and_kernel):
to_bind_list.append(key)
if Napatech_cnt:
# This is currently a hack needed until the DPDK NTACC PMD can do proper
# cleanup.
os.system("ipcs | grep 2117a > /dev/null && ipcrm shm `ipcs | grep 2117a | cut -d' ' -f2` > /dev/null")
if to_bind_list:
if Mellanox_cnt:
ret = self.do_bind_all('mlx5_core', to_bind_list)
if ret:
ret = self.do_bind_all('mlx4_core', to_bind_list)
if ret:
raise DpdkSetup('Unable to bind interfaces to driver mlx5_core/mlx4_core.')
return MLX_EXIT_CODE
else:
if march == 'ppc64le':
print('Trying to bind to vfio-pci ...')
self.try_bind_to_vfio_pci(to_bind_list)
return
else:
# if igb_uio is ready, use it as safer choice, afterwards try vfio-pci
if load_igb_uio():
print('Trying to bind to igb_uio ...')
ret = self.do_bind_all('igb_uio', to_bind_list)
if ret:
raise DpdkSetup('Unable to bind interfaces to driver igb_uio.') # module present, loaded, but unable to bind
return
try:
print('Trying to bind to vfio-pci ...')
self.try_bind_to_vfio_pci(to_bind_list)
return
except VFIOBindErr as e:
pass
#print(e)
print('Trying to compile and bind to igb_uio ...')
compile_and_load_igb_uio()
ret = self.do_bind_all('igb_uio', to_bind_list)
if ret:
raise DpdkSetup('Unable to bind interfaces to driver igb_uio.')
elif Mellanox_cnt:
return MLX_EXIT_CODE
elif Napatech_cnt:
return NTACC_EXIT_CODE
def do_return_to_linux(self):
if not self.m_devices:
self.run_dpdk_lspci()
dpdk_interfaces = []
check_drivers = set()
for device in self.m_devices.values():
if device.get('Driver_str') in dpdk_nic_bind.dpdk_drivers:
dpdk_interfaces.append(device['Slot'])
check_drivers.add(device['Driver_str'])
if not dpdk_interfaces:
print('No DPDK bound interfaces.')
return
any_driver_used = False
for driver in check_drivers:
if dpdk_nic_bind.is_module_used(driver):
any_driver_used = True
if any_driver_used:
pid = dpdk_nic_bind.get_pid_using_pci(dpdk_interfaces)
if pid:
cmdline = dpdk_nic_bind.read_pid_cmdline(pid)
print('DPDK interfaces are in use. Unbinding them might cause the following process to hang:\npid: %s, cmd: %s' % (pid, cmdline))
if not dpdk_nic_bind.confirm('Confirm (y/N):'):
sys.exit(-1)
# DPDK => Linux
drivers_table = {
'net_ixgbe': 'ixgbe',
'net_ixgbe_vf': 'ixgbevf',
'net_e1000_igb': 'igb',
'net_i40e': 'i40e',
'net_i40e_vf': 'i40evf',
'net_e1000_em': 'e1000',
'net_vmxnet3': 'vmxnet3',
'net_virtio': 'virtio-pci',
'net_enic': 'enic',
'net_bnxt': 'bnxt_en',
}
nics_info = dpdk_nic_bind.get_info_from_trex(dpdk_interfaces)
if not nics_info:
raise DpdkSetup('Could not determine interfaces information. Try to run manually: sudo ./t-rex-64 --dump-interfaces')
for pci, info in nics_info.items():
if pci not in self.m_devices:
raise DpdkSetup('Internal error: PCI %s is not found among devices' % pci)
dev = self.m_devices[pci]
if info['TRex_Driver'] not in drivers_table:
raise DpdkSetup("Got unknown driver '%s', description: %s" % (info['TRex_Driver'], dev['Device_str']))
linux_driver = drivers_table[info['TRex_Driver']]
print('Returning to Linux %s' % pci)
success = dpdk_nic_bind.bind_one(pci, linux_driver, False)
if not success:
if linux_driver not in dpdk_nic_bind.get_loaded_modules():
print("No Linux driver installed, or wrong module name: %s" % linux_driver)
else:
print('Could not bind interface to module name: %s' % linux_driver)
def split_pci_key(self, pci_id):
return pci_id.split('/')[0]
def _get_cpu_topology(self):
cpu_topology = OrderedDict()
base_path = "/sys/devices/system/cpu"
cpus = []
file_re = re.compile(base_path + '/cpu([0-9]+)$')
for cpu_dir in glob.glob('{}/cpu*'.format(base_path)):
cpu_obj = file_re.match(cpu_dir)
if cpu_obj:
cpus.append(int(cpu_obj.group(1)))
cpus.sort()
for cpu in cpus:
# Find the socket ID of the current CPU
try:
with open("{}/cpu{}/topology/physical_package_id".format(base_path, cpu)) as f:
socket = int(f.read())
except IOError:
continue
except:
break
if socket not in cpu_topology:
cpu_topology[socket] = OrderedDict()
# Find the core ID of the current CPU
try:
with open("{}/cpu{}/topology/core_id".format(base_path, cpu)) as f:
core = int(f.read())
except IOError:
continue
except:
break
if core not in cpu_topology[socket]:
cpu_topology[socket][core] = []
# Capture the socket/core of the current CPU
cpu_topology[socket][core].append(cpu)
if not cpu_topology:
raise DpdkSetup('Could not determine CPU topology from %s' % base_path)
return cpu_topology
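# Illustrative note (comment added, not in the original script): the mapping returned
# above has the shape {socket_id: {core_id: [logical_cpu, ...]}}; for example, a
# single-socket machine with two hyper-threaded cores might yield
# OrderedDict([(0, OrderedDict([(0, [0, 2]), (1, [1, 3])]))]).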
# input: list of different descriptions of interfaces: index, pci, name etc.
# Binds to DPDK the wanted interfaces that are not bound to any driver.
# output: list of maps of devices in dpdk_* format (self.m_devices.values())
def _get_wanted_interfaces(self, input_interfaces, get_macs = True):
if type(input_interfaces) is not list:
raise DpdkSetup('type of input interfaces should be list')
if not len(input_interfaces):
raise DpdkSetup('Please specify interfaces to use in the config')
if len(input_interfaces) % 2:
raise DpdkSetup('Please specify even number of interfaces')
wanted_interfaces = []
sorted_pci = sorted(self.m_devices.keys())
for interface in input_interfaces:
if interface == 'dummy':
dev = {}
dev['Vendor_str'] = ''
dev['Slot'] = ''
dev['Slot_str'] = 'dummy'
dev['Device_str'] = 'dummy'
dev['NUMA'] = 0
dev['MAC'] = '00:00:00:00:00:00'
dev['Interface_argv'] = interface
wanted_interfaces.append(dict(dev))
continue
sub_interface = None
if "/" in interface:
interface,sub_interface = interface.split("/")
dev = None
try:
interface = int(interface)
if interface < 0 or interface >= len(sorted_pci):
raise DpdkSetup('Interface index should be in range 0:%s' % (len(sorted_pci) - 1))
dev = self.m_devices[sorted_pci[interface]]
except ValueError:
for d in self.m_devices.values():
if interface in (d['Interface'], d['Slot'], d['Slot_str']):
dev = d
break
if not dev:
raise DpdkSetup('Could not find information about this interface: %s' % interface)
if dev in wanted_interfaces and not sub_interface:
raise DpdkSetup('Interface %s is specified twice' % interface)
dev['Interface_argv'] = interface
if sub_interface:
nt_devs = dpdk_nic_bind.collect_nt_dev_info()
if nt_devs is None:
raise DpdkSetup('Sorry, for this script to work with Napatech SmartNICs you need ntservice running or, alternatively, the 3gd kernel module unloaded')
dev['sub_interface'] = "/" + sub_interface
sub_interface = int(sub_interface)
try:
num_ports = nt_devs[dev["Slot"]].get("num_ports", 0)
except KeyError:
raise DpdkSetup('Sorry, I know nothing about sub interface %s/%d' % (interface, sub_interface))
if sub_interface >= num_ports or sub_interface < 0 :
raise DpdkSetup('Sub interface %s/%d is invalid (valid range: %s/0 - %s/%d)' %
(interface, sub_interface, interface, interface, num_ports-1))
if nt_devs:
dev['MAC'] = nt_devs[dev["Slot"]].get("Mac_" + str(sub_interface), dev["MAC"])
wanted_interfaces.append(dict(dev))
if get_macs:
unbound = []
dpdk_bound = []
for interface in wanted_interfaces:
if 'Driver_str' not in interface and 'Napatech' not in interface['Vendor_str']:
unbound.append(interface['Slot'])
elif interface.get('Driver_str') in dpdk_nic_bind.dpdk_drivers:
dpdk_bound.append(interface['Slot'])
if unbound or dpdk_bound:
for pci, info in dpdk_nic_bind.get_info_from_trex(unbound + dpdk_bound).items():
if pci not in self.m_devices:
raise DpdkSetup('Internal error: PCI %s is not found among devices' % pci)
self.m_devices[pci].update(info)
return wanted_interfaces
def do_create(self):
ips = map_driver.args.ips
def_gws = map_driver.args.def_gws
dest_macs = map_driver.args.dest_macs
if map_driver.args.force_macs:
ip_config = False
if ips:
raise DpdkSetup("If using --force-macs, should not specify ips")
if def_gws:
raise DpdkSetup("If using --force-macs, should not specify default gateways")
elif ips:
ip_config = True
if not def_gws:
raise DpdkSetup("If specifying ips, must specify also def-gws")
if dest_macs:
raise DpdkSetup("If specifying ips, should not specify dest--macs")
if len(ips) != len(def_gws) or len(ips) != len(map_driver.args.create_interfaces):
raise DpdkSetup("Number of given IPs should equal number of given def-gws and number of interfaces")
else:
if dest_macs:
ip_config = False
else:
ip_config = True
# gather info about NICs from dpdk_nic_bind.py
if not self.m_devices:
self.run_dpdk_lspci()
wanted_interfaces = self._get_wanted_interfaces(map_driver.args.create_interfaces, get_macs = not ip_config)
for i, interface in enumerate(wanted_interfaces):
dual_index = i + 1 - (i % 2) * 2
if ip_config:
if isinstance(ips, list) and len(ips) > i:
interface['ip'] = ips[i]
else:
interface['ip'] = '.'.join([str(i+1) for _ in range(4)])
if isinstance(def_gws, list) and len(def_gws) > i:
interface['def_gw'] = def_gws[i]
else:
interface['def_gw'] = '.'.join([str(dual_index+1) for _ in range(4)])
else:
dual_if = wanted_interfaces[dual_index]
if 'MAC' not in interface:
raise DpdkSetup('Could not determine MAC of interface: %s. Please verify with -t flag.' % interface['Interface_argv'])
if 'MAC' not in dual_if:
raise DpdkSetup('Could not determine MAC of interface: %s. Please verify with -t flag.' % dual_if['Interface_argv'])
interface['src_mac'] = interface['MAC']
if isinstance(dest_macs, list) and len(dest_macs) > i:
interface['dest_mac'] = dest_macs[i]
else:
interface['dest_mac'] = dual_if['MAC']
interface['loopback_dest'] = True
config = ConfigCreator(self._get_cpu_topology(), wanted_interfaces, include_lcores = map_driver.args.create_include, exclude_lcores = map_driver.args.create_exclude,
only_first_thread = map_driver.args.no_ht, ignore_numa = map_driver.args.ignore_numa,
prefix = map_driver.args.prefix, zmq_rpc_port = map_driver.args.zmq_rpc_port, zmq_pub_port = map_driver.args.zmq_pub_port)
if map_driver.args.output_config:
config.create_config(filename = map_driver.args.output_config)
else:
print('### Dumping config to screen, use -o flag to save to file')
config.create_config(print_config = True)
def do_interactive_create(self):
ignore_numa = False
cpu_topology = self._get_cpu_topology()
total_lcores = sum([len(lcores) for cores in cpu_topology.values() for lcores in cores.values()])
if total_lcores < 1:
raise DpdkSetup('Script could not determine number of cores of the system, exiting.')
elif total_lcores < 2:
if dpdk_nic_bind.confirm("You only have 1 core and can't run TRex at all. Ignore and continue? (y/N): "):
ignore_numa = True
else:
sys.exit(1)
elif total_lcores < 3:
if dpdk_nic_bind.confirm("You only have 2 cores and will be able to run only stateful without latency checks.\nIgnore and continue? (y/N): "):
ignore_numa = True
else:
sys.exit(1)
if map_driver.args.force_macs:
ip_based = False
elif dpdk_nic_bind.confirm("By default, IP based configuration file will be created. Do you want to use MAC based config? (y/N)"):
ip_based = False
else:
ip_based = True
ip_addr_digit = 1
if not self.m_devices:
self.run_dpdk_lspci()
dpdk_nic_bind.show_table(get_macs = not ip_based)
print('Please choose an even number of interfaces from the list above, either by ID, PCI address or Linux interface name.')
print('Stateful mode will use the interface order Client1 Server1 Client2 Server2 etc. for flows.')
print('Stateless mode can use the interfaces in any order.')
numa = None
for dev in self.m_devices.values():
if numa is None:
numa = dev['NUMA']
elif numa != dev['NUMA']:
print('For performance, try to choose each pair of interfaces to be on the same NUMA node.')
break
while True:
try:
user_input = dpdk_nic_bind.read_line('Enter a list of interfaces separated by spaces (for example: 1 3): ')
create_interfaces = user_input.replace(',', ' ').replace(';', ' ').split()
wanted_interfaces = self._get_wanted_interfaces(create_interfaces, get_macs = not ip_based)
ConfigCreator._verify_devices_same_type(wanted_interfaces)
except Exception as e:
print(e)
continue
break
print('')
for interface in wanted_interfaces:
if interface['Active']:
print('Interface %s is active. Using it with TRex might close SSH connections etc.' % interface['Interface_argv'])
if not dpdk_nic_bind.confirm('Ignore and continue? (y/N): '):
sys.exit(-1)
for i, interface in enumerate(wanted_interfaces):
if not ip_based:
if 'MAC' not in interface:
raise DpdkSetup('Could not determine MAC of interface: %s. Please verify with -t flag.' % interface['Interface_argv'])
interface['src_mac'] = interface['MAC']
dual_index = i + 1 - (i % 2) * 2
dual_int = wanted_interfaces[dual_index]
if not ignore_numa and interface['NUMA'] != dual_int['NUMA']:
print('NUMA differs between paired interfaces %s and %s. This will reduce performance.' % (interface['Interface_argv'], dual_int['Interface_argv']))
if dpdk_nic_bind.confirm('Ignore and continue? (y/N): '):
ignore_numa = True
print('')
else:
return
if ip_based:
if ip_addr_digit % 2 == 0:
dual_ip_digit = ip_addr_digit - 1
else:
dual_ip_digit = ip_addr_digit + 1
ip = '.'.join([str(ip_addr_digit) for _ in range(4)])
def_gw= '.'.join([str(dual_ip_digit) for _ in range(4)])
ip_addr_digit += 1
print("For interface %s, assuming loopback to it's dual interface %s." % (interface['Interface_argv'], dual_int['Interface_argv']))
if dpdk_nic_bind.confirm("Putting IP %s, default gw %s Change it?(y/N)." % (ip, def_gw)):
while True:
ip = dpdk_nic_bind.read_line('Please enter IP address for interface %s: ' % interface['Interface_argv'])
if not ConfigCreator._verify_ip(ip):
print ("Bad IP address format")
else:
break
while True:
def_gw = dpdk_nic_bind.read_line('Please enter default gateway for interface %s: ' % interface['Interface_argv'])
if not ConfigCreator._verify_ip(def_gw):
print ("Bad IP address format")
else:
break
wanted_interfaces[i]['ip'] = ip
wanted_interfaces[i]['def_gw'] = def_gw
else:
dest_mac = dual_int['MAC']
loopback_dest = True
print("For interface %s, assuming loopback to it's dual interface %s." % (interface['Interface_argv'], dual_int['Interface_argv']))
if dpdk_nic_bind.confirm("Destination MAC is %s. Change it to MAC of DUT? (y/N)." % dest_mac):
while True:
input_mac = dpdk_nic_bind.read_line('Please enter new destination MAC of interface %s: ' % interface['Interface_argv'])
try:
if input_mac:
ConfigCreator.verify_mac(input_mac) # verify format
dest_mac = input_mac
loopback_dest = False
else:
print('Leaving the loopback MAC.')
except Exception as e:
print(e)
continue
break
wanted_interfaces[i]['dest_mac'] = dest_mac
wanted_interfaces[i]['loopback_dest'] = loopback_dest
config = ConfigCreator(cpu_topology, wanted_interfaces, include_lcores = map_driver.args.create_include, exclude_lcores = map_driver.args.create_exclude,
only_first_thread = map_driver.args.no_ht, ignore_numa = map_driver.args.ignore_numa or ignore_numa,
prefix = map_driver.args.prefix, zmq_rpc_port = map_driver.args.zmq_rpc_port, zmq_pub_port = map_driver.args.zmq_pub_port)
if dpdk_nic_bind.confirm('Print preview of generated config? (Y/n)', default = True):
config.create_config(print_config = True)
if dpdk_nic_bind.confirm('Save the config to file? (Y/n)', default = True):
print('Default filename is /etc/trex_cfg.yaml')
filename = dpdk_nic_bind.read_line('Press ENTER to confirm or enter new file: ')
if not filename:
filename = '/etc/trex_cfg.yaml'
config.create_config(filename = filename)
def parse_parent_cfg (parent_cfg):
parent_parser = argparse.ArgumentParser(add_help = False)
parent_parser.add_argument('-?', '-h', '--help', dest = 'help', action = 'store_true')
parent_parser.add_argument('--cfg', default='')
parent_parser.add_argument('--prefix', default='')
parent_parser.add_argument('--dump-interfaces', nargs='*', default=None)
parent_parser.add_argument('--no-ofed-check', action = 'store_true')
parent_parser.add_argument('--no-scapy-server', action = 'store_true')
parent_parser.add_argument('--bird-server', action = 'store_true', default=False)
parent_parser.add_argument('--emu', action = 'store_true', default=False)
parent_parser.add_argument('--emu-zmq-tcp', action = 'store_true', default=False)
parent_parser.add_argument('--scapy-server', action = 'store_true')
parent_parser.add_argument('--no-watchdog', action = 'store_true')
parent_parser.add_argument('--astf', action = 'store_true')
parent_parser.add_argument('--limit-ports', type = int)
parent_parser.add_argument('-f', dest = 'file')
parent_parser.add_argument('-t', dest = 'tunable', default=None)
parent_parser.add_argument('-i', action = 'store_true', dest = 'interactive', default = False)
parent_parser.add_argument("--unbind-unused-ports", action='store_true')
map_driver.parent_args, _ = parent_parser.parse_known_args(shlex.split(parent_cfg))
if pa().help:
sys.exit(0)
if pa().limit_ports is not None:
if pa().limit_ports % 2:
raise DpdkSetup('ERROR: --limit-ports CLI argument must be an even number, got: %s' % pa().limit_ports)
if pa().limit_ports <= 0:
raise DpdkSetup('ERROR: --limit-ports CLI argument must be positive, got: %s' % pa().limit_ports)
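# Illustrative note (comment added, not in the original source): parse_parent_cfg()
# receives the TRex server command line as a single string and keeps only the flags it
# knows about, e.g. parse_parent_cfg('--astf -i --limit-ports 4 --cfg /etc/trex_cfg.yaml')
# leaves pa().astf == True, pa().interactive == True, pa().limit_ports == 4 and
# pa().cfg == '/etc/trex_cfg.yaml'; unrecognized tokens are ignored via parse_known_args.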
def process_options ():
parser = argparse.ArgumentParser(usage="""
Examples:
---------
To return the DPDK-bound interfaces to Linux (for ifconfig etc.)
sudo ./dpdk_setup_ports.py -L
To create a TRex config file using interactive mode
sudo ./dpdk_setup_ports.py -i
To create a default config file (example)
sudo ./dpdk_setup_ports.py -c 02:00.0 02:00.1 -o /etc/trex_cfg.yaml
To show interfaces status
sudo ./dpdk_setup_ports.py -s
To see more detailed info on interfaces (table)
sudo ./dpdk_setup_ports.py -t
""",
description=" unbind dpdk interfaces ",
epilog=" written by hhaim");
parser.add_argument("-l", '-L', "--linux", action='store_true',
help=""" Return all DPDK interfaces to Linux driver """,
)
parser.add_argument("--cfg",
help=""" configuration file name """,
)
parser.add_argument("--parent",
help=argparse.SUPPRESS
)
parser.add_argument('--dump-pci-description', help=argparse.SUPPRESS, dest='dump_pci_desc', action='store_true')
parser.add_argument("-i", "--interactive", action='store_true',
help=""" Create TRex config in interactive mode """,
)
parser.add_argument("-c", "--create", nargs='*', default=None, dest='create_interfaces', metavar='<interface>',
help="""Try to create a configuration file by specifying needed interfaces by PCI address or Linux names: eth1 etc.""",
)
parser.add_argument("--ci", "--cores-include", nargs='*', default=[], dest='create_include', metavar='<cores>',
help="""White list of cores to use. Make sure there is enough for each NUMA.""",
)
parser.add_argument("--ce", "--cores-exclude", nargs='*', default=[], dest='create_exclude', metavar='<cores>',
help="""Black list of cores to exclude. Make sure there will be enough for each NUMA.""",
)
parser.add_argument("--cleanup-servers", action='store_true',
help="Kill all Python servers (PyBird and Scapy).",
)
parser.add_argument("--no-ht", default=False, dest='no_ht', action='store_true',
help="""Use only one thread of each Core in created config yaml (No Hyper-Threading).""",
)
parser.add_argument("--dest-macs", nargs='*', default=[], action='store',
help="""Destination MACs to be used in created yaml file. Without them, will assume loopback (0<->1, 2<->3 etc.)""",
)
parser.add_argument("--force-macs", default=False, action='store_true',
help="""Use MACs in created config file.""",
)
parser.add_argument("--ips", nargs='*', default=[], action='store',
help="""IP addresses to be used in created yaml file. Without them, will assume loopback (0<->1, 2<->3 etc.)""",
)
parser.add_argument("--def-gws", nargs='*', default=[], action='store',
help="""Default gateways to be used in created yaml file. Without them, will assume loopback (0<->1, 2<->3 etc.)""",
)
parser.add_argument("-o", default=None, action='store', metavar='PATH', dest = 'output_config',
help="""Output the config to this file.""",
)
parser.add_argument("--prefix", default=None, action='store',
help="""Advanced option: prefix to be used in TRex config in case of parallel instances.""",
)
parser.add_argument("--zmq-pub-port", default=None, action='store',
help="""Advanced option: ZMQ Publisher port to be used in TRex config in case of parallel instances.""",
)
parser.add_argument("--zmq-rpc-port", default=None, action='store',
help="""Advanced option: ZMQ RPC port to be used in TRex config in case of parallel instances.""",
)
parser.add_argument("--ignore-numa", default=False, action='store_true',
help="""Advanced option: Ignore NUMAs for config creation. Use this option only if you have to, as it will reduce performance.""",
)
parser.add_argument("-s", "--show", action='store_true',
help=""" Show the status """,
)
parser.add_argument("-t", "--table", action='store_true',
help=""" Show table with NICs info """,
)
parser.add_argument("-m", "--memory", action='store_true',
help=""" Show memory banks topology (channels per NUMA) """,
)
parser.add_argument('--version', action='version',
version="0.2" )
map_driver.args = parser.parse_args();
if map_driver.args.parent :
parse_parent_cfg (map_driver.args.parent)
if pa().cfg:
map_driver.cfg_file = pa().cfg;
if pa().prefix:
map_driver.prefix = pa().prefix
if map_driver.args.cfg :
map_driver.cfg_file = map_driver.args.cfg;
def signal_handler(sig, frame):
sys.stdout.write('Caught signal SIGUSR1, printing traceback:\n')
sys.stdout.flush()
traceback.print_stack(frame)
sys.exit(1)
def should_scapy_server_run():
return not pa().no_scapy_server and pa().interactive and (pa().scapy_server or not pa().astf)
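# Added note: the Scapy daemon runs only in interactive mode, is suppressed by
# --no-scapy-server, and in ASTF mode is started only when --scapy-server is passed
# explicitly.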
def kill_scapy():
ret = os.system('%s general_daemon_server stop -n Scapy' % sys.executable)
if ret:
print("Could not stop scapy daemon server.")
sys.exit(-1)
def kill_pybird():
ret = os.system('%s general_daemon_server stop -n PyBird' % sys.executable)
if ret:
print("Could not stop bird daemon server.")
sys.exit(-1)
def kill_emu():
ret = os.system('%s general_daemon_server stop -n Emu' % sys.executable)
if ret:
print("Could not stop bird daemon server.")
sys.exit(-1)
def cleanup_servers():
''' cleanup scapy and bird servers '''
if should_scapy_server_run():
kill_scapy()
if pa().bird_server:
kill_pybird()
if pa().emu:
kill_emu()
def main ():
try:
if os.getuid() != 0:
raise DpdkSetup('Please run this program as root/with sudo')
signal.signal(signal.SIGUSR1, signal_handler)
process_options ()
if map_driver.args.cleanup_servers:
cleanup_servers()
return
if map_driver.args.show:
dpdk_nic_bind.show_status()
return
if map_driver.args.table:
dpdk_nic_bind.show_table()
return
if map_driver.args.memory:
dpdk_nic_bind.show_memory()
return
if map_driver.args.dump_pci_desc:
dpdk_nic_bind.dump_pci_description()
return
obj =CIfMap(map_driver.cfg_file);
if map_driver.args.create_interfaces is not None:
obj.do_create();
elif map_driver.args.interactive:
obj.do_interactive_create();
elif map_driver.args.linux:
obj.do_return_to_linux();
elif pa() is None or pa().dump_interfaces is None:
ret = obj.do_run()
print('The ports are bound/configured.')
sys.exit(ret)
elif pa().dump_interfaces:
obj.config_hugepages(1)
print('')
except DpdkSetup as e:
print(e)
sys.exit(-1)
except Exception:
traceback.print_exc()
sys.exit(-1)
except KeyboardInterrupt:
print('Ctrl+C')
sys.exit(-1)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3229058
|
#!/usr/bin/env python
import collections
import itertools
import json
import re
import ankipandas
from pypinyin import pinyin, Style
max_words = 10
MAX_RANK = 30_000
freq = json.load(open("global_wordfreq.release_UTF-8.json"))
freq = {word: rank for word, rank in freq.items() if rank <= MAX_RANK}
COLS = {
"1": "e30000",
"2": "02b31c",
"3": "1510f0",
"4": "8900bf",
"5": "777777",
}
def colorize_char(char, tone):
col = COLS[tone]
return f'<span style="color:#{col}">{char}</span>'
def colorize_word(word):
pinyin_syllables = pinyin(word, style=Style.TONE3, heteronym=False)
tones = [syl[0][-1] for syl in pinyin_syllables]
tones = [tone if tone.isdigit() else "5" for tone in tones]
#print(word, tones)
return "".join(colorize_char(char, tone) for char, tone in zip(word, tones))
col = ankipandas.Collection()
han = col.notes[col.notes.nmodel == "Hanzi Writing"].copy()
han.fields_as_columns(inplace=True)
chars = list(han.nfld_Hanzi)
chars_in_col = set(chars)
words = collections.defaultdict(list)
for word, rank in sorted(freq.items(), key=lambda x: x[1]):
print(word, rank)
for char in word:
if char in chars_in_col and len(words[char]) < max_words:
try:
colored = colorize_word(word)
except KeyError as exc:
print(f"unable to colorize {word}, {exc}")
continue
#words[char].append(f"{colored}: {rank}")
words[char].append(colored)
words = {k: v[:max_words] for k, v in words.items()}
words = {k: "<br>".join(v) for k, v in words.items()}
# print(list(words.items())[:10])
char_words = [words.get(c, "") for c in chars]
han["nfld_Words"] = char_words
han.fields_as_list(inplace=True)
col.notes.update(han)
print(col.summarize_changes())
|
StarcoderdataPython
|
3321864
|
import numpy as np
import matplotlib.pyplot as plt
"""
SDA.py: Plots the probability of the SDA model generating a connection
based on different parameters for Figure 2.4.
"""
def prop(a, b, d):
return 1 / (1 + (d / b) ** a)
n = 3
alphas = [2, 3, 8]
betas = [2, 3, 5]
colors = ['r', 'g', 'b']
d = np.arange(0.0, 10.0, 0.1)
ax = plt.subplot()
for i in range(n):
res = prop(alphas[i], betas[i], d)
label = 'alpha= {}, beta= {}'.format(alphas[i], betas[i])
ax.plot(d, res, color=colors[i], label=label)
plt.xlabel('d(i,j)')
plt.ylabel('p(i,j)')
plt.legend()
plt.show()
|
StarcoderdataPython
|
3203814
|
from flask import Flask, request, render_template, jsonify, url_for
from utils import clean_text
import pickle
import time
import os
app = Flask(__name__)
MODEL_VERSION = 'model_V0.pkl'
VECTORIZER_VERSION = 'vectorizer_V0.pkl'
# load model assets
vectorizer_path = os.path.join(os.getcwd(), 'model_assets', VECTORIZER_VERSION)
model_path = os.path.join(os.getcwd(), 'model_assets', MODEL_VERSION)
vectorizer = pickle.load(open(vectorizer_path, 'rb'))
model = pickle.load(open(model_path, 'rb'))
# TODO: add versioning to url
@app.route('/', methods=['GET', 'POST'])
def predict():
""" Main webpage with user input through form and prediction displayed
:return: main webpage host, displays prediction if user submitted in text field
"""
if request.method == 'POST':
response = request.form['text']
input_text = clean_text(response)
input_text = vectorizer.transform([input_text])
prediction = model.predict(input_text)
prediction = 'Cyber-Troll' if prediction[0] == 1 else 'Non Cyber-Troll'
return render_template('index.html', text=prediction, submission=response)
if request.method == 'GET':
return render_template('index.html')
# TODO: add versioning to api
@app.route('/predict', methods=['POST'])
def predict_api():
""" endpoint for model queries (non gui)
:return: json, model prediction and response time
"""
start_time = time.time()
request_data = request.json
input_text = request_data['data']
input_text = clean_text(input_text)
input_text = vectorizer.transform([input_text])
prediction = model.predict(input_text)
prediction = 'Cyber-Troll' if prediction[0] == 1 else "Non Cyber-Troll" # post processing
response = {'prediction': prediction, 'response_time': time.time() - start_time}
return jsonify(response)
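# Example client call (comment added for illustration, not part of the original app).
# With the dev server started via app.run(debug=True) below, Flask listens on
# 127.0.0.1:5000 by default, so the JSON endpoint can be exercised with the
# `requests` package:
# import requests
# resp = requests.post('http://127.0.0.1:5000/predict', json={'data': 'some tweet text'})
# print(resp.json())  # e.g. {'prediction': 'Non Cyber-Troll', 'response_time': 0.01}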
if __name__ == '__main__':
app.run(debug=True)
|
StarcoderdataPython
|
1751300
|
import io
import json
import pandas as pd
from sachima.filter_enum import FilterEnum
from sachima.log import logger
def delfunc(sql, e):
buf = io.StringIO(sql)
temp = ""
for line in buf.readlines():
if "{" + e + "}" in line and "-- ifnulldel" in line:
# logger.debug("del sql line: " + line)
pass
elif "{" + e + "}" in line and "-- ifnulldel" not in line:
temp += line.replace("{" + e + "}", "")
else:
temp += line
return temp
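# Clarifying note (added): delfunc() is the fallback used when str.format() hits a missing
# key `e`. Lines that reference {e} and carry the `-- ifnulldel` marker are dropped
# entirely, while lines that reference {e} without the marker just have the placeholder
# blanked out, e.g.
# delfunc("select *\nfrom t\nwhere x = {x} -- ifnulldel\n", "x") -> "select *\nfrom t\n"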
def sql_format(sql, params):
try:
# logger.info("sql lens:" + str(len(sql)))
return sql.format(**params)
except KeyError as e:
newsql = delfunc(sql, str(e).replace("'", ""))
return sql_format(newsql, params)
def set_sql_params(sql, params):
"""
Substitute params into a SQL template and return the resulting SQL string.
for example:
select {colname1} from {tablename} where {colname2} = '{value}'
"""
copy_params = {}
copy_params.update(params)
# convert dict to tuple for sql
for k in params:
logger.info(
"set sql param {} {} type {} ".format(
k, copy_params[k], type(copy_params[k])
)
)
if isinstance(copy_params[k], list):
if k + "0" in sql or k + "1" in sql:
copy_params[k + "0"] = copy_params[k][0]
copy_params[k + "1"] = copy_params[k][1]
if len(copy_params[k]) == 0:
copy_params[k] = [""]
copy_params[k] = str(tuple(copy_params[k])).replace(",)", ")")
# logger.debug("convert dict to tuple for sql: " + copy_params[k])
finalsql = sql_format(sql, copy_params)
logger.debug(finalsql)
return finalsql
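# Usage sketch (comment added for illustration; the table and column names are made up):
# set_sql_params("select {col} from t where id in {ids}", {"col": "name", "ids": [1, 2]})
# -> "select name from t where id in (1, 2)"
# List parameters are rendered as SQL tuples, and any placeholder missing from `params`
# is handled by delfunc() above.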
class Filter:
def __init__(self, id, setter, **kw):
"""
id: str
setter: tuple
"""
self.id = id
self.setter = setter
self.kw = kw
def __repr__(self):
return "Filter({!r})".format(self.id)
def to_json(self, data):
res = {"id": "", "props": {}}
res.update(self.kw)
res["id"] = self.id
if not isinstance(data, pd.DataFrame):
raise TypeError("pd.DataFrame expected but get {}".format(str(type(data))))
# TODO: improve building the JSON string from the enum tree
for arg in self.setter:
if isinstance(arg, FilterEnum.TYPE):
res["type"] = arg.value
if isinstance(arg, FilterEnum.PROPS.MODE):
res["props"].update({"mode": arg.value})
if isinstance(arg, FilterEnum.PROPS.ALLOWCLEAR):
res["props"].update({"allowClear": arg.value})
if isinstance(arg, FilterEnum.PROPS.SIZE):
res["props"].update({"size": arg.value})
if isinstance(arg, FilterEnum.PROPS.SHOWTIME):
res["props"].update({"showTime": arg.value})
if isinstance(arg, FilterEnum.PROPS.DEFAULTOPEN):
res["props"].update({"defaultOpen": arg.value})
if isinstance(arg, FilterEnum.PROPS.OPEN):
res["props"].update({"defaultOpen": arg.value})
if isinstance(arg, FilterEnum.PROPS.LOADING):
res["props"].update({"loading": arg.value})
if isinstance(arg, FilterEnum.PROPS.SHOWSEARCH):
res["props"].update({"showSearch": arg.value})
if isinstance(arg, dict):
colname = arg.get("option", None)
if isinstance(colname, str) and colname in data.columns:
res.update(
{"option": data[colname].map(lambda x: x).unique().tolist()}
)
elif isinstance(colname, list):
res.update(arg)
elif isinstance(colname, str):
res.update({"option": ["handler返回的数据没有字段名" + colname + " 请手动输入数据"]})
res["props"].update({"mode": "tags"})
else:
res["props"].update(arg)
return res
|
StarcoderdataPython
|
3235214
|
from django.contrib import admin
from .models import *
from .forms import MultipleChoiceTestAnswerInlineFormset
from Test.models import Test
class MultipleChoiceTestAnswerInline(admin.TabularInline):
model = MultipleChoiceTestAnswer
formset = MultipleChoiceTestAnswerInlineFormset
extra = 0
verbose_name = "Варіант відповіді"
verbose_name_plural = "Варіанти відповіді"
class TestInTaskListInline(admin.TabularInline):
model = Test.task_lists.through
extra = 0
verbose_name = "Список питань"
verbose_name_plural = "Списки питань"
readonly_fields = ("test", "task_count")
can_delete = False
class TaskListInKnowledgeFieldInline(admin.TabularInline):
model = TaskList
extra = 0
verbose_name = "Список завдань"
verbose_name_plural = "Списки завдань"
class KnowledgeFieldAdmin(admin.ModelAdmin):
list_display = (
"__str__",
)
search_fields = (
"name",
)
inlines = (
TaskListInKnowledgeFieldInline,
)
class TaskListAdmin(admin.ModelAdmin):
list_display = (
"__str__",
)
search_fields = (
"name",
"knowledge_field__name",
)
inlines = (
TestInTaskListInline,
)
class MultipleChoiceTestAdmin(admin.ModelAdmin):
list_display = (
"pk",
"__str__",
"task_list",
)
search_fields = (
"text",
)
inlines = (
MultipleChoiceTestAnswerInline,
)
admin.site.register(KnowledgeField, KnowledgeFieldAdmin)
admin.site.register(TaskList, TaskListAdmin)
admin.site.register(MultipleChoiceTest, MultipleChoiceTestAdmin)
|
StarcoderdataPython
|
1651451
|
# examples/order_management/order/src/demo_data.py
import asyncio
from src.store import Base, create_order, engine
async def create_demo_data():
async with engine.begin() as conn:
await conn.run_sync(Base.metadata.drop_all)
await conn.run_sync(Base.metadata.create_all)
await create_order(1, ["apple", "orange"])
await create_order(1, ["python", "boa"])
await create_order(2, ["pizza", "burger", "coca-cola"])
if __name__ == "__main__":
asyncio.run(create_demo_data())
|
StarcoderdataPython
|
178268
|
from typing import Text, Any, Dict, List, Union, Optional, Tuple, Set
from anytree import Resolver
from dataclasses import dataclass
from dataclasses_json import dataclass_json
from anytree.node.nodemixin import NodeMixin
from anytree.node.util import _repr
from anytree.search import findall, findall_by_attr, find
from sagas.conf.conf import cf
import pandas as pd
import logging
from sagas.nlu.anal_data_types import ref_
from sagas.nlu.warehouse_bucket import AnalRecord
from sagas.nlu.warehouse_intf import ResourceType
from sagas.nlu.warehouse_service import AnalService
from sagas.ofbiz.services import OfService as s, oc, track
from sagas.ofbiz.entities import OfEntity as e, all_entities
logger = logging.getLogger(__name__)
class Warehouse(NodeMixin, object):
name: str
def __init__(self, parent=None, children=None, **kwargs):
self.__dict__.update(kwargs)
self.parent = parent
if children:
self.children = children
def __repr__(self):
return _repr(self)
@staticmethod
def all_entities():
return all_entities(include_view=True)
@staticmethod
def all_services():
return oc.all_service_names()
@staticmethod
def create() -> 'Warehouse':
ents=[cf.get_bucket(e)(name=e, resource_type=ResourceType.EntityModel) for e in all_entities(include_view=True)]
services = [AnalService(name=e, resource_type=ResourceType.ServiceModel) for e in Warehouse.all_services()]
wh=Warehouse(name='_warehouse', children=[*ents, *services])
return wh
def entity(self, ent_name):
return find(self, lambda node: node.name == ent_name and node.resource_type==ResourceType.EntityModel, maxlevel=2)
def service(self, name):
return find(self, lambda node: node.name == name and node.resource_type==ResourceType.ServiceModel, maxlevel=2)
def __floordiv__(self, patt):
"""
>>> from sagas.nlu.warehouse import warehouse as wh
>>> wh//'find*'
>>> wh//'*Person*'
>>> [(el, el.words) for el in wh//'*Person*']
:param patt:
:return:
"""
if isinstance(patt, str):
r = Resolver('name')
return r.glob(self, patt)
elif isinstance(patt, ref_):
val=self.resolve_entity(patt.val)
return [val]
else:
return []
def __truediv__(self, patt):
rs= self.__floordiv__(patt)
return rs[0] if rs else None
def resolve_entity(self, gid):
from sagas import from_global_id
t, _ = from_global_id(gid)
ent = self / t
val=ent.meta.global_ref.get_record(gid)
return AnalRecord(name=val.getEntityName(),
resource_type=ResourceType.EntityValue,
value=val)
def get_gid(self, val):
ent = self / val.getEntityName()
return ent.meta.global_ref.get_gid(val)
@property
def qualified(self):
return '_'
def get(self, path):
"""
>>> from sagas.nlu.warehouse import warehouse as wh
>>> wh.get("/_/ent:Person")
:param path:
:return:
"""
r = Resolver('qualified')
return r.get(self, path)
def ping(self) -> Tuple[bool, Dict[Text, Any]]:
from sagas.ofbiz.services import OfService as s, oc, track
ok, r = track(lambda a: s().testScv(defaultValue=5.5, message="hello world"))
return ok, r
@property
def srv(self):
from sagas.ofbiz.services import OfService as s
return s()
@property
def ent(self):
from sagas.ofbiz.entities import OfEntity as e
return e()
def e(self, dialect=None):
"""
>>> from sagas.nlu.warehouse import warehouse as wh
>>> wh.e('dict').refPerson('10000')
:param dialect:
:return:
"""
from sagas.ofbiz.entities import OfEntity as e
return e(dialect)
warehouse=Warehouse.create()
|
StarcoderdataPython
|
4806255
|
import logging
from typing import Tuple
import torch
from torch import Tensor
from tqdm import tqdm
from . import optim
from .extrinsics import full_extrinsics, partial_extrinsics
from .geometry import check_origin, spiral, transform
from .orthonorm import orthonorm
from .plot import Scatter
from .polyfit import poly_theta_to_r, polyfit
from .projection import project_poly_rz, project_poly_thetar
def get_reprojection_error(poly: Tensor, r: Tensor, t: Tensor, ip: Tensor,
wp: Tensor) -> Tensor:
'''
Reprojection error per image point
'''
view_points = transform(wp, r, t)
ip_c = project_poly_rz(view_points, poly)
reprojection_error = torch.linalg.norm(ip - ip_c, dim=-1)
return reprojection_error
def fit_reprojection_error(degree: int, r: Tensor, t_par: Tensor, ip: Tensor,
wp: Tensor) -> float:
'''
Fit the polynomial and return the per-point reprojection error (or inf if the solution is rejected)
'''
poly, t = polyfit(degree, ip, wp, r, t_par)
# reject solution
if poly[0] < 0 or not check_origin(r, t).squeeze().item():
return float('inf')
return get_reprojection_error(poly, r, t, ip, wp).squeeze()
def show_points(title: str, figure_path: str, image_points: Tensor,
projected_points: Tensor,
images=None, image_shape=None) -> None:
'''
Scatter plot of image points and reprojected points
'''
scatter = Scatter(title, len(image_points))
if images is not None:
scatter.imshow(images, image_shape)
scatter(image_points, color='g', marker='o')
scatter(projected_points, color='r', marker='x')
scatter.save(f'{figure_path}.pdf', dpi=300)
scatter.show()
def _latex_float(v):
'''
Format number in latex math syntax
'''
s = f'{v:.1e}'
if s.endswith('e+00'):
return s[:-4]
elif s == 'nan':
return '\\mathrm{nan}'
else:
base, exponent = s.split('e')
base, exponent = float(base), int(exponent)
return f'{base}\\mathrm{{e}}{{{exponent:+d}}}'
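# Examples of the formatting above (comment added for clarity):
# _latex_float(1.0)          -> '1.0'            (the 'e+00' suffix is stripped)
# _latex_float(0.00012)      -> '1.2\mathrm{e}{-4}'
# _latex_float(float('nan')) -> '\mathrm{nan}'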
def get_error_str(x):
'''
Format error to nice string
'''
x = x.flatten()
return (
f'reprojection error $\\mu={_latex_float(x.mean())},'
f' \\sigma={_latex_float(x.std() if len(x) > 1 else 0.)}$'
)
def calibrate(degree: int, reprojection_error_threshold: float,
reprojection_count: int, image_points: Tensor,
world_points: Tensor, principal_point_initial: Tensor,
images: Tensor = None,
image_shape: Tuple[int, int] = None,
spiral_step: int = 10, spiral_end: int = 100) \
-> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
'''
Full calibration algorithm: brute-force search for the principal point, initial extrinsics and polynomial fit on a subset of images, then nonlinear refinement over all images
'''
logger = logging.getLogger('calibrate')
if spiral_step and spiral_end:
logger.info(
'Brute-force search for principal point'
f' ({reprojection_count} images'
' with mean reprojection error <='
f' {reprojection_error_threshold:.1f})'
)
progress = tqdm(list(spiral(spiral_step, end=spiral_end)))
else:
progress = tqdm([principal_point_initial])
for x_spiral, y_spiral in progress:
principal_point = principal_point_initial + \
image_points.new_tensor((x_spiral, y_spiral))
image_points_spiral = image_points - principal_point
R_par, T_par = partial_extrinsics(image_points_spiral, world_points)
valid = []
results = []
for idx, (r_par, t_par) in enumerate(zip(R_par, T_par)):
ip = image_points_spiral[idx]
wp = world_points[idx]
min_error = float('inf')
best = None
for r_ort, t_par_ort in orthonorm(r_par, t_par):
e = fit_reprojection_error(
degree, r_ort[None], t_par_ort[None], ip[None], wp[None])
e_mean = e.mean().item() if isinstance(e, Tensor) else e
if e_mean < min_error:
min_error = e_mean
if e_mean < reprojection_error_threshold:
best = e, r_ort, t_par_ort
if best is None:
valid.append(False)
else:
valid.append(True)
results.append(best)
progress.set_description(
f'{min_error:.3f} {int(x_spiral):+4d} {int(y_spiral):+4d}')
if len(results) >= reprojection_count:
break
valid = torch.tensor(valid)
logger.info(f'Valid solution for {valid.sum()}/{len(valid)} images')
if not torch.any(valid):
raise Exception('No initial solution found')
reprojection_errors, R, T_par_ort = [torch.stack(x) for x in zip(*results)]
assert torch.allclose(torch.linalg.det(R), R.new_ones(len(R)))
mean_reprojection_errors = reprojection_errors.mean(dim=1)
reprojection_error_threshold = \
torch.sort(mean_reprojection_errors)[0][reprojection_count - 1]
fit_mask = mean_reprojection_errors <= reprojection_error_threshold
error_str = get_error_str(reprojection_errors[fit_mask])
logger.info(f'Initial {error_str}')
logger.info(
f'Initial principal point ({principal_point[0]:.1f},'
f' {principal_point[1]:.1f})'
)
logger.info(
f'Initial solution for {fit_mask.sum()}/{len(fit_mask)} selected'
)
valid[torch.where(valid)[0][~fit_mask]] = False
R = R[fit_mask]
assert torch.allclose(torch.linalg.det(R), R.new_ones(len(R)))
poly, T = polyfit(degree, image_points[valid] - principal_point,
world_points[valid], R, T_par_ort[fit_mask]
)
show_points(
f'Initial Solution for Subset ({error_str})',
'initial_solution',
image_points[valid],
project_poly_rz(
transform(world_points[valid], R, T), poly, principal_point),
images[valid] if images is not None else None,
image_shape
)
assert torch.allclose(torch.linalg.det(R), R.new_ones(len(R)))
success, reprojection_errors, res = optim.reprojection(
R, T, poly, principal_point, image_points[valid],
world_points[valid]
)
if not success:
raise Exception('Optimization failed')
error_str = get_error_str(reprojection_errors)
logger.info(f'Optimized {error_str}')
R, T, poly_theta, principal_point = res
assert torch.allclose(torch.linalg.det(R), R.new_ones(len(R)))
logger.info(
f'Optimized principal point ({principal_point[0]:.1f},'
f' {principal_point[1]:.1f})'
)
show_points(
f'Optimized Solution for Subset ({error_str})',
'optimized_solution',
image_points[valid],
project_poly_thetar(
transform(world_points[valid], R, T), poly_theta, principal_point),
images[valid] if images is not None else None,
image_shape
)
poly = poly_theta_to_r(poly_theta, transform(world_points[valid], R, T))
R, T = full_extrinsics(poly, image_points - principal_point, world_points)
assert torch.allclose(torch.linalg.det(R), R.new_ones(len(R)), atol=1e-2)
success, reprojection_errors, res = optim.reprojection(
R, T, poly, principal_point, image_points, world_points)
if not success:
raise Exception('Optimization failed')
error_str = get_error_str(reprojection_errors)
logger.info(f'Final {error_str}')
R, T, poly_theta, principal_point = res
assert torch.allclose(torch.linalg.det(R), R.new_ones(len(R)), atol=1e-2)
assert torch.all(check_origin(R, T))
logger.info(
f'Final principal point ({principal_point[0]:.1f},'
f' {principal_point[1]:.1f})'
)
show_points(
f'Final Solution ({error_str})',
'final_solution',
image_points,
project_poly_thetar(transform(world_points, R, T),
poly_theta, principal_point),
images,
image_shape
)
poly = poly_theta_to_r(poly_theta, transform(world_points, R, T))
return R, T, poly_theta, poly, principal_point
|
StarcoderdataPython
|
98186
|
import json
import os
import random
import string
from math import asin
from math import ceil
from math import cos
from math import degrees
from math import pi
from math import radians
from math import sin
from math import sqrt
from math import tan
from pyaedt.generic.general_methods import _retry_ntimes
from pyaedt.generic.general_methods import pyaedt_function_handler
from pyaedt.modeler.actors import Bird
from pyaedt.modeler.actors import Person
from pyaedt.modeler.actors import Vehicle
from pyaedt.modeler.GeometryOperators import GeometryOperators
from pyaedt.modeler.multiparts import Environment
from pyaedt.modeler.multiparts import MultiPartComponent
from pyaedt.modeler.Primitives import Primitives
class Primitives3D(Primitives, object):
"""Manages primitives in 3D tools.
This class is inherited by the calling application and is
accessible through the modeler object
(e.g. ``hfss.modeler`` or ``icepak.modeler``).
Parameters
----------
application : str
Name of the application.
Examples
--------
Basic usage demonstrated with an HFSS, Maxwell 3D, Icepak, Q3D, or Mechanical design:
>>> from pyaedt import Hfss
>>> aedtapp = Hfss()
>>> prim = aedtapp.modeler
"""
def __init__(self):
Primitives.__init__(self)
self.multiparts = []
@pyaedt_function_handler()
def create_point(self, position, name=None, color="(143 175 143)"):
"""Create a point.
Parameters
----------
position : list
List of ``[x, y, z]`` coordinates. Note, The list can be empty or contain less than 3 elements.
name : str, optional
Name of the point. The default is ``None``, in which case the
default name is assigned.
color : str, optional
String exposing 3 int values such as "(value1 value2 value3)". Default value is ``"(143 175 143)"``.
Returns
-------
:class:`pyaedt.modeler.object3dlayout.Point`
Point object.
References
----------
>>> oEditor.CreateBox
Examples
--------
>>> from pyaedt import Hfss
>>> hfss = Hfss()
>>> point_object = hfss.modeler.primitives.create_point([0,0,0], name="mypoint")
"""
x_position, y_position, z_position = self._pos_with_arg(position)
if not name:
unique_name = "".join(random.sample(string.ascii_uppercase + string.digits, 6))
name = "NewPoint_" + unique_name
parameters = ["NAME:PointParameters"]
parameters.append("PointX:="), parameters.append(x_position)
parameters.append("PointY:="), parameters.append(y_position)
parameters.append("PointZ:="), parameters.append(z_position)
attributes = ["NAME:Attributes"]
attributes.append("Name:="), attributes.append(name)
attributes.append("Color:="), attributes.append(color)
point = _retry_ntimes(10, self.oeditor.CreatePoint, parameters, attributes)
return self._create_point(name)
@pyaedt_function_handler()
def create_box(self, position, dimensions_list, name=None, matname=None):
"""Create a box.
Parameters
----------
position : list
Center point for the box in a list of ``[x, y, z]`` coordinates.
dimensions_list : list
Dimensions for the box in a list of ``[x, y, z]`` coordinates.
name : str, optional
Name of the box. The default is ``None``, in which case the
default name is assigned.
matname : str, optional
Name of the material. The default is ``None``, in which case the
default material is assigned. If the material name supplied is
invalid, the default material is assigned.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
3D object.
References
----------
>>> oEditor.CreateBox
Examples
--------
>>> from pyaedt import Hfss
>>> hfss = Hfss()
>>> origin = [0,0,0]
>>> dimensions = [10,5,20]
>>> #Material and name are not mandatory fields
>>> box_object = hfss.modeler.primitives.create_box(origin, dimensions, name="mybox", matname="copper")
"""
assert len(position) == 3, "Position Argument must be a valid 3 Element List"
assert len(dimensions_list) == 3, "Dimension Argument must be a valid 3 Element List"
XPosition, YPosition, ZPosition = self._pos_with_arg(position)
XSize, YSize, ZSize = self._pos_with_arg(dimensions_list)
vArg1 = ["NAME:BoxParameters"]
vArg1.append("XPosition:="), vArg1.append(XPosition)
vArg1.append("YPosition:="), vArg1.append(YPosition)
vArg1.append("ZPosition:="), vArg1.append(ZPosition)
vArg1.append("XSize:="), vArg1.append(XSize)
vArg1.append("YSize:="), vArg1.append(YSize)
vArg1.append("ZSize:="), vArg1.append(ZSize)
vArg2 = self._default_object_attributes(name=name, matname=matname)
new_object_name = _retry_ntimes(10, self.oeditor.CreateBox, vArg1, vArg2)
return self._create_object(new_object_name)
@pyaedt_function_handler()
def create_cylinder(self, cs_axis, position, radius, height, numSides=0, name=None, matname=None):
"""Create a cylinder.
Parameters
----------
cs_axis : int or str
Axis of rotation of the starting point around the center point.
:class:`pyaedt.constants.AXIS` Enumerator can be used as input.
position : list
Center point of the cylinder in a list of ``(x, y, z)`` coordinates.
radius : float
Radius of the cylinder.
height : float
Height of the cylinder.
numSides : int, optional
Number of sides. The default is ``0``, which is correct for
a cylinder.
name : str, optional
Name of the cylinder. The default is ``None``, in which case
the default name is assigned.
matname : str, optional
Name of the material. The default is ``None``, in which case the
default material is assigned.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
3D object.
References
----------
>>> oEditor.CreateCylinder
Examples
--------
>>> from pyaedt import Hfss
>>> aedtapp = Hfss()
>>> cylinder_object = aedtapp.modeler.create_cylinder(cs_axis='Z', position=[0,0,0],
... radius=2, height=3, name="mycyl",
... matname="vacuum")
"""
if isinstance(radius, (int, float)) and radius < 0:
raise ValueError("Radius must be greater than 0.")
szAxis = GeometryOperators.cs_axis_str(cs_axis)
XCenter, YCenter, ZCenter = self._pos_with_arg(position)
Radius = self._arg_with_dim(radius)
Height = self._arg_with_dim(height)
vArg1 = ["NAME:CylinderParameters"]
vArg1.append("XCenter:="), vArg1.append(XCenter)
vArg1.append("YCenter:="), vArg1.append(YCenter)
vArg1.append("ZCenter:="), vArg1.append(ZCenter)
vArg1.append("Radius:="), vArg1.append(Radius)
vArg1.append("Height:="), vArg1.append(Height)
vArg1.append("WhichAxis:="), vArg1.append(szAxis)
vArg1.append("NumSides:="), vArg1.append("{}".format(numSides))
vArg2 = self._default_object_attributes(name=name, matname=matname)
new_object_name = self.oeditor.CreateCylinder(vArg1, vArg2)
return self._create_object(new_object_name)
@pyaedt_function_handler()
def create_polyhedron(
self,
cs_axis=None,
center_position=(0.0, 0.0, 0.0),
start_position=(0.0, 1.0, 0.0),
height=1.0,
num_sides=12,
name=None,
matname=None,
):
"""Create a regular polyhedron.
Parameters
----------
cs_axis : optional
Axis of rotation of the starting point around the center point.
The default is ``None``, in which case the Z axis is used.
center_position : list, optional
List of ``[x, y, z]`` coordinates for the center position.
The default is ``(0.0, 0.0, 0.0)``.
start_position : list, optional
List of ``[x, y, z]`` coordinates for the starting position.
The default is ``(0.0, 0.0, 0.0)``.
height : float, optional
Height of the polyhedron. The default is ``1.0``.
num_sides : int, optional
Number of sides of the polyhedron. The default is ``12``.
name : str, optional
Name of the polyhedron. The default is ``None``, in which the
default name is assigned.
matname : str, optional
Name of the material. The default is ``None``, in which the
default material is assigned.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
3D object.
References
----------
>>> oEditor.CreateRegularPolyhedron
Examples
--------
>>> from pyaedt import Hfss
>>> aedtapp = Hfss()
>>> ret_obj = aedtapp.modeler.create_polyhedron(cs_axis='X', center_position=[0, 0, 0],
... start_position=[0,5,0], height=0.5,
... num_sides=8, name="mybox", matname="copper")
"""
test = cs_axis
cs_axis = GeometryOperators.cs_axis_str(cs_axis)
x_center, y_center, z_center = self._pos_with_arg(center_position)
x_start, y_start, z_start = self._pos_with_arg(start_position)
height = self._arg_with_dim(height)
vArg1 = ["NAME:PolyhedronParameters"]
vArg1.append("XCenter:="), vArg1.append(x_center)
vArg1.append("YCenter:="), vArg1.append(y_center)
vArg1.append("ZCenter:="), vArg1.append(z_center)
vArg1.append("XStart:="), vArg1.append(x_start)
vArg1.append("YStart:="), vArg1.append(y_start)
vArg1.append("ZStart:="), vArg1.append(z_start)
vArg1.append("Height:="), vArg1.append(height)
vArg1.append("NumSides:="), vArg1.append(int(num_sides))
vArg1.append("WhichAxis:="), vArg1.append(cs_axis)
vArg2 = self._default_object_attributes(name=name, matname=matname)
new_object_name = self.oeditor.CreateRegularPolyhedron(vArg1, vArg2)
return self._create_object(new_object_name)
@pyaedt_function_handler()
def create_cone(self, cs_axis, position, bottom_radius, top_radius, height, name=None, matname=None):
"""Create a cone.
Parameters
----------
cs_axis : str
Axis of rotation of the starting point around the center point.
position : list
List of ``[x, y, z]`` coordinates for the center position
of the bottom of the cone.
bottom_radius : float
Bottom radius of the cone.
top_radius : float
Top radius of the cone.
height : float
Height of the cone.
name : str, optional
Name of the cone. The default is ``None``, in which case
the default name is assigned.
matname : str, optional
Name of the material. The default is ``None``, in which case
the default material is assigned.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
3D object.
References
----------
>>> oEditor.CreateCone
Examples
--------
>>> from pyaedt import Hfss
>>> aedtapp = Hfss()
>>> cone_object = aedtapp.modeler.create_cone(cs_axis='Z', position=[0, 0, 0],
... bottom_radius=2, top_radius=3, height=4,
... name="mybox", matname="copper")
"""
if bottom_radius == top_radius:
raise ValueError("Bottom radius and top radius must have different values.")
if isinstance(bottom_radius, (int, float)) and bottom_radius < 0:
raise ValueError("Bottom radius must be greater than 0.")
if isinstance(top_radius, (int, float)) and top_radius < 0:
raise ValueError("Top radius must be greater than 0.")
if isinstance(height, (int, float)) and height <= 0:
raise ValueError("Height must be greater than 0.")
XCenter, YCenter, ZCenter = self._pos_with_arg(position)
szAxis = GeometryOperators.cs_axis_str(cs_axis)
Height = self._arg_with_dim(height)
RadiusBt = self._arg_with_dim(bottom_radius)
RadiusUp = self._arg_with_dim(top_radius)
vArg1 = ["NAME:ConeParameters"]
vArg1.append("XCenter:="), vArg1.append(XCenter)
vArg1.append("YCenter:="), vArg1.append(YCenter)
vArg1.append("ZCenter:="), vArg1.append(ZCenter)
vArg1.append("WhichAxis:="), vArg1.append(szAxis)
vArg1.append("Height:="), vArg1.append(Height)
vArg1.append("BottomRadius:="), vArg1.append(RadiusBt)
vArg1.append("TopRadius:="), vArg1.append(RadiusUp)
vArg2 = self._default_object_attributes(name=name, matname=matname)
new_object_name = self.oeditor.CreateCone(vArg1, vArg2)
return self._create_object(new_object_name)
@pyaedt_function_handler()
def create_sphere(self, position, radius, name=None, matname=None):
"""Create a sphere.
Parameters
----------
position : list
List of ``[x, y, z]`` coordinates for the center position
of the sphere.
radius : float
Radius of the sphere.
name : str, optional
Name of the sphere. The default is ``None``, in which case
the default name is assigned.
matname : str, optional
Name of the material. The default is ``None``, in which case
the default material is assigned.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
3D object.
References
----------
>>> oEditor.CreateSphere
Examples
--------
>>> from pyaedt import Hfss
>>> aedtapp = Hfss()
>>> ret_object = aedtapp.modeler.create_sphere(position=[0,0,0], radius=2,
... name="mysphere", matname="copper")
"""
if len(position) != 3:
raise ValueError("Position argument must be a valid 3 elements List.")
if isinstance(radius, (int, float)) and radius < 0:
raise ValueError("Radius must be greater than 0.")
XCenter, YCenter, ZCenter = self._pos_with_arg(position)
Radius = self._arg_with_dim(radius)
vArg1 = ["NAME:SphereParameters"]
vArg1.append("XCenter:="), vArg1.append(XCenter)
vArg1.append("YCenter:="), vArg1.append(YCenter)
vArg1.append("ZCenter:="), vArg1.append(ZCenter)
vArg1.append("Radius:="), vArg1.append(Radius)
vArg2 = self._default_object_attributes(name=name, matname=matname)
new_object_name = self.oeditor.CreateSphere(vArg1, vArg2)
return self._create_object(new_object_name)
@pyaedt_function_handler()
def create_torus(self, center, major_radius, minor_radius, axis=None, name=None, material_name=None):
"""Create a torus.
Parameters
----------
center : list
Center point for the torus in a list of ``[x, y, z]`` coordinates.
major_radius : float
Major radius of the torus.
minor_radius : float
Minor radius of the torus.
axis : str, optional
Axis of revolution.
The default is ``None``, in which case the Z axis is used.
name : str, optional
Name of the torus. The default is ``None``, in which case the
default name is assigned.
material_name : str, optional
Name of the material. The default is ``None``, in which case the
default material is assigned. If the material name supplied is
invalid, the default material is assigned.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
3D object.
References
----------
>>> oEditor.CreateTorus
Examples
--------
Create a torus named ``"mytorus"`` about the Z axis with a major
radius of 1, minor radius of 0.5, and a material of ``"copper"``.
>>> from pyaedt import Hfss
>>> hfss = Hfss()
>>> origin = [0, 0, 0]
>>> torus = hfss.modeler.create_torus(origin, major_radius=1,
... minor_radius=0.5, axis="Z",
... name="mytorus", material_name="copper")
"""
if len(center) != 3:
raise ValueError("Center argument must be a valid 3 element sequence.")
# if major_radius <= 0 or minor_radius <= 0:
# raise ValueError("Both major and minor radius must be greater than 0.")
# if minor_radius >= major_radius:
# raise ValueError("Major radius must be greater than minor radius.")
x_center, y_center, z_center = self._pos_with_arg(center)
axis = GeometryOperators.cs_axis_str(axis)
major_radius = self._arg_with_dim(major_radius)
minor_radius = self._arg_with_dim(minor_radius)
first_argument = ["NAME:TorusParameters"]
first_argument.append("XCenter:="), first_argument.append(x_center)
first_argument.append("YCenter:="), first_argument.append(y_center)
first_argument.append("ZCenter:="), first_argument.append(z_center)
first_argument.append("MajorRadius:="), first_argument.append(major_radius)
first_argument.append("MinorRadius:="), first_argument.append(minor_radius)
first_argument.append("WhichAxis:="), first_argument.append(axis)
second_argument = self._default_object_attributes(name=name, matname=material_name)
new_object_name = _retry_ntimes(10, self.oeditor.CreateTorus, first_argument, second_argument)
return self._create_object(new_object_name)
@pyaedt_function_handler()
def create_bondwire(
self,
start_position,
end_position,
h1=0.2,
h2=0,
alpha=80,
beta=5,
bond_type=0,
diameter=0.025,
facets=6,
name=None,
matname=None,
):
"""Create a bondwire.
Parameters
----------
start_position : list
List of ``[x, y, z]`` coordinates for the starting
position of the bond pad.
end_position : list
List of ``[x, y, z]`` coordinates for the ending position
of the bond pad.
h1 : float, optional
Height between the IC die I/O pad and the top of the bondwire.
The default is ``0.2``.
h2 : float, optional
Height of the IC die I/O pad above the lead frame. The default
is ``0``. A negative value indicates that the I/O pad is below
the lead frame.
alpha : float, optional
Angle in degrees between the xy plane and the wire bond at the
IC die I/O pad. The default is ``80``.
beta : float, optional
Angle in degrees between the xy plane and the wire bond at the
lead frame. The default is ``5``.
bond_type : int, optional
Type of the bondwire, which indicates its shape. Options are:
* ``0`` for JEDEC 5-point
* ``1`` for JEDEC 4-point
* ``2`` for Low
The default is ``0``.
diameter : float, optional
Diameter of the wire. The default is ``0.025``.
facets : int, optional
Number of wire facets. The default is ``6``.
name : str, optional
Name of the bondwire. The default is ``None``, in which case
the default name is assigned.
matname : str, optional
Name of the material. The default is ``None``, in which case
the default material is assigned.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
3D object.
References
----------
>>> oEditor.CreateBondwire
Examples
--------
>>> from pyaedt import Hfss
>>> hfss = Hfss()
>>> origin = [0,0,0]
>>> endpos = [10,5,20]
>>> # Material and name are not mandatory fields.
>>> object_id = hfss.modeler.create_bondwire(origin, endpos, h1=0.5, h2=0.1, alpha=75, beta=4,
...                                          bond_type=0, name="mybond", matname="copper")
"""
x_position, y_position, z_position = self._pos_with_arg(start_position)
if x_position is None or y_position is None or z_position is None:
raise AttributeError("Position Argument must be a valid 3 Element List")
x_length, y_length, z_length = self._pos_with_arg([n - m for m, n in zip(start_position, end_position)])
if x_length is None or y_length is None or z_length is None:
raise AttributeError("Dimension Argument must be a valid 3 Element List")
if bond_type == 0:
bondwire = "JEDEC_5Points"
elif bond_type == 1:
bondwire = "JEDEC_4Points"
elif bond_type == 2:
bondwire = "LOW"
else:
self.logger.error("Wrong Profile Type")
return False
first_argument = ["NAME:BondwireParameters"]
first_argument.append("WireType:="), first_argument.append(bondwire)
first_argument.append("WireDiameter:="), first_argument.append(self._arg_with_dim(diameter))
first_argument.append("NumSides:="), first_argument.append(str(facets))
first_argument.append("XPadPos:="), first_argument.append(x_position)
first_argument.append("YPadPos:="), first_argument.append(y_position)
first_argument.append("ZPadPos:="), first_argument.append(z_position)
first_argument.append("XDir:="), first_argument.append(x_length)
first_argument.append("YDir:="), first_argument.append(y_length)
first_argument.append("ZDir:="), first_argument.append(z_length)
first_argument.append("Distance:="), first_argument.append(
self._arg_with_dim(GeometryOperators.points_distance(start_position, end_position))
)
first_argument.append("h1:="), first_argument.append(self._arg_with_dim(h1))
first_argument.append("h2:="), first_argument.append(self._arg_with_dim(h2))
first_argument.append("alpha:="), first_argument.append(self._arg_with_dim(alpha, "deg"))
first_argument.append("beta:="), first_argument.append(self._arg_with_dim(beta, "deg"))
first_argument.append("WhichAxis:="), first_argument.append("Z")
first_argument.append("ReverseDirection:="), first_argument.append(False)
second_argument = self._default_object_attributes(name=name, matname=matname)
new_object_name = self.oeditor.CreateBondwire(first_argument, second_argument)
return self._create_object(new_object_name)
@pyaedt_function_handler()
def create_rectangle(self, csPlane, position, dimension_list, name=None, matname=None, is_covered=True):
"""Create a rectangle.
Parameters
----------
csPlane : str or int
Coordinate system plane for orienting the rectangle.
:class:`pyaedt.constants.PLANE` Enumerator can be used as input.
position : list or Position
List of ``[x, y, z]`` coordinates for the starting (corner) point of the rectangle or
the ``ApplicationName.modeler.Position(x, y, z)`` object.
dimension_list : list
List of ``[width, height]`` dimensions.
name : str, optional
Name of the rectangle. The default is ``None``, in which case
the default name is assigned.
matname : str, optional
Name of the material. The default is ``None``, in which case
the default material is assigned.
is_covered : bool, optional
Whether the rectangle is covered. The default is ``True``.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
3D object.
References
----------
>>> oEditor.CreateRectangle
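Examples
--------
A minimal usage sketch with illustrative values; ``"XY"`` is assumed to be an
accepted plane string (a :class:`pyaedt.constants.PLANE` member can also be used).
>>> from pyaedt import Hfss
>>> hfss = Hfss()
>>> rect = hfss.modeler.create_rectangle(csPlane="XY", position=[0, 0, 0],
...                                      dimension_list=[10, 5],
...                                      name="myrectangle", matname="copper")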
"""
szAxis = GeometryOperators.cs_plane_to_axis_str(csPlane)
XStart, YStart, ZStart = self._pos_with_arg(position)
Width = self._arg_with_dim(dimension_list[0])
Height = self._arg_with_dim(dimension_list[1])
vArg1 = ["NAME:RectangleParameters"]
vArg1.append("IsCovered:="), vArg1.append(is_covered)
vArg1.append("XStart:="), vArg1.append(XStart)
vArg1.append("YStart:="), vArg1.append(YStart)
vArg1.append("ZStart:="), vArg1.append(ZStart)
vArg1.append("Width:="), vArg1.append(Width)
vArg1.append("Height:="), vArg1.append(Height)
vArg1.append("WhichAxis:="), vArg1.append(szAxis)
vArg2 = self._default_object_attributes(name=name, matname=matname)
new_object_name = self.oeditor.CreateRectangle(vArg1, vArg2)
return self._create_object(new_object_name)
@pyaedt_function_handler()
def create_circle(self, cs_plane, position, radius, numSides=0, is_covered=True, name=None, matname=None):
"""Create a circle.
Parameters
----------
cs_plane : str or int
Coordinate system plane for orienting the circle.
:class:`pyaedt.constants.PLANE` Enumerator can be used as input.
position : list
List of ``[x, y, z]`` coordinates for the center point of the circle.
radius : float
Radius of the circle.
numSides : int, optional
Number of sides. The default is ``0``, which is correct for a circle.
is_covered : bool, optional
Whether the circle is covered. The default is ``True``.
name : str, optional
Name of the circle. The default is ``None``, in which case the
default name is assigned.
matname : str, optional
Name of the material. The default is ``None``, in which case the
default material is assigned.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
3D object.
References
----------
>>> oEditor.CreateCircle
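Examples
--------
A minimal usage sketch with illustrative values; ``"XY"`` is assumed to be an
accepted plane string (a :class:`pyaedt.constants.PLANE` member can also be used).
>>> from pyaedt import Hfss
>>> hfss = Hfss()
>>> circle = hfss.modeler.create_circle(cs_plane="XY", position=[0, 0, 0],
...                                     radius=2, name="mycircle",
...                                     matname="copper")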
"""
szAxis = GeometryOperators.cs_plane_to_axis_str(cs_plane)
XCenter, YCenter, ZCenter = self._pos_with_arg(position)
Radius = self._arg_with_dim(radius)
vArg1 = ["NAME:CircleParameters"]
vArg1.append("IsCovered:="), vArg1.append(is_covered)
vArg1.append("XCenter:="), vArg1.append(XCenter)
vArg1.append("YCenter:="), vArg1.append(YCenter)
vArg1.append("ZCenter:="), vArg1.append(ZCenter)
vArg1.append("Radius:="), vArg1.append(Radius)
vArg1.append("WhichAxis:="), vArg1.append(szAxis)
vArg1.append("NumSegments:="), vArg1.append("{}".format(numSides))
vArg2 = self._default_object_attributes(name=name, matname=matname)
new_object_name = self.oeditor.CreateCircle(vArg1, vArg2)
return self._create_object(new_object_name)
@pyaedt_function_handler()
def create_ellipse(self, cs_plane, position, major_radius, ratio, is_covered=True, name=None, matname=None):
"""Create an ellipse.
Parameters
----------
cs_plane : str or int
Coordinate system plane for orienting the ellipse.
:class:`pyaedt.constants.PLANE` Enumerator can be used as input.
position : list
List of ``[x, y, z]`` coordinates for the center point of the ellipse.
major_radius : float
Base radius of the ellipse.
ratio : float
Aspect ratio of the secondary radius to the base radius.
is_covered : bool, optional
Whether the ellipse is covered. The default is ``True``,
in which case the result is a 2D sheet object. If ``False``,
the result is a closed 1D polyline object.
name : str, optional
Name of the ellipse. The default is ``None``, in which case the
default name is assigned.
matname : str, optional
Name of the material. The default is ``None``, in which case the
default material is assigned.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
3D object.
References
----------
>>> oEditor.CreateEllipse
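Examples
--------
A minimal usage sketch with illustrative values; the ratio is the secondary
radius divided by the major radius, and ``"XY"`` is assumed to be an accepted
plane string.
>>> from pyaedt import Hfss
>>> hfss = Hfss()
>>> ellipse = hfss.modeler.create_ellipse(cs_plane="XY", position=[0, 0, 0],
...                                       major_radius=4, ratio=0.25,
...                                       name="myellipse", matname="copper")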
"""
szAxis = GeometryOperators.cs_plane_to_axis_str(cs_plane)
XStart, YStart, ZStart = self._pos_with_arg(position)
MajorRadius = self._arg_with_dim(major_radius)
Ratio = self._arg_with_dim(ratio)
vArg1 = ["NAME:EllipseParameters"]
vArg1.append("IsCovered:="), vArg1.append(is_covered)
vArg1.append("XCenter:="), vArg1.append(XStart)
vArg1.append("YCenter:="), vArg1.append(YStart)
vArg1.append("ZCenter:="), vArg1.append(ZStart)
vArg1.append("MajRadius:="), vArg1.append(MajorRadius)
vArg1.append("Ratio:="), vArg1.append(Ratio)
vArg1.append("WhichAxis:="), vArg1.append(szAxis)
vArg2 = self._default_object_attributes(name=name, matname=matname)
new_object_name = self.oeditor.CreateEllipse(vArg1, vArg2)
return self._create_object(new_object_name)
@pyaedt_function_handler()
def create_equationbased_curve(
self,
x_t=0,
y_t=0,
z_t=0,
t_start=0,
t_end=1,
num_points=0,
name=None,
xsection_type=None,
xsection_orient=None,
xsection_width=1,
xsection_topwidth=1,
xsection_height=1,
xsection_num_seg=0,
xsection_bend_type=None,
):
"""Create an equation-based curve.
Parameters
----------
x_t : str or float
Expression for the X-component of the curve as a function of ``"_t"``.
For example, ``"3 * cos(_t)"``.
y_t : str or float
Expression for the Y-component of the curve as a function of ``"_t"``.
z_t : str or float
Expression for the Z-component of the curve as a function of ``"_t"``.
t_start : str or float
Starting value of the parameter ``"_t"``.
t_end : str or float
Ending value of the parameter ``"_t"``.
num_points : int, optional
Number of vertices on the segmented curve. The default is ``0``,
in which case the curve is non-segmented.
name : str, optional
Name of the created curve in the 3D modeler. The default is ``None``,
in which case the default name is assigned.
xsection_type : str, optional
Type of the cross-section. Choices are ``"Line"``, ``"Circle"``,
``"Rectangle"``, and ``"Isosceles Trapezoid"``. The default is ``None``.
xsection_orient : str, optional
Direction of the normal vector to the width of the cross-section.
Choices are ``"X"``, ``"Y"``, ``"Z"``, and ``"Auto"``. The default is
``None``, in which case the direction is set to ``"Auto"``.
xsection_width : float or str, optional
Width or diameter of the cross-section for all types. The
default is ``1``.
xsection_topwidth : float or str, optional
Top width of the cross-section for type ``"Isosceles Trapezoid"`` only.
The default is ``1``.
xsection_height : float or str
Height of the cross-section for types ``"Rectangle"`` and ``"Isosceles
Trapezoid"`` only. The default is ``1``.
xsection_num_seg : int, optional
Number of segments in the cross-section surface for types ``"Circle"``,
``"Rectangle"``, and ``"Isosceles Trapezoid"``. The default is ``0``. The
value must be ``0`` or greater than ``2``.
xsection_bend_type : str, optional
Type of the bend for the cross-section. The default is ``None``, in which
case the bend type is set to ``"Corner"``. For the type ``"Circle"``, the
bend type should be set to ``"Curved"``.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
3D object.
References
----------
>>> oEditor.CreateEquationCurve
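Examples
--------
Sketch of a helical curve driven by the ``_t`` parameter; the expressions and
bounds below are illustrative only.
>>> from pyaedt import Hfss
>>> hfss = Hfss()
>>> curve = hfss.modeler.create_equationbased_curve(x_t="3 * cos(_t)",
...                                                 y_t="3 * sin(_t)",
...                                                 z_t="_t", t_start=0,
...                                                 t_end=6.28, num_points=0)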
"""
x_section = self._crosssection_arguments(
type=xsection_type,
orient=xsection_orient,
width=xsection_width,
topwidth=xsection_topwidth,
height=xsection_height,
num_seg=xsection_num_seg,
bend_type=xsection_bend_type,
)
vArg1 = [
"NAME:EquationBasedCurveParameters",
"XtFunction:=",
str(x_t),
"YtFunction:=",
str(y_t),
"ZtFunction:=",
str(z_t),
"tStart:=",
str(t_start),
"tEnd:=",
str(t_end),
"NumOfPointsOnCurve:=",
num_points,
"Version:=",
1,
x_section,
]
vArg2 = self._default_object_attributes(name)
new_name = self.oeditor.CreateEquationCurve(vArg1, vArg2)
return self._create_object(new_name)
@pyaedt_function_handler()
def create_helix(
self,
polyline_name,
position,
x_start_dir,
y_start_dir,
z_start_dir,
num_thread=1,
right_hand=True,
radius_increment=0.0,
thread=1,
):
"""Create an helix from a polyline.
Parameters
----------
polyline_name : str
Name of the polyline used as the base for the helix.
position : list
List of ``[x, y, z]`` coordinates for the center point of the helix.
x_start_dir : float
Distance along x axis from the polyline.
y_start_dir : float
Distance along y axis from the polyline.
z_start_dir : float
Distance along z axis from the polyline.
num_thread : int, optional
Number of turns. The default value is ``1``.
right_hand : bool, optional
Whether the helix turning direction is right hand. The default value is ``True``.
radius_increment : float, optional
Radius change per turn. The default value is ``0.0``.
thread : float, optional
Helix thread. The default value is ``1``.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
3D object.
References
----------
>>> oEditor.CreateHelix
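Examples
--------
Minimal sketch: the helix is swept from an existing polyline; here a short
straight polyline is used as the profile for illustration, and all values
are arbitrary.
>>> from pyaedt import Hfss
>>> hfss = Hfss()
>>> profile = hfss.modeler.create_polyline([[5, 0, 0], [5.5, 0, 0]], name="helix_profile")
>>> helix = hfss.modeler.create_helix(polyline_name=profile.name,
...                                   position=[0, 0, 0], x_start_dir=1,
...                                   y_start_dir=0, z_start_dir=0,
...                                   num_thread=5, right_hand=True,
...                                   radius_increment=0.0, thread=1)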
"""
if not polyline_name or polyline_name == "":
raise ValueError("The name of the polyline cannot be an empty string.")
x_center, y_center, z_center = self._pos_with_arg(position)
vArg1 = ["NAME:Selections"]
vArg1.append("Selections:="), vArg1.append(polyline_name)
vArg1.append("NewPartsModelFlag:="), vArg1.append("Model")
vArg2 = ["NAME:HelixParameters"]
vArg2.append("XCenter:=")
vArg2.append(x_center)
vArg2.append("YCenter:=")
vArg2.append(y_center)
vArg2.append("ZCenter:=")
vArg2.append(z_center)
vArg2.append("XStartDir:=")
vArg2.append(self._arg_with_dim(x_start_dir))
vArg2.append("YStartDir:=")
vArg2.append(self._arg_with_dim(y_start_dir))
vArg2.append("ZStartDir:=")
vArg2.append(self._arg_with_dim(z_start_dir))
vArg2.append("NumThread:=")
vArg2.append(num_thread)
vArg2.append("RightHand:=")
vArg2.append(right_hand)
vArg2.append("RadiusIncrement:=")
vArg2.append(self._arg_with_dim(radius_increment))
vArg2.append("Thread:=")
vArg2.append(self._arg_with_dim(thread))
new_name = self.oeditor.CreateHelix(vArg1, vArg2)
return self._create_object(new_name)
@pyaedt_function_handler()
def convert_segments_to_line(self, object_name):
"""Convert a CreatePolyline list of segments to lines.
This method applies to splines and 3-point arcs.
Parameters
----------
object_name : int, str, or Object3d
Name or ID of the object, or the object itself.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
3D object.
References
----------
>>> oEditor.ChangeProperty
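Examples
--------
Minimal sketch: convert the segments of an existing polyline to straight
lines (typically used on spline or three-point arc segments).
>>> from pyaedt import Hfss
>>> hfss = Hfss()
>>> pl = hfss.modeler.create_polyline([[0, 0, 0], [1, 1, 0], [2, 0, 0]], name="my_polyline")
>>> hfss.modeler.convert_segments_to_line(pl.name)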
"""
this_object = self._resolve_object(object_name)
edges = this_object.edges
for i in reversed(range(len(edges))):
self.oeditor.ChangeProperty(
[
"NAME:AllTabs",
[
"NAME:Geometry3DPolylineTab",
["NAME:PropServers", this_object.name + ":CreatePolyline:1:Segment" + str(i)],
["NAME:ChangedProps", ["NAME:Segment Type", "Value:=", "Line"]],
],
]
)
return True
@pyaedt_function_handler()
def create_udm(self, udmfullname, udm_params_list, udm_library="syslib"):
"""Create a user-defined model.
Parameters
----------
udmfullname : str
Full name for the user-defined model, including the folder name.
udm_params_list : list
List of user-defined parameter pairs for the model, either ``[name, value]``
lists or objects with ``Name`` and ``Value`` attributes.
udm_library : str, optional
Name of library for the user-defined model. The default is ``"syslib"``.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
3D object.
References
----------
>>> oEditor.CreateUserDefinedModel
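Examples
--------
Illustrative sketch only: the DLL name and the parameter pair below are
hypothetical and depend on the user-defined model being used.
>>> from pyaedt import Hfss
>>> hfss = Hfss()
>>> params = [["NumberOfTurns", 5]]  # hypothetical parameter pair
>>> comp = hfss.modeler.create_udm(udmfullname="HFSS/MyUDM.dll",  # hypothetical name
...                                udm_params_list=params,
...                                udm_library="syslib")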
"""
vArg1 = ["NAME:UserDefinedModelParameters", ["NAME:Definition"], ["NAME:Options"]]
vArgParamVector = ["NAME:GeometryParams"]
for pair in udm_params_list:
if isinstance(pair, list):
name = pair[0]
val = pair[1]
else:
name = pair.Name
val = pair.Value
if isinstance(val, int):
vArgParamVector.append(
["NAME:UDMParam", "Name:=", name, "Value:=", str(val), "PropType2:=", 3, "PropFlag2:=", 2]
)
elif str(val)[0] in "0123456789":
vArgParamVector.append(
["NAME:UDMParam", "Name:=", name, "Value:=", str(val), "PropType2:=", 3, "PropFlag2:=", 4]
)
else:
vArgParamVector.append(
[
"NAME:UDMParam",
"Name:=",
name,
"Value:=",
str(val),
"DataType:=",
"String",
"PropType2:=",
1,
"PropFlag2:=",
0,
]
)
vArg1.append(vArgParamVector)
vArg1.append("DllName:=")
vArg1.append(udmfullname)
vArg1.append("Library:=")
vArg1.append(udm_library)
vArg1.append("Version:=")
vArg1.append("2.0")
vArg1.append("ConnectionID:=")
vArg1.append("")
oname = self.oeditor.CreateUserDefinedModel(vArg1)
if oname:
object_lists = self.oeditor.GetPartsForUserDefinedModel(oname)
for new_name in object_lists:
self._create_object(new_name)
return True
else:
return False
@pyaedt_function_handler()
def create_spiral(
self,
internal_radius=10,
spacing=1,
faces=8,
turns=10,
width=2,
thickness=1,
elevation=0,
material="copper",
name=None,
):
"""Create a spiral inductor from a polyline.
Parameters
----------
internal_radius : float, optional
Internal starting radius of the spiral. The default is ``10``.
spacing : float, optional
Pitch between two consecutive turns. The default is ``1``.
faces : int, optional
Number of faces per turn. The default is ``8``, which gives an octagonal shape.
turns : int, optional
Number of turns. The default is ``10``.
width : float, optional
Spiral width. The default is ``2``.
thickness : float, optional
Spiral thickness. The default is ``1``.
elevation : float, optional
Spiral elevation. The default is ``0``.
material : str, optional
Spiral material. The default is ``"copper"``.
name : str, optional
Spiral name. The default is ``None``.
Returns
-------
:class:`pyaedt.modeler.Object3d.Polyline`
Polyline object.
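Examples
--------
A minimal usage sketch with illustrative values.
>>> from pyaedt import Hfss
>>> hfss = Hfss()
>>> spiral = hfss.modeler.create_spiral(internal_radius=20, spacing=2.5,
...                                     turns=5, width=4, thickness=1.8,
...                                     name="ind1", material="copper")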
"""
assert internal_radius > 0, "Internal Radius must be greater than 0."
assert faces > 0, "Faces must be greater than 0."
dtheta = 2 * pi / faces
theta = pi / 2
pts = [(internal_radius, 0, elevation), (internal_radius, internal_radius * tan(dtheta / 2), elevation)]
rin = internal_radius * tan(dtheta / 2) * 2
x = rin
r = rin
for i in range(faces):
r += 1
theta += dtheta
x = x + r * cos(theta)
dr = (width + spacing) / (x - rin)
for i in range(turns * faces - int(faces / 2) - 1):
rin += dr
theta += dtheta
x0, y0 = pts[-1][:2]
x1, y1 = x0 + rin * cos(theta), y0 + rin * sin(theta)
pts.append((x1, y1, elevation))
pts.append((x1, 0, elevation))
p1 = self.create_polyline(
pts, xsection_type="Rectangle", xsection_width=width, xsection_height=thickness, matname=material
)
if name:
p1.name = name
return p1
@pyaedt_function_handler()
def insert_3d_component(self, compFile, geoParams=None, szMatParams="", szDesignParams="", targetCS="Global"):
"""Insert a new 3D component.
Parameters
----------
compFile : str
Name of the component file.
geoParams : dict, optional
Geometrical parameters.
szMatParams : str, optional
Material parameters. The default is ``""``.
szDesignParams : str, optional
Design parameters. The default is ``""``.
targetCS : str, optional
Target coordinate system. The default is ``"Global"``.
Returns
-------
str
Name of the created 3D component.
References
----------
>>> oEditor.Insert3DComponent
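Examples
--------
Sketch only: the component path below is a hypothetical placeholder.
>>> from pyaedt import Hfss
>>> hfss = Hfss()
>>> comp_name = hfss.modeler.insert_3d_component("C:/Components/MyComponent.a3dcomp")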
"""
vArg1 = ["NAME:InsertComponentData"]
sz_geo_params = ""
if not geoParams:
geometryparams = self._app.get_components3d_vars(compFile)
if geometryparams:
geoParams = geometryparams
if geoParams:
sz_geo_params = "".join(["{0}='{1}' ".format(par, val) for par, val in geoParams.items()])
vArg1.append("TargetCS:=")
vArg1.append(targetCS)
vArg1.append("ComponentFile:=")
vArg1.append(compFile)
vArg1.append("IsLocal:=")
vArg1.append(False)
vArg1.append("UniqueIdentifier:=")
vArg1.append("")
varg2 = ["NAME:InstanceParameters"]
varg2.append("GeometryParameters:=")
varg2.append(sz_geo_params)
varg2.append("MaterialParameters:=")
varg2.append(szMatParams)
varg2.append("DesignParameters:=")
varg2.append(szDesignParams)
vArg1.append(varg2)
new_object_name = self.oeditor.Insert3DComponent(vArg1)
# TODO return an object
self.refresh_all_ids()
return new_object_name
@pyaedt_function_handler()
def get_3d_component_object_list(self, componentname):
"""Retrieve all objects belonging to a 3D component.
Parameters
----------
componentname : str
Name of the 3D component.
Returns
-------
List
List of objects belonging to the 3D component.
References
----------
>>> oeditor.GetChildObject
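Examples
--------
Sketch only, assuming a 3D component named ``"MyComponent1"`` already exists
in the design.
>>> from pyaedt import Hfss
>>> hfss = Hfss()
>>> objects = hfss.modeler.get_3d_component_object_list("MyComponent1")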
"""
if self._app._is_object_oriented_enabled():
compobj = self.oeditor.GetChildObject(componentname)
if compobj:
return list(compobj.GetChildNames())
else:
self.logger.warning("Object Oriented Beta Option is not enabled in this Desktop.")
return []
@pyaedt_function_handler()
def _check_actor_folder(self, actor_folder):
if not os.path.exists(actor_folder):
self.logger.error("Folder {} does not exist.".format(actor_folder))
return False
if not any(fname.endswith(".json") for fname in os.listdir(actor_folder)) or not any(
fname.endswith(".a3dcomp") for fname in os.listdir(actor_folder)
):
self.logger.error("At least one json and one a3dcomp file is needed.")
return False
return True
@pyaedt_function_handler()
def _initialize_multipart(self):
if MultiPartComponent._t in self._app._variable_manager.independent_variable_names:
return True
else:
return MultiPartComponent.start(self._app)
@pyaedt_function_handler()
def add_person(
self,
actor_folder,
speed=0.0,
global_offset=[0, 0, 0],
yaw=0,
pitch=0,
roll=0,
relative_cs_name=None,
actor_name=None,
):
"""Add a Walking Person Multipart from 3D Components.
It requires a json file in the folder containing person
infos. An example json file follows:
.. code-block:: json
{
"name": "person3",
"version": 1,
"class":"person",
"stride":"0.76meter",
"xlim":["-.43",".43"],
"ylim":["-.25",".25"],
"parts": {
"arm_left": {
"comp_name": "arm_left.a3dcomp",
"rotation_cs":["-.04","0","1.37"],
"rotation":"-30deg",
"compensation_angle":"-15deg",
"rotation_axis":"Y"
},
"arm_right": {
"comp_name": "arm_right.a3dcomp",
"rotation_cs":["0","0","1.37"],
"rotation":"30deg",
"compensation_angle":"30deg",
"rotation_axis":"Y"
},
"leg_left": {
"comp_name": "leg_left.a3dcomp",
"rotation_cs":["0","0",".9"],
"rotation":"20deg",
"compensation_angle":"22.5deg",
"rotation_axis":"Y"
},
"leg_right": {
"comp_name": "leg_right.a3dcomp",
"rotation_cs":["-.04","0",".9375"],
"rotation":"-20deg",
"compensation_angle":"-22.5deg",
"rotation_axis":"Y"
},
"torso": {
"comp_name": "torso.a3dcomp",
"rotation_cs":null,
"rotation":null,
"compensation_angle":null,
"rotation_axis":null
}
}
}
Parameters
----------
actor_folder : str
Path to the actor folder. It must contain a json settings
file and a 3dcomponent (.a3dcomp).
speed : float, optional
Object movement speed with time (m_per_sec).
global_offset : list, optional
Offset from Global Coordinate System [x,y,z] in meters.
yaw : float, optional
Yaw Rotation from Global Coordinate System in deg.
pitch : float, optional
Pitch Rotation from Global Coordinate System in deg.
roll : float, optional
Roll Rotation from Global Coordinate System in deg.
relative_cs_name : str, optional
Relative CS name of the actor. The default is ``None``, which uses the Global CS.
actor_name : str, optional
If provided, it overrides the actor name in the JSON.
Returns
-------
:class:`pyaedt.modeler.actors.Person`
References
----------
>>> oEditor.Insert3DComponent
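Examples
--------
Sketch only: the actor directory below is a placeholder for a folder
containing the JSON settings file and the ``.a3dcomp`` files.
>>> from pyaedt import Hfss
>>> app = Hfss()
>>> person_dir = "path/to/person/directory"
>>> person1 = app.modeler.add_person(person_dir, speed=1.0, global_offset=[25, 1.5, 0], yaw=180)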
"""
self._initialize_multipart()
if not self._check_actor_folder(actor_folder):
return False
person1 = Person(actor_folder, speed=speed, relative_cs_name=relative_cs_name)
if actor_name:
person1._name = actor_name
person1.offset = global_offset
person1.yaw = self._arg_with_dim(yaw, "deg")
person1.pitch = self._arg_with_dim(pitch, "deg")
person1.roll = self._arg_with_dim(roll, "deg")
person1.insert(self._app)
self.multiparts.append(person1)
return person1
@pyaedt_function_handler()
def add_vehicle(
self,
actor_folder,
speed=0,
global_offset=[0, 0, 0],
yaw=0,
pitch=0,
roll=0,
relative_cs_name=None,
actor_name=None,
):
"""Add a Moving Vehicle Multipart from 3D Components.
It requires a JSON file in the folder containing the vehicle
information. An example JSON file follows:
.. code-block:: json
{
"name": "vehicle3",
"version": 1,
"type":"mustang",
"class":"vehicle",
"xlim":["-1.94","2.8"],
"ylim":["-.91",".91"],
"parts": {
"wheels_front": {
"comp_name": "wheels_front.a3dcomp",
"rotation_cs":["1.8970271810532" ,"0" ,"0.34809664860487"],
"tire_radius":"0.349",
"rotation_axis":"Y"
},
"wheels_rear": {
"comp_name": "wheels_rear.a3dcomp",
"rotation_cs":["-0.82228746728897" ,"0","0.34809664860487"],
"tire_radius":"0.349",
"rotation_axis":"Y"
},
"body": {
"comp_name": "body.a3dcomp",
"rotation_cs":null,
"tire_radius":null,
"rotation_axis":null
}
}
}
Parameters
----------
actor_folder : str
Path to the actor directory. It must contain a json settings file
and a 3dcomponent (``.a3dcomp`` file).
speed : float, optional
Object movement speed with time (m_per_sec).
global_offset : list, optional
Offset from Global Coordinate System [x,y,z] in meters.
yaw : float, optional
Yaw Rotation from Global Coordinate System in deg.
pitch : float, optional
Pitch Rotation from Global Coordinate System in deg.
roll : float, optional
Roll Rotation from Global Coordinate System in deg.
relative_cs_name : str, optional
Relative CS name of the actor. The default is ``None``, which uses the Global CS.
actor_name : str, optional
If provided, it overrides the actor name in the JSON.
Returns
-------
:class:`pyaedt.modeler.actors.Vehicle`
References
----------
>>> oEditor.Insert3DComponent
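Examples
--------
Sketch only: the actor directory below is a placeholder for a folder
containing the JSON settings file and the ``.a3dcomp`` files.
>>> from pyaedt import Hfss
>>> app = Hfss()
>>> vehicle_dir = "path/to/vehicle/directory"
>>> car1 = app.modeler.add_vehicle(vehicle_dir, speed=8.7, global_offset=[3, -2.5, 0])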
"""
self._initialize_multipart()
if not self._check_actor_folder(actor_folder):
return False
vehicle = Vehicle(actor_folder, speed=speed, relative_cs_name=relative_cs_name)
if actor_name:
vehicle._name = actor_name
vehicle.offset = global_offset
vehicle.yaw = self._arg_with_dim(yaw, "deg")
vehicle.pitch = self._arg_with_dim(pitch, "deg")
vehicle.roll = self._arg_with_dim(roll, "deg")
vehicle.insert(self._app)
self.multiparts.append(vehicle)
return vehicle
@pyaedt_function_handler()
def add_bird(
self,
actor_folder,
speed=0,
global_offset=[0, 0, 0],
yaw=0,
pitch=0,
roll=0,
flapping_rate=50,
relative_cs_name=None,
actor_name=None,
):
"""Add a Bird Multipart from 3D Components.
It requires a JSON file in the folder containing the bird information. An example JSON file is shown here.
.. code-block:: json
{
"name": "bird1",
"version": 1,
"class":"bird",
"xlim":["-.7","2.75"],
"ylim":["-1.2","1.2"],
"parts": {
"body": {
"comp_name": "body.a3dcomp",
"rotation_cs":null,
"rotation":null,
"rotation_axis":null
},
"wing_right": {
"comp_name": "wing_left.a3dcomp",
"rotation_cs":[".001778" ,".00508" ,".00762"],
"rotation":"-45deg",
"rotation_axis":"X"
},
"wing_left": {
"comp_name": "wing_right.a3dcomp",
"rotation_cs":[".001778" ,"-.00508" ,".00762"],
"rotation":"45deg",
"rotation_axis":"X"
},
"tail": {
"comp_name": "tail.a3dcomp",
"rotation_cs":null,
"rotation":null,
"rotation_axis":null
},
"beak": {
"comp_name": "beak.a3dcomp",
"rotation_cs":null,
"rotation":null,
"rotation_axis":null
}
}
}
Parameters
----------
actor_folder : str
Path to the actor directory. It must contain a json settings file and a
3dcomponent (``.a3dcomp`` file)
speed : float, optional
Object movement speed with time (m_per_sec).
global_offset : list, optional
Offset from Global Coordinate System [x,y,z] in meters.
yaw : float, optional
Yaw Rotation from Global Coordinate System in deg.
pitch : float, optional
Pitch Rotation from Global Coordinate System in deg.
roll : float, optional
Roll Rotation from Global Coordinate System in deg.
flapping_rate : float, optional
Motion flapping rate in Hz.
relative_cs_name : str, optional
Relative CS name of the actor. The default is ``None``, which uses the Global CS.
actor_name : str, optional
If provided, it overrides the actor name in the JSON.
Returns
-------
:class:`pyaedt.modeler.actors.Bird`
References
----------
>>> oEditor.Insert3DComponent
Examples
--------
>>> from pyaedt import Hfss
>>> app = Hfss()
>>> bird_dir = "path/to/bird/directory"
>>> bird1 = app.modeler.add_bird(bird_dir, 1.0, [19, 4, 3], 120, -5, flapping_rate=30)
"""
self._initialize_multipart()
if not self._check_actor_folder(actor_folder):
return False
bird = Bird(
actor_folder,
speed=speed,
flapping_rate=self._arg_with_dim(flapping_rate, "Hz"),
relative_cs_name=relative_cs_name,
)
if actor_name:
bird._name = actor_name
bird.offset = global_offset
bird.yaw = self._arg_with_dim(yaw, "deg")
bird.pitch = self._arg_with_dim(pitch, "deg")
bird.roll = self._arg_with_dim(roll, "deg")
bird.insert(self._app)
self.multiparts.append(bird)
return bird
@pyaedt_function_handler()
def add_environment(
self, env_folder, global_offset=[0, 0, 0], yaw=0, pitch=0, roll=0, relative_cs_name=None, environment_name=None
):
"""Add an Environment Multipart Component from Json file.
.. code-block:: json
{
"name": "open1",
"version": 1,
"class":"environment",
"xlim":["-5","95"],
"ylim":["-60","60"],
"parts": {
"open_area": {
"comp_name": "open1.a3dcomp",
"offset":null,
"rotation_cs":null,
"rotation":null,
"rotation_axis":null,
"duplicate_number":null,
"duplicate_vector":null
}
}
}
Parameters
----------
env_folder : str
Path to the actor directory. It must contain a json
settings file and a 3dcomponent (``.a3dcomp`` file).
global_offset : list, optional
Offset from Global Coordinate System [x,y,z] in meters.
yaw : float, optional
Yaw Rotation from Global Coordinate System in deg.
pitch : float, optional
Pitch Rotation from Global Coordinate System in deg.
roll : float, optional
Roll Rotation from Global Coordinate System in deg.
relative_cs_name : str, optional
Relative CS name of the environment. The default is ``None``, which uses the Global CS.
environment_name : str, optional
If provided, it overrides the environment name in the JSON.
Returns
-------
:class:`pyaedt.modeler.multiparts.Environment`
References
----------
>>> oEditor.Insert3DComponent
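Examples
--------
Sketch only: the environment directory below is a placeholder for a folder
containing the JSON settings file and the ``.a3dcomp`` file.
>>> from pyaedt import Hfss
>>> app = Hfss()
>>> env_dir = "path/to/environment/directory"
>>> env = app.modeler.add_environment(env_dir, environment_name="open_area")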
"""
self._initialize_multipart()
if not self._check_actor_folder(env_folder):
return False
environment = Environment(env_folder, relative_cs_name=relative_cs_name)
if environment_name:
environment._name = environment_name
environment.offset = global_offset
environment.yaw = self._arg_with_dim(yaw, "deg")
environment.pitch = self._arg_with_dim(pitch, "deg")
environment.roll = self._arg_with_dim(roll, "deg")
environment.insert(self._app)
self.multiparts.append(environment)
return environment
@pyaedt_function_handler()
def create_choke(self, json_file):
"""Create a chock from json setting file.
Parameters
----------
json_file : str
Full path to the JSON file returned by the ``check_choke_values`` method.
Returns
-------
list
List of the form ``[bool, core_obj, [first_winding_obj, first_winding_point_list],
[second_winding_obj, second_winding_point_list], ...]``, where the bool is ``True``
when successful and ``False`` when failed, ``core_obj`` is the
:class:`pyaedt.modeler.Object3d.Object3d` of the core, and each winding entry
contains the winding :class:`pyaedt.modeler.Object3d.Object3d` and the list of
point coordinates of that winding.
Examples
--------
The JSON file has to be like the following example.
>>> from pyaedt import Hfss
>>> hfss = Hfss()
>>> dictionary_values = hfss.modeler.check_choke_values("C:/Example/Of/Path/myJsonFile.json")
>>> mychoke = hfss.modeler.create_choke("C:/Example/Of/Path/myJsonFile_Corrected.json")
"""
with open(json_file, "r") as read_file:
values = json.load(read_file)
self.logger.info("CHOKE INFO: " + str(values))
security_factor = 1.1
sr = security_factor
segment_number = 0
if values["Wire Section"]["Hexagon"]:
segment_number = 6
section = "Circle"
elif values["Wire Section"]["Octagon"]:
segment_number = 8
section = "Circle"
elif values["Wire Section"]["Circle"]:
section = "Circle"
else:
section = None
sep_layer = values["Layer Type"]["Separate"]
name_core = values["Core"]["Name"]
material_core = values["Core"]["Material"]
in_rad_core = values["Core"]["Inner Radius"]
out_rad_core = values["Core"]["Outer Radius"]
height_core = values["Core"]["Height"]
chamfer = values["Core"]["Chamfer"]
name_wind = values["Outer Winding"]["Name"]
material_wind = values["Outer Winding"]["Material"]
in_rad_wind = values["Outer Winding"]["Inner Radius"]
out_rad_wind = values["Outer Winding"]["Outer Radius"]
height_wind = values["Outer Winding"]["Height"]
w_dia = values["Outer Winding"]["Wire Diameter"]
turns = values["Outer Winding"]["Turns"]
turns2 = values["Mid Winding"]["Turns"]
turns3 = values["Inner Winding"]["Turns"]
teta = values["Outer Winding"]["Coil Pit(deg)"]
teta2 = values["Mid Winding"]["Coil Pit(deg)"]
teta3 = values["Inner Winding"]["Coil Pit(deg)"]
chamf = self._make_winding_follow_chamfer(chamfer, sr, w_dia, 1)
returned_list = [
self._make_core(name_core, material_core, in_rad_core, out_rad_core, height_core, chamfer),
]
if values["Layer"]["Double"]:
if values["Layer Type"]["Linked"]:
list_object = self._make_double_linked_winding(
name_wind,
material_wind,
in_rad_wind,
out_rad_wind,
height_wind,
w_dia,
teta,
teta2,
turns,
turns2,
chamfer,
chamf,
sr,
)
print("make_double_linked_winding")
else:
list_object = self._make_double_winding(
name_wind,
material_wind,
in_rad_wind,
out_rad_wind,
height_wind,
w_dia,
teta,
teta2,
turns,
turns2,
chamfer,
chamf,
sr,
sep_layer,
)
print("make_double_winding")
elif values["Layer"]["Triple"]:
if values["Layer Type"]["Linked"]:
list_object = self._make_triple_linked_winding(
name_wind,
material_wind,
in_rad_wind,
out_rad_wind,
height_wind,
w_dia,
teta,
teta2,
teta3,
turns,
turns2,
turns3,
chamfer,
chamf,
sr,
)
print("make_triple_linked_winding")
else:
list_object = self._make_triple_winding(
name_wind,
material_wind,
in_rad_wind,
out_rad_wind,
height_wind,
w_dia,
teta,
teta2,
teta3,
turns,
turns2,
turns3,
chamfer,
chamf,
sr,
sep_layer,
)
print("make_triple_winding")
else:
list_object = self._make_winding(
name_wind, material_wind, in_rad_wind, out_rad_wind, height_wind, teta, turns, chamf, sep_layer
)
print("make_winding")
list_duplicated_object = []
if isinstance(list_object[0], list):
for i in range(len(list_object)):
success = list_object[i][0].set_crosssection_properties(
type=section, width=w_dia, num_seg=segment_number
)
returned_list = returned_list + list_object
else:
success = list_object[0].set_crosssection_properties(type=section, width=w_dia, num_seg=segment_number)
returned_list.append(list_object)
for key in values["Number of Windings"].keys():
if values["Number of Windings"][key]:
number_duplication = int(key)
if number_duplication >= 2:
if values["Mode"]["Common"] and number_duplication == 2:
if isinstance(list_object[0], list):
for i in range(len(list_object)):
duplication = self.create_polyline(
position_list=list_object[i][1], name=name_wind, matname=material_wind
)
duplication.mirror([0, 0, 0], [-1, 0, 0])
duplication_points = self.get_vertices_of_line(duplication.name)
success = duplication.set_crosssection_properties(
type=section, width=w_dia, num_seg=segment_number
)
list_duplicated_object.append([duplication, duplication_points])
else:
duplication = self.create_polyline(
position_list=list_object[1], name=name_wind, matname=material_wind
)
duplication.mirror([0, 0, 0], [-1, 0, 0])
duplication_points = self.get_vertices_of_line(duplication.name)
success = duplication.set_crosssection_properties(type=section, width=w_dia, num_seg=segment_number)
list_duplicated_object.append([duplication, duplication_points])
else:
if isinstance(list_object[0], list):
for j in range(number_duplication - 1):
for i in range(len(list_object)):
duplication = self.create_polyline(
position_list=list_object[i][1], name=name_wind, matname=material_wind
)
duplication.rotate("Z", (j + 1) * 360 / number_duplication)
duplication_points = self.get_vertices_of_line(duplication.name)
success = duplication.set_crosssection_properties(
type=section, width=w_dia, num_seg=segment_number
)
list_duplicated_object.append([duplication, duplication_points])
else:
for j in range(number_duplication - 1):
duplication = self.create_polyline(
position_list=list_object[1], name=name_wind, matname=material_wind
)
duplication.rotate("Z", (j + 1) * 360 / number_duplication)
duplication_points = self.get_vertices_of_line(duplication.name)
success = duplication.set_crosssection_properties(
type=section, width=w_dia, num_seg=segment_number
)
list_duplicated_object.append([duplication, duplication_points])
returned_list = returned_list + list_duplicated_object
returned_list.insert(0, success)
return returned_list
@pyaedt_function_handler()
def _make_winding(self, name, material, in_rad, out_rad, height, teta, turns, chamf, sep_layer):
teta_r = radians(teta)
points_list1 = [
[in_rad * cos(teta_r), -in_rad * sin(teta_r), height / 2 - chamf],
[(in_rad + chamf) * cos(teta_r), -(in_rad + chamf) * sin(teta_r), height / 2],
[out_rad - chamf, 0, height / 2],
[out_rad, 0, height / 2 - chamf],
[out_rad, 0, -height / 2 + chamf],
[out_rad - chamf, 0, -height / 2],
[(in_rad + chamf) * cos(teta_r), (in_rad + chamf) * sin(teta_r), -height / 2],
[in_rad * cos(teta_r), in_rad * sin(teta_r), -height / 2 + chamf],
[in_rad * cos(teta_r), in_rad * sin(teta_r), height / 2 - chamf],
]
polyline = self.create_polyline(position_list=points_list1, name=name, matname=material)
union_polyline1 = [polyline.name]
if turns > 1:
union_polyline2 = polyline.duplicate_around_axis(
cs_axis="Z", angle=2 * teta, nclones=turns, create_new_objects=True
)
else:
union_polyline2 = []
union_polyline = union_polyline1 + union_polyline2
list_positions = []
for i in range(len(union_polyline)):
list_positions = list_positions + self.get_vertices_of_line(union_polyline[i])
self.delete(union_polyline)
if sep_layer:
for i in range(4):
list_positions.pop()
list_positions.insert(0, [list_positions[0][0], list_positions[0][1], -height])
list_positions.append([list_positions[-1][0], list_positions[-1][1], -height])
true_polyline = self.create_polyline(position_list=list_positions, name=name, matname=material)
true_polyline.rotate("Z", 180 - (turns - 1) * teta)
list_positions = self.get_vertices_of_line(true_polyline.name)
return [true_polyline, list_positions]
return list_positions
@pyaedt_function_handler()
def _make_double_linked_winding(
self,
name,
material,
in_rad,
out_rad,
height,
w_dia,
teta,
teta_in_wind,
turns,
turns_in_wind,
chamfer,
chamf_in_wind,
sr,
):
list_object = self._make_double_winding(
name,
material,
in_rad,
out_rad,
height,
w_dia,
teta,
teta_in_wind,
turns,
turns_in_wind,
chamfer,
chamf_in_wind,
sr,
False,
)
points_out_wind = list_object[0]
points_in_wind = list_object[1]
for i in range(2):
points_out_wind.pop(0)
points_out_wind.pop()
points_out_wind.pop()
points_out_wind[-1] = [points_out_wind[-2][0], points_out_wind[-2][1], -height]
points_in_wind.insert(0, [points_in_wind[0][0], points_in_wind[0][1], -height])
points_in_wind[-1] = [points_in_wind[-2][0], points_in_wind[-2][1], points_out_wind[1][2]]
points_in_wind.append([points_in_wind[-3][0], points_in_wind[-3][1], points_out_wind[0][2]])
outer_polyline = self.create_polyline(position_list=points_out_wind, name=name, matname=material)
outer_polyline.rotate("Z", 180 - (turns - 1) * teta)
inner_polyline = self.create_polyline(position_list=points_in_wind, name=name, matname=material)
inner_polyline.rotate("Z", 180 - (turns_in_wind - 1) * teta_in_wind)
outer_polyline.mirror([0, 0, 0], [0, -1, 0])
outer_polyline.rotate("Z", turns_in_wind * teta_in_wind - turns * teta)
list_polyline = [inner_polyline.name, outer_polyline.name]
list_positions = []
for i in range(len(list_polyline)):
list_positions = list_positions + self.get_vertices_of_line(list_polyline[i])
self.delete(list_polyline)
true_polyline = self.create_polyline(position_list=list_positions, name=name, matname=material)
return [true_polyline, list_positions]
@pyaedt_function_handler()
def _make_triple_linked_winding(
self,
name,
material,
in_rad,
out_rad,
height,
w_dia,
teta,
teta_mid_wind,
teta_in_wind,
turns,
turns_mid_wind,
turns_in_wind,
chamfer,
chamf_in_wind,
sr,
):
list_object = self._make_triple_winding(
name,
material,
in_rad,
out_rad,
height,
w_dia,
teta,
teta_mid_wind,
teta_in_wind,
turns + 1,
turns_mid_wind,
turns_in_wind,
chamfer,
chamf_in_wind,
sr,
False,
)
points_out_wind = list_object[0]
points_mid_wind = list_object[1]
points_in_wind = list_object[2]
for i in range(3):
points_out_wind.pop(0)
points_out_wind.pop(0)
points_out_wind.pop()
points_out_wind[-1] = [points_out_wind[-2][0], points_out_wind[-2][1], -height]
for i in range(2):
points_mid_wind.pop(0)
points_mid_wind.pop()
points_mid_wind.pop()
points_mid_wind[-1] = [points_mid_wind[-2][0], points_mid_wind[-2][1], points_out_wind[1][2]]
points_mid_wind.append([points_mid_wind[-4][0], points_mid_wind[-4][1], points_out_wind[0][2]])
points_in_wind.insert(0, [points_in_wind[0][0], points_in_wind[0][1], -height])
points_in_wind[-1] = [points_in_wind[-2][0], points_in_wind[-2][1], points_mid_wind[1][2]]
points_in_wind.append([points_in_wind[-3][0], points_in_wind[-3][1], points_mid_wind[0][2]])
outer_polyline = self.create_polyline(position_list=points_out_wind, name=name, matname=material)
outer_polyline.rotate("Z", 180 - (turns - 1) * teta)
mid_polyline = self.create_polyline(position_list=points_mid_wind, name=name, matname=material)
mid_polyline.rotate("Z", 180 - (turns_mid_wind - 1) * teta_mid_wind)
inner_polyline = self.create_polyline(position_list=points_in_wind, name=name, matname=material)
inner_polyline.rotate("Z", 180 - (turns_in_wind - 1) * teta_in_wind)
mid_polyline.mirror([0, 0, 0], [0, -1, 0])
outer_polyline.rotate("Z", turns * teta - turns_mid_wind * teta_mid_wind)
mid_polyline.rotate("Z", turns_in_wind * teta_in_wind - turns_mid_wind * teta_mid_wind)
outer_polyline.rotate("Z", turns_in_wind * teta_in_wind - turns_mid_wind * teta_mid_wind)
list_polyline = [inner_polyline.name, mid_polyline.name, outer_polyline.name]
list_positions = []
for i in range(len(list_polyline)):
list_positions = list_positions + self.get_vertices_of_line(list_polyline[i])
self.delete(list_polyline)
true_polyline = self.create_polyline(position_list=list_positions, name=name, matname=material)
return [true_polyline, list_positions]
@pyaedt_function_handler()
def _make_double_winding(
self,
name,
material,
in_rad,
out_rad,
height,
w_dia,
teta,
teta_in_wind,
turns,
turns_in_wind,
chamfer,
chamf_in_wind,
sr,
sep_layer,
):
chamf = self._make_winding_follow_chamfer(chamfer, sr, w_dia, 3)
in_rad_in_wind = in_rad + sr * w_dia
out_rad_in_wind = out_rad - sr * w_dia
height_in_wind = height - 2 * sr * w_dia
list_object = [
self._make_winding(name, material, in_rad, out_rad, height, teta, turns, chamf, sep_layer),
self._make_winding(
name,
material,
in_rad_in_wind,
out_rad_in_wind,
height_in_wind,
teta_in_wind,
turns_in_wind,
chamf_in_wind,
sep_layer,
),
]
return list_object
@pyaedt_function_handler()
def _make_triple_winding(
self,
name,
material,
in_rad,
out_rad,
height,
w_dia,
teta,
teta_mid_wind,
teta_in_wind,
turns,
turns_mid_wind,
turns_in_wind,
chamfer,
chamf_in_wind,
sr,
sep_layer,
):
chamf = self._make_winding_follow_chamfer(chamfer, sr, w_dia, 5)
chamf_mid_wind = self._make_winding_follow_chamfer(chamfer, sr, w_dia, 3)
in_rad_in_wind = in_rad + 2 * sr * w_dia
in_rad_mid_wind = in_rad + sr * w_dia
out_rad_in_wind = out_rad - 2 * sr * w_dia
out_rad_mid_wind = out_rad - sr * w_dia
height_in_wind = height - 4 * sr * w_dia
height_mid_wind = height - 2 * sr * w_dia
list_object = [
self._make_winding(name, material, in_rad, out_rad, height, teta, turns, chamf, sep_layer),
self._make_winding(
name,
material,
in_rad_mid_wind,
out_rad_mid_wind,
height_mid_wind,
teta_mid_wind,
turns_mid_wind,
chamf_mid_wind,
sep_layer,
),
self._make_winding(
name,
material,
in_rad_in_wind,
out_rad_in_wind,
height_in_wind,
teta_in_wind,
turns_in_wind,
chamf_in_wind,
sep_layer,
),
]
return list_object
@pyaedt_function_handler()
def _make_core(self, name, material, in_rad, out_rad, height, chamfer):
tool = self.create_cylinder("Z", [0, 0, -height / 2], in_rad, height, 0, "Tool", matname=material)
core = self.create_cylinder("Z", [0, 0, -height / 2], out_rad, height, 0, name=name, matname=material)
core.subtract(tool, False)
for n in core.edges:
n.chamfer(chamfer)
return core
@pyaedt_function_handler()
def check_choke_values(self, json_file, create_another_file=True):
"""Verify the values in the json file and create another one with corrected values next to the first one.
Parameters
----------
json_file : str
Full path to the JSON file containing all the parameters needed to design
your own choke.
create_another_file : bool, optional
Whether to create another file next to the first one, appending ``_Corrected``
to the file name, if ``True``. If ``False``, the existing file is overwritten.
The default is ``True``.
Returns
-------
list
List of the form ``[bool, dict]``, where the bool is ``True`` when the
parameters are consistent (``False`` otherwise) and the dict contains the
corrected values.
Examples
--------
The dictionary in the JSON file has to be like the following example:
dictionary = {
"Number of Windings": {"1": True, "2": False, "3": False, "4": False},
"Layer": {"Simple": True, "Double": False, "Triple": False},
"Layer Type": {"Separate": True, "Linked": False},
"Similar Layer": {"Similar": True, "Different": False},
"Mode": {"Differential": True, "Common": False},
"Wire Section": {"None": False, "Hexagon": False, "Octagon": True, "Circle": False},
"Core": {"Name": "Core", "Material": "ferrite", "Inner Radius": 11, "Outer Radius": 17, "Height": 7,
"Chamfer": 0.8},
"Outer Winding": {"Name": "Winding", "Material": "copper", "Inner Radius": 12, "Outer Radius": 16,
"Height": 8, "Wire Diameter": 1, "Turns": 10, "Coil Pit(deg)": 9, "Occupation(%)": 0},
"Mid Winding": {"Turns": 8, "Coil Pit(deg)": 0.1, "Occupation(%)": 0},
"Inner Winding": {"Turns": 12, "Coil Pit(deg)": 0.1, "Occupation(%)": 0}
}
>>> import json
>>> with open("C:/Example/Of/Path/myJsonFile.json", "w") as outfile:
...     json.dump(dictionary, outfile)
>>> from pyaedt import Hfss
>>> hfss = Hfss()
>>> dictionary_values = hfss.modeler.check_choke_values("C:/Example/Of/Path/myJsonFile.json")
"""
dictionary_model = {
"Number of Windings": {"1": True, "2": False, "3": False, "4": False},
"Layer": {"Simple": True, "Double": False, "Triple": False},
"Layer Type": {"Separate": True, "Linked": False},
"Similar Layer": {"Similar": True, "Different": False},
"Mode": {"Differential": True, "Common": False},
"Wire Section": {"None": False, "Hexagon": False, "Octagon": True, "Circle": False},
"Core": {
"Name": "Core",
"Material": "ferrite",
"Inner Radius": 11,
"Outer Radius": 17,
"Height": 7,
"Chamfer": 0.8,
},
"Outer Winding": {
"Name": "Winding",
"Material": "copper",
"Inner Radius": 12,
"Outer Radius": 16,
"Height": 8,
"Wire Diameter": 1,
"Turns": 10,
"Coil Pit(deg)": 9,
"Occupation(%)": 0,
},
"Mid Winding": {"Turns": 8, "Coil Pit(deg)": 0.1, "Occupation(%)": 0},
"Inner Winding": {"Turns": 12, "Coil Pit(deg)": 0.1, "Occupation(%)": 0},
}
are_inequations_checkable = True
security_factor = 1.1
sr = security_factor
with open(json_file, "r") as read_file:
values = json.load(read_file)
for key, value in dictionary_model.items():
if key not in values:
self.logger.error("Missing or incorrect key {}.".format(key))
return [False, values]
if isinstance(value, dict):
for k, v in value.items():
if k not in values[key]:
self.logger.error("Missing or incorrect key {}.".format(k))
return [False, values]
for f_key in values.keys():
count_true = False
if (
f_key == "Number of Windings"
or f_key == "Layer"
or f_key == "Layer Type"
or f_key == "Similar Layer"
or f_key == "Mode"
or f_key == "Wire Section"
):
for s_key in values[f_key].keys():
if isinstance(values[f_key][s_key], bool):
if count_true:
values[f_key][s_key] = False
if values[f_key][s_key]:
count_true = True
else:
self.logger.error(
"A character entered is invalid. The values of the dictionary %s must be boolean" % f_key
)
are_inequations_checkable = False
break
try:
core_name = str(values["Core"]["Name"])
if len(core_name) > 0:
values["Core"]["Name"] = core_name
except:
self.logger.warning("Core Name must be a non-null string. A default name Core has been set.")
values["Core"]["Name"] = "Core"
try:
core_material = str(values["Core"]["Material"])
if len(core_material) > 0:
if self.materials.checkifmaterialexists(core_material):
values["Core"]["Material"] = self.materials._get_aedt_case_name(core_material)
else:
self.logger.error(
"%s is not in the material library."
" It can be add using the method add_material" % core_material
)
values["Core"]["Material"] = "ferrite"
except:
self.logger.warning("Core Material must be a non-null string. A default material Core has been set.")
values["Core"]["Material"] = "ferrite"
try:
winding_name = str(values["Outer Winding"]["Name"])
if len(winding_name) > 0:
values["Outer Winding"]["Name"] = winding_name
except:
self.logger.warning("Outer Winding Name must be a non-null string. A default name Winding has been set.")
values["Outer Winding"]["Name"] = "Winding"
try:
winding_material = str(values["Outer Winding"]["Material"])
if len(winding_material) > 0:
if self.materials.checkifmaterialexists(winding_material):
values["Outer Winding"]["Material"] = self.materials._get_aedt_case_name(winding_material)
else:
self.logger.error(
"%s is not in the material library."
" It can be add using the method add_material" % winding_material
)
values["Outer Winding"]["Material"] = "copper"
except:
self.logger.warning(
"Outer Winding Material must be a non-null string." " A default material Winding has been set."
)
values["Outer Winding"]["Material"] = "copper"
in_rad_core, are_inequations_checkable = self._check_value_type(
values["Core"]["Inner Radius"],
float,
are_inequations_checkable,
"Inner Radius(Core)",
"a strictly positive float",
)
out_rad_core, are_inequations_checkable = self._check_value_type(
values["Core"]["Outer Radius"],
float,
are_inequations_checkable,
"Outer Radius(Core)",
"a strictly positive float",
)
height_core, are_inequations_checkable = self._check_value_type(
values["Core"]["Height"], float, are_inequations_checkable, "Height(Core)", "a strictly positive float"
)
try:
core_chamfer = float(values["Core"]["Chamfer"])
if core_chamfer < 0:
self.logger.error(
"The character entered is invalid. Chamfer must be a positive float." " It must be changed"
)
are_inequations_checkable = False
except:
self.logger.error(
"The character entered is invalid. Chamfer must be a positive float." " It must be changed"
)
are_inequations_checkable = False
in_rad_wind, are_inequations_checkable = self._check_value_type(
values["Outer Winding"]["Inner Radius"],
float,
are_inequations_checkable,
"Inner Radius(Winding)",
"a strictly positive float",
)
out_rad_wind, are_inequations_checkable = self._check_value_type(
values["Outer Winding"]["Outer Radius"],
float,
are_inequations_checkable,
"Outer Radius(Winding)",
"a strictly positive float",
)
height_wind, are_inequations_checkable = self._check_value_type(
values["Outer Winding"]["Height"],
float,
are_inequations_checkable,
"Height(Winding)",
"a strictly positive float",
)
turns, are_inequations_checkable = self._check_value_type(
values["Outer Winding"]["Turns"],
int,
are_inequations_checkable,
"Turns(Outer Winding)",
"a strictly positive integer",
)
wind_pit, are_inequations_checkable = self._check_value_type(
values["Outer Winding"]["Coil Pit(deg)"],
float,
are_inequations_checkable,
"Coil Pit(Outer Winding)",
"a strictly positive float",
)
dia_wire, are_inequations_checkable = self._check_value_type(
values["Outer Winding"]["Wire Diameter"],
float,
are_inequations_checkable,
"Wire Diameter",
"a strictly positive float",
)
turns2, are_inequations_checkable = self._check_value_type(
values["Mid Winding"]["Turns"],
int,
are_inequations_checkable,
"Turns(Mid Winding)",
"a strictly positive integer",
)
wind_pit2, are_inequations_checkable = self._check_value_type(
values["Mid Winding"]["Coil Pit(deg)"],
float,
are_inequations_checkable,
"Coil Pit(Mid Winding)",
"a strictly positive float",
)
turns3, are_inequations_checkable = self._check_value_type(
values["Inner Winding"]["Turns"],
int,
are_inequations_checkable,
"Turns(Inner Winding)",
"a strictly positive integer",
)
wind_pit3, are_inequations_checkable = self._check_value_type(
values["Inner Winding"]["Coil Pit(deg)"],
float,
are_inequations_checkable,
"Coil Pit(Inner Winding)",
"a strictly positive float",
)
if are_inequations_checkable:
teta = radians(wind_pit)
teta2 = radians(wind_pit2)
teta3 = radians(wind_pit3)
nb_wind = 1
if values["Number of Windings"]["2"]:
nb_wind = 2
if values["Number of Windings"]["3"]:
nb_wind = 3
if values["Number of Windings"]["4"]:
nb_wind = 4
nb_lay = 0
if values["Layer"]["Double"]:
nb_lay = 2
if values["Layer"]["Triple"]:
nb_lay = 4
if in_rad_wind > in_rad_core - (nb_lay + 1) * sr * dia_wire / 2:
in_rad_wind = in_rad_core - (nb_lay + 1) * sr * dia_wire / 2
values["Outer Winding"]["Inner Radius"] = in_rad_wind
self.logger.warning("Inner Radius of the winding is too high. The maximum value has been set instead.")
if out_rad_wind < out_rad_core + (nb_lay + 1) * sr * dia_wire / 2:
out_rad_wind = out_rad_core + (nb_lay + 1) * sr * dia_wire / 2
values["Outer Winding"]["Outer Radius"] = out_rad_wind
self.logger.warning("Outer Radius of the winding is too low. The minimum value has been set instead.")
if height_wind < height_core + (nb_lay + 1) * sr * dia_wire:
height_wind = height_core + (nb_lay + 1) * sr * dia_wire
values["Outer Winding"]["Height"] = height_wind
self.logger.warning("Height of the winding is too low. The minimum value has been set instead.")
if asin((sr * dia_wire / 2) / in_rad_wind) > pi / nb_wind / turns:
turns = int(pi / nb_wind / asin((sr * dia_wire / 2) / in_rad_wind))
values["Outer Winding"]["Turns"] = turns
self.logger.warning(
"Number of turns of the winding is too high. The maximum value has been set instead."
)
if teta > pi / nb_wind / turns:
teta = GeometryOperators.degrees_default_rounded(pi / nb_wind / turns, 3)
values["Outer Winding"]["Coil Pit(deg)"] = teta
self.logger.warning("Winding Pit is too high. The maximum value has been set instead.")
elif teta < asin((sr * dia_wire / 2) / in_rad_wind):
teta = GeometryOperators.degrees_over_rounded(asin((sr * dia_wire / 2) / in_rad_wind), 3)
values["Outer Winding"]["Coil Pit(deg)"] = teta
self.logger.warning("Winding Pit is too low. The minimum value has been set instead.")
else:
teta = degrees(teta)
occ = 100 * turns * teta / (180 / nb_wind)
if occ == 100:
teta = teta - 0.0003
values["Outer Winding"]["Coil Pit(deg)"] = teta
if teta < asin((sr * dia_wire / 2) / in_rad_wind) and turns > 1:
turns = turns - 1
occ = 100 * turns * teta / (180 / nb_wind)
values["Outer Winding"]["Occupation(%)"] = occ
if values["Similar Layer"]["Different"]:
if values["Layer"]["Double"] or values["Layer"]["Triple"]:
if asin((sr * dia_wire / 2) / (in_rad_wind + sr * dia_wire)) > pi / nb_wind / turns2:
turns2 = int(pi / nb_wind / asin((sr * dia_wire / 2) / (in_rad_wind + sr * dia_wire)))
values["Mid Winding"]["Turns"] = turns2
self.logger.warning(
"Number of turns of the winding of the second layer is too high. "
"The maximum value has been set instead."
)
if turns2 < turns:
turns2 = turns
values["Mid Winding"]["Turns"] = turns2
self.logger.warning(
"Number of turns of the winding of the second layer should be "
"at least equal to those of the first layer."
)
if teta2 > pi / nb_wind / turns2:
teta2 = GeometryOperators.degrees_default_rounded(pi / nb_wind / turns2, 3)
values["Mid Winding"]["Coil Pit(deg)"] = teta2
self.logger.warning(
"Winding Pit of the second layer is too high. The maximum value has been set instead."
)
elif teta2 < asin((sr * dia_wire / 2) / (in_rad_wind + sr * dia_wire)):
teta2 = GeometryOperators.degrees_over_rounded(
asin((sr * dia_wire / 2) / (in_rad_wind + sr * dia_wire)), 3
)
values["Mid Winding"]["Coil Pit(deg)"] = teta2
self.logger.warning(
"Winding Pit of the second layer is too low. The minimum value has been set instead."
)
else:
teta2 = degrees(teta2)
values["Mid Winding"]["Coil Pit(deg)"] = teta2
occ2 = 100 * turns2 * teta2 / (180 / nb_wind)
if occ2 < occ:
teta2 = ceil(turns * teta / turns2 * 1000) / 1000
values["Mid Winding"]["Coil Pit(deg)"] = teta2
occ2 = 100 * turns2 * teta2 / (180 / nb_wind)
self.logger.warning(
"Occupation of the second layer should be at least equal to that of the first layer."
)
if occ2 == 100:
teta2 = teta2 - 0.0002
values["Mid Winding"]["Coil Pit(deg)"] = teta2
occ2 = 100 * turns2 * teta2 / (180 / nb_wind)
values["Mid Winding"]["Occupation(%)"] = occ2
# TODO if occ2 == 100: method can be improved
if values["Layer"]["Triple"]:
if asin((sr * dia_wire / 2) / (in_rad_wind + 2 * sr * dia_wire)) > pi / nb_wind / turns3:
turns3 = int(pi / nb_wind / asin((sr * dia_wire / 2) / (in_rad_wind + 2 * sr * dia_wire)))
values["Inner Winding"]["Turns"] = turns3
self.logger.warning(
"Number of turns of the winding of the third layer is too high. "
"The maximum value has been set instead."
)
if turns3 < turns2:
turns3 = turns2
values["Inner Winding"]["Turns"] = turns3
self.logger.warning(
"Number of turns of the winding of the third layer should be "
"at least equal to those of the second layer."
)
if teta3 > pi / nb_wind / turns3:
teta3 = GeometryOperators.degrees_default_rounded(pi / nb_wind / turns3, 3)
values["Inner Winding"]["Coil Pit(deg)"] = teta3
self.logger.warning(
"Winding Pit of the third layer is too high. The maximum value has been set instead."
)
elif teta3 < asin((sr * dia_wire / 2) / (in_rad_wind + 2 * sr * dia_wire)):
teta3 = GeometryOperators.degrees_over_rounded(
asin((sr * dia_wire / 2) / (in_rad_wind + 2 * sr * dia_wire)), 3
)
values["Inner Winding"]["Coil Pit(deg)"] = teta3
self.logger.warning(
"Winding Pit of the third layer is too low. The minimum value has been set instead."
)
else:
teta3 = degrees(teta3)
values["Inner Winding"]["Coil Pit(deg)"] = teta3
occ3 = 100 * turns3 * teta3 / (180 / nb_wind)
if occ3 < occ2:
teta3 = ceil(turns2 * teta2 / turns3 * 1000) / 1000
values["Inner Winding"]["Coil Pit(deg)"] = teta3
occ3 = 100 * turns3 * teta3 / (180 / nb_wind)
if occ3 == 100:
teta3 = teta3 - 0.0001
values["Inner Winding"]["Coil Pit(deg)"] = teta3
occ3 = 100 * turns3 * teta3 / (180 / nb_wind)
values["Inner Winding"]["Occupation(%)"] = occ3
# TODO if occ3 == 100: method can be improved
else:
values["Mid Winding"]["Coil Pit(deg)"] = teta
values["Inner Winding"]["Coil Pit(deg)"] = teta
values["Mid Winding"]["Turns"] = turns
values["Inner Winding"]["Turns"] = turns
values["Mid Winding"]["Occupation(%)"] = occ
values["Inner Winding"]["Occupation(%)"] = occ
if create_another_file:
root_path, extension_path = os.path.splitext(json_file)
new_path = root_path + "_Corrected" + extension_path
with open(new_path, "w") as outfile:
json.dump(values, outfile)
else:
with open(json_file, "w") as outfile:
json.dump(values, outfile)
return [are_inequations_checkable, values]
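# Note added for clarity: the corrections above enforce, for a winding layer of
# radius r (r = in_rad_wind, in_rad_wind + sr * dia_wire and in_rad_wind + 2 * sr * dia_wire
# for the outer, mid and inner windings respectively):
#     minimum coil pitch (rad): asin((sr * dia_wire / 2) / r)
#     maximum coil pitch (rad): pi / (nb_wind * turns)
#     occupation (%):           100 * turns * pitch_deg / (180 / nb_wind)
# Pitches outside these bounds are clamped, turn counts are capped accordingly, and an
# occupation of exactly 100% is reduced slightly by decrementing the pitch.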
@pyaedt_function_handler()
def _make_winding_follow_chamfer(self, chamfer, security_factor, wire_diameter, layer_number):
sr = security_factor
w_rad_inc = layer_number * sr * wire_diameter / 2
distance = sqrt(2 * w_rad_inc**2) - w_rad_inc + sqrt(2 * chamfer**2) / 2
return sqrt(2) * distance
@pyaedt_function_handler()
def _check_value_type(self, taken_value, value_type, are_inequations_checkable, part_message1, part_message2):
    # Validate that taken_value converts to a strictly positive int or float.
    receiving_variable = None
    error_message = (
        "The value entered is invalid. " + part_message1 + " must be " + part_message2 + ". It must be changed."
    )
    if value_type in (int, float):
        try:
            receiving_variable = value_type(taken_value)
            if receiving_variable <= 0:
                self.logger.error(error_message)
                are_inequations_checkable = False
        except (TypeError, ValueError):
            receiving_variable = None
            self.logger.error(error_message)
            are_inequations_checkable = False
    return receiving_variable, are_inequations_checkable
|
StarcoderdataPython
|
1749726
|
import os
import tensorflow as tf
import operator
from pathlib import Path
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification, AutoConfig
from .config import model_params, model_location
from .get_weights import Download
from .label_mapping import LabelMapping
from . import __version__
class DialogTag:
def __init__(self, model_name):
self.__model_name = model_name
self.__lib_path = f"{str(Path.home())}"+ model_location["MODEL"]
self.__model_path = os.path.join(self.__lib_path, self.__model_name)
self.__label_mapping_path = os.path.join(self.__lib_path, self.__model_name) + model_location["label_mapping"]
# print(self.__lib_path, self.__model_path, self.__label_mapping_path)
path_exists = os.path.exists(self.__model_path)
self.__num = 0
if path_exists:
    self.__num = len(os.listdir(self.__model_path))
if self.__num < 3 or not path_exists:
print("Model not found in cache. Downloading...")
self.__model_file = Download(self.__model_name)
self.__model_file.download_file()
else:
print(f"{self.__model_name} found in cache. Loading model...")
self.__tokenizer = AutoTokenizer.from_pretrained(self.__model_name, do_lower_case=True)
self.__config = AutoConfig.from_pretrained(self.__model_path, num_labels=model_params["num_labels"])
self.__model = TFAutoModelForSequenceClassification.from_pretrained(self.__model_path, config=self.__config)
def __classhelper(self):
mapping_object = LabelMapping(self.__label_mapping_path)
logits_class, class_expanded = mapping_object.helper()
return logits_class, class_expanded
def predict_tag(self, sentence):
predict_input = self.__tokenizer.encode(sentence,
truncation=True,
padding=True,
return_tensors="tf")
tf_output = self.__model.predict(predict_input)[0]
tf_prediction = tf.nn.softmax(tf_output, axis=1).numpy()[0]
index, value = max(enumerate(tf_prediction), key=operator.itemgetter(1))
# print(value)
logits_class, class_expanded = self.__classhelper()
return class_expanded[logits_class[str(index)]]
# test_sentence = "With their homes in ashes, residents share harrowing tales of survival after massive wildfires kill 15"
if __name__=='__main__':
A = DialogTag('distilbert-base-uncased')
# z = A.predict_tag("With their homes in ashes, residents share harrowing tales of survival after massive wildfires kill 15")
z = A.predict_tag("Stop talking silly!")
print(z)
|
StarcoderdataPython
|
3278256
|
<filename>docs/_mocked_modules/ctypes/__init__.py
"""Bare minimum mock version of ctypes.
This shadows the real ctypes module when building the documentation,
so that :mod:`rubicon.objc` can be imported by Sphinx autodoc even when no Objective-C runtime is available.
This module only emulates enough of ctypes to make the docs build.
Most parts are in no way accurately implemented, and some ctypes features are missing entirely.
Parts of this file are based on the source code of the ctypes module from CPython,
under the terms of the PSF License Version 2, included below.
The code in question has all parts removed that we don't need,
and any remaining dependencies on the native _ctypes module have been replaced with pure Python code.
Specifically, the following parts are (partially) based on CPython source code:
* the definitions of the "ctypes primitive types" (the :class:`_SimpleCData` subclasses and their aliases)
* the implementations of :func:`CFUNCTYPE` and :func:`PYFUNCTYPE`
* the implementations of :class:`CDLL`, :class:`PyDLL` and :class:`LibraryLoader`
* the definitions of the :data:`pythonapi`, :data:`cdll` and :data:`pydll` globals
PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
--------------------------------------------
1. This LICENSE AGREEMENT is between the Python Software Foundation
("PSF"), and the Individual or Organization ("Licensee") accessing and
otherwise using this software ("Python") in source or binary form and
its associated documentation.
2. Subject to the terms and conditions of this License Agreement, PSF hereby
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
analyze, test, perform and/or display publicly, prepare derivative works,
distribute, and otherwise use Python alone or in any derivative version,
provided, however, that PSF's License Agreement and PSF's notice of copyright,
i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Python Software Foundation;
All Rights Reserved" are retained in Python alone or in any derivative version
prepared by Licensee.
3. In the event Licensee prepares a derivative work that is based on
or incorporates Python or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python.
4. PSF is making Python available to Licensee on an "AS IS"
basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.
5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between PSF and
Licensee. This License Agreement does not grant permission to use PSF
trademarks or trade name in a trademark sense to endorse or promote
products or services of Licensee, or any third party.
8. By copying, installing or otherwise using Python, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.
"""
import struct
# We pretend to be a 64-bit system.
_POINTER_SIZE = 8
class ArgumentError(Exception):
pass
_array_type_cache = {}
class _CDataMeta(type):
def __mul__(self, count):
try:
return _array_type_cache[self, count]
except KeyError:
array_type = type(
"{}_Array_{}".format(self.__name__, str(count)),
(Array,),
{'_type_': self, '_length_': count},
)
_array_type_cache[self, count] = array_type
return array_type
class _CData(object, metaclass=_CDataMeta):
@classmethod
def from_address(cls, address):
return cls()
@classmethod
def in_dll(cls, dll, name):
return cls()
def _auto_unwrap(self):
return self
class _SimpleCData(_CData):
@classmethod
def _sizeof(cls):
return struct.calcsize(cls._type_)
def __new__(cls, value=None):
self = super().__new__(cls)
self.value = value if value is not None else cls._DEFAULT_VALUE
return self
def __init__(self, value=None):
pass
def _auto_unwrap(self):
if _SimpleCData in type(self).__bases__:
return self.value
else:
return self
class py_object(_SimpleCData):
_type_ = "O"
_DEFAULT_VALUE = None
@classmethod
def _sizeof(cls):
return _POINTER_SIZE
class c_short(_SimpleCData):
_DEFAULT_VALUE = 0
_type_ = "h"
class c_ushort(_SimpleCData):
_DEFAULT_VALUE = 0
_type_ = "H"
class c_long(_SimpleCData):
_DEFAULT_VALUE = 0
_type_ = "l"
class c_ulong(_SimpleCData):
_DEFAULT_VALUE = 0
_type_ = "L"
class c_int(_SimpleCData):
_DEFAULT_VALUE = 0
_type_ = "i"
class c_uint(_SimpleCData):
_DEFAULT_VALUE = 0
_type_ = "I"
class c_float(_SimpleCData):
_DEFAULT_VALUE = 0.0
_type_ = "f"
class c_double(_SimpleCData):
_DEFAULT_VALUE = 0.0
_type_ = "d"
class c_longdouble(_SimpleCData):
_DEFAULT_VALUE = 0.0
_type_ = "g"
c_longlong = c_long
c_ulonglong = c_ulong
class c_ubyte(_SimpleCData):
_DEFAULT_VALUE = 0
_type_ = "B"
class c_byte(_SimpleCData):
_DEFAULT_VALUE = 0
_type_ = "b"
class c_char(_SimpleCData):
_DEFAULT_VALUE = b'\x00'
_type_ = "c"
class c_char_p(_SimpleCData):
_DEFAULT_VALUE = None
_type_ = "z"
@classmethod
def _sizeof(cls):
return _POINTER_SIZE
class c_void_p(_SimpleCData):
_DEFAULT_VALUE = None
_type_ = "P"
@classmethod
def _sizeof(cls):
return _POINTER_SIZE
class c_bool(_SimpleCData):
_DEFAULT_VALUE = False
_type_ = "?"
class c_wchar_p(_SimpleCData):
_DEFAULT_VALUE = None
_type_ = "Z"
@classmethod
def _sizeof(cls):
return _POINTER_SIZE
class c_wchar(_SimpleCData):
_DEFAULT_VALUE = '\x00'
_type_ = "u"
c_size_t = c_ulong
c_ssize_t = c_long
c_int8 = c_byte
c_uint8 = c_ubyte
c_int16 = c_short
c_uint16 = c_ushort
c_int32 = c_int
c_uint32 = c_uint
c_int64 = c_long
c_uint64 = c_ulong
class _Pointer(_CData):
pass
_pointer_type_cache = {None: c_void_p}
def POINTER(ctype):
try:
return _pointer_type_cache[ctype]
except KeyError:
pointer_ctype = type('LP_{}'.format(ctype.__name__), (_Pointer,), {'_type_': ctype})
_pointer_type_cache[ctype] = pointer_ctype
return pointer_ctype
def pointer(cvalue):
return POINTER(type(cvalue))(cvalue)
class Array(_CData):
pass
class Structure(_CData):
def __init__(self, *args):
super().__init__()
if args:
for (name, _ctype), value in zip(type(self)._fields_, args):
setattr(self, name, value)
else:
for name, ctype in type(self)._fields_:
setattr(self, name, ctype()._auto_unwrap())
class Union(_CData):
pass
class CFuncPtr(_CData):
_restype_ = None
_argtypes_ = ()
def __init__(self, src):
super().__init__()
if isinstance(src, tuple):
(name, dll) = src
self._func_name = name
self._dll_name = dll._name
else:
self._func_name = None
self._dll_name = None
self.restype = type(self)._restype_
self.argtypes = type(self)._argtypes_
def __call__(self, *args):
if self.restype is None:
return None
else:
if self._dll_name == 'objc' and self._func_name in {'objc_getClass', 'objc_getProtocol'}:
res = self.restype(hash(args[0]))
else:
res = self.restype()
return res._auto_unwrap()
_c_functype_cache = {}
def CFUNCTYPE(restype, *argtypes):
try:
return _c_functype_cache[(restype, argtypes)]
except KeyError:
class CFunctionType(CFuncPtr):
_argtypes_ = argtypes
_restype_ = restype
_c_functype_cache[(restype, argtypes)] = CFunctionType
return CFunctionType
def PYFUNCTYPE(restype, *argtypes):
class CFunctionType(CFuncPtr):
_argtypes_ = argtypes
_restype_ = restype
return CFunctionType
def sizeof(ctype):
return ctype._sizeof()
def addressof(cvalue):
return id(cvalue)
def alignment(ctype):
return sizeof(ctype)
def byref(ctype):
return pointer(ctype)
def cast(cvalue, ctype):
if isinstance(cvalue, ctype):
return cvalue
else:
return ctype(cvalue.value)
def memmove(dst, src, count):
raise NotImplementedError('memmove({}, {}, {})'.format(dst, src, count))
def string_at(address):
return c_char_p(b'')
class CDLL(object):
_func_restype_ = c_int
def __init__(self, name):
super().__init__()
self._name = name
class _FuncPtr(CFuncPtr):
_restype_ = self._func_restype_
self._FuncPtr = _FuncPtr
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
raise AttributeError(name)
func = self.__getitem__(name)
setattr(self, name, func)
return func
def __getitem__(self, name_or_ordinal):
func = self._FuncPtr((name_or_ordinal, self))
if not isinstance(name_or_ordinal, int):
func.__name__ = name_or_ordinal
return func
class PyDLL(CDLL):
pass
pythonapi = PyDLL(None)
class LibraryLoader(object):
def __init__(self, dlltype):
self._dlltype = dlltype
def __getattr__(self, name):
if name[0] == '_':
raise AttributeError(name)
dll = self._dlltype(name)
setattr(self, name, dll)
return dll
def __getitem__(self, name):
return getattr(self, name)
def LoadLibrary(self, name):
return self._dlltype(name)
cdll = LibraryLoader(CDLL)
pydll = LibraryLoader(PyDLL)
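# A short, illustrative self-test (added as a sketch; it is not required for the docs
# build and only exercises behaviour defined above in this mock):
if __name__ == '__main__':
    # The mock pretends to be a 64-bit platform.
    assert sizeof(c_void_p) == _POINTER_SIZE == 8
    # Multiplying a ctype by an int builds (and caches) an Array subclass.
    IntArray4 = c_int * 4
    assert IntArray4._length_ == 4 and (c_int * 4) is IntArray4
    # Pointer types are created lazily and cached.
    assert POINTER(c_int) is POINTER(c_int)
    # Loaded libraries hand out stub function pointers that return a default instance
    # of their restype (c_int, i.e. 0), except for the special-cased objc_getClass /
    # objc_getProtocol lookups, which return a hash-based handle.
    libobjc = cdll.LoadLibrary('objc')
    assert libobjc.class_getName('Foo') == 0
    assert isinstance(libobjc.objc_getClass('NSObject'), int)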
|
StarcoderdataPython
|
3290374
|
"""Shared exceptions for the august integration."""
from openpeerpower import exceptions
class RequireValidation(exceptions.OpenPeerPowerError):
"""Error to indicate we require validation (2fa)."""
class CannotConnect(exceptions.OpenPeerPowerError):
"""Error to indicate we cannot connect."""
class InvalidAuth(exceptions.OpenPeerPowerError):
"""Error to indicate there is invalid auth."""
|
StarcoderdataPython
|
1626442
|
# Execute a trajectory in force mode.
# Author: <NAME>
# Force mode example
import robot_controller
# communicate with a robot
manipulator = robot_controller.Ur3("192.168.3.11", 30003, 30002)
force_traj = list()
pose = manipulator.get_pose()
pose[2] -= 0.1
for i in range(3):
pose[1] += 0.05
pt = manipulator.create_move_command(pose, is_movej=False, a=0.2, v=0.2)
force_traj.append(pt)
print(manipulator.execute_in_force_mode(force_traj))
|
StarcoderdataPython
|
4834086
|
<gh_stars>1-10
import numpy as np
classes = ["aeroplane", "bicycle", "bird", "boat", "bottle"]
datasets_path = './datasets/jinnan2_round1_train_20190305/'
anchor_yolov2 = [[2.8523827,2.4452496 ],
[1.3892268,1.8958333 ],
[1.6490009,0.95596665],
[0.7680278,1.3883946 ],
[0.5605738,0.69167805]]
epochs_start = 208
batch_size = 10
start_lr = 0.0001
save_dir = './608/'
YOLO = {
'pretrain_model': './darknet53.conv.74',
'image_size': [608, 608],
'featmap_size': [19, 19],
'class_num': len(classes),
'anchor': anchor_yolov2
}
'''
anchor_big = np.array([[116, 90], [156, 198], [373, 326]]) / 32
anchor_medium = np.array([[30, 61], [62, 45], [59, 119]]) / 32
anchor_small = np.array([[10, 13], [16, 30], [33, 23]]) / 32
anchor = [anchor_big, anchor_medium, anchor_small]
'''
'''
img_size = 608
feat_size = img_size // 32
anchor_big = np.array([[116, 90], [156, 198], [373, 326]]) / 32
anchor_medium = np.array([[30, 61], [62, 45], [59, 119]]) / 16
anchor_small = np.array([[10, 13], [16, 30], [33, 23]]) / 8
anchor_big = np.array([[0.127, 0.158], [0.1574, 0.068], [0.0452, 0.085]]) * (feat_size)
anchor_medium = np.array([[0.0643, 0.189], [0.249, 0.184], [0.0217, 0.0628]]) * (2 * feat_size)
anchor_small = np.array([[0.0869, 0.0976], [0.077, 0.0485], [0.0461, 0.0282]]) * (4 * feat_size)
anchor_wh = [anchor_big, anchor_medium, anchor_small]
anchor = [anchor_big, anchor_medium, anchor_small]
feat = [[feat_size, feat_size], [feat_size * 2, feat_size * 2], [feat_size * 4, feat_size * 4]]
batch_size = 8
start_lr = 0.001
save_dir = './YOLOV3/'
YOLO = {
'pretrain_model': './darknet53.conv.74',
'image_size': [img_size, img_size],
'featmap_size': feat,
'class_num': len(classes),
'anchor': anchor
}
'''
|
StarcoderdataPython
|
1793086
|
import os
GH_NAME = os.environ["GH_NAME"]
GH_EMAIL = os.environ["GH_EMAIL"]
print("GH_NAME:", GH_NAME, ",GH_EMAIL:", GH_EMAIL)
|
StarcoderdataPython
|
1739120
|
<reponame>GilraGroup/baconian-project
from baconian.algo.algo import Algo
from baconian.algo.dynamics.dynamics_model import DynamicsModel
from baconian.core.core import EnvSpec
from baconian.common.logging import record_return_decorator
import numpy as np
class ModelFreeAlgo(Algo):
def __init__(self, env_spec: EnvSpec, name: str = 'model_free_algo', warm_up_trajectories_number=0):
super(ModelFreeAlgo, self).__init__(env_spec, name, warm_up_trajectories_number)
class OnPolicyAlgo(Algo):
pass
class OffPolicyAlgo(Algo):
pass
class ValueBasedAlgo(Algo):
pass
class PolicyBasedAlgo(Algo):
pass
class ModelBasedAlgo(Algo):
def __init__(self, env_spec, dynamics_model: DynamicsModel, name: str = 'model_based_algo'):
super(ModelBasedAlgo, self).__init__(env_spec, name)
self._dynamics_model = dynamics_model
self.dynamics_env = self._dynamics_model.return_as_env()
def train_dynamics(self, *args, **kwargs):
pass
@record_return_decorator(which_recorder='self')
def test_dynamics(self, env, sample_count, *args, **kwargs):
self.set_status('TEST')
env.set_status('TEST')
st = env.reset()
real_state_list = []
dynamics_state_list = []
for i in range(sample_count):
ac = self.env_spec.action_space.sample()
self._dynamics_model.reset_state(state=st)
new_state_dynamics, _, _, _ = self.dynamics_env.step(action=ac, )
new_state_real, _, done, _ = env.step(action=ac)
real_state_list.append(new_state_real)
dynamics_state_list.append(new_state_dynamics)
st = new_state_real
if done is True:
env.reset()
l1_loss = np.linalg.norm(np.array(real_state_list) - np.array(dynamics_state_list), ord=1)
l2_loss = np.linalg.norm(np.array(real_state_list) - np.array(dynamics_state_list), ord=2)
return dict(dynamics_test_l1_error=l1_loss, dynamics_test_l2_error=l2_loss)
def set_terminal_reward_function_for_dynamics_env(self, terminal_func, reward_func):
self.dynamics_env.set_terminal_reward_func(terminal_func, reward_func)
|
StarcoderdataPython
|
1645069
|
# coding=utf-8
# Author: <NAME> Cruz <<EMAIL>>
#
# License: BSD 3 clause
"""
====================================================================
Dynamic selection with linear classifiers: XOR example
====================================================================
This example shows that DS can deal with a non-linear problem (XOR) using
a combination of a few linear base classifiers.
- 10 dynamic selection methods (5 DES and 5 DCS) are evaluated with
a pool composed of Decision stumps.
- Since we use Bagging to generate the base classifiers, we also
included its performance as a baseline comparison.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from deslib.dcs import LCA
from deslib.dcs import MLA
from deslib.dcs import OLA
from deslib.dcs import MCB
from deslib.dcs import Rank
from deslib.des import DESKNN
from deslib.des import KNORAE
from deslib.des import KNORAU
from deslib.des import KNOP
from deslib.des import METADES
from deslib.util.datasets import make_xor
###############################################################################
# Defining helper functions to facilitate plotting the decision boundaries:
def plot_classifier_decision(ax, clf, X, mode='line', **params):
xx, yy = make_grid(X[:, 0], X[:, 1])
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
if mode == 'line':
ax.contour(xx, yy, Z, **params)
else:
ax.contourf(xx, yy, Z, **params)
ax.set_xlim((np.min(X[:, 0]), np.max(X[:, 0])))
ax.set_ylim((np.min(X[:, 1]), np.max(X[:, 1])))
def plot_dataset(X, y, ax=None, title=None, **params):
if ax is None:
ax = plt.gca()
ax.scatter(X[:, 0], X[:, 1], marker='o', c=y, s=25,
edgecolor='k', **params)
ax.set_xlabel('Feature 1')
ax.set_ylabel('Feature 2')
if title is not None:
ax.set_title(title)
return ax
def make_grid(x, y, h=.02):
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
# Prepare the DS techniques. Changing k value to 5.
def initialize_ds(pool_classifiers, X, y, k=5):
knorau = KNORAU(pool_classifiers, k=k)
kne = KNORAE(pool_classifiers, k=k)
desknn = DESKNN(pool_classifiers, k=k)
ola = OLA(pool_classifiers, k=k)
lca = LCA(pool_classifiers, k=k)
mla = MLA(pool_classifiers, k=k)
mcb = MCB(pool_classifiers, k=k)
rank = Rank(pool_classifiers, k=k)
knop = KNOP(pool_classifiers, k=k)
meta = METADES(pool_classifiers, k=k)
list_ds = [knorau, kne, ola, lca, mla, desknn, mcb, rank, knop, meta]
names = ['KNORA-U', 'KNORA-E', 'OLA', 'LCA', 'MLA', 'DESKNN', 'MCB',
'RANK', 'KNOP', 'META-DES']
# fit the ds techniques
for ds in list_ds:
ds.fit(X, y)
return list_ds, names
###############################################################################
# Generating the dataset and training the pool of classifiers.
#
rng = np.random.RandomState(1234)
X, y = make_xor(1000, random_state=rng)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=rng)
X_DSEL, X_test, y_DSEL, y_test = train_test_split(X_train, y_train,
test_size=0.5,
random_state=rng)
pool_classifiers = BaggingClassifier(DecisionTreeClassifier(max_depth=1),
n_estimators=10,
random_state=rng)
pool_classifiers.fit(X_train, y_train)
###############################################################################
# Merging training and validation data to compose DSEL
# -----------------------------------------------------
# In this example we merge the training data with the validation data to create
# a DSEL with more examples for the competence estimation. Using the training
# data for dynamic selection can be beneficial when dealing with small sample
# size datasets. However, in this case we need a pool composed of weak
# classifiers so that the base classifiers are not able to memorize the
# training data (overfit).
X_DSEL = np.vstack((X_DSEL, X_train))
y_DSEL = np.hstack((y_DSEL, y_train))
list_ds, names = initialize_ds(pool_classifiers, X_DSEL, y_DSEL, k=7)
fig, sub = plt.subplots(4, 3, figsize=(13, 10))
plt.subplots_adjust(wspace=0.4, hspace=0.4)
ax_data = sub.flatten()[0]
ax_bagging = sub.flatten()[1]
plot_dataset(X_train, y_train, ax=ax_data, title="Training data")
plot_dataset(X_train, y_train, ax=ax_bagging)
plot_classifier_decision(ax_bagging, pool_classifiers,
X_train, mode='filled', alpha=0.4)
ax_bagging.set_title("Bagging")
# Plotting the decision border of the DS methods
for ds, name, ax in zip(list_ds, names, sub.flatten()[2:]):
plot_dataset(X_train, y_train, ax=ax)
plot_classifier_decision(ax, ds, X_train, mode='filled', alpha=0.4)
ax.set_xlim((np.min(X_train[:, 0]) - 0.1, np.max(X_train[:, 0] + 0.1)))
ax.set_ylim((np.min(X_train[:, 1]) - 0.1, np.max(X_train[:, 1] + 0.1)))
ax.set_title(name)
plt.show()
plt.tight_layout()
###############################################################################
# Evaluation on the test set
# --------------------------
#
# Finally, let's evaluate the classification accuracy of DS techniques and
# Bagging on the test set:
for ds, name in zip(list_ds, names):
print('Accuracy ' + name + ': ' + str(ds.score(X_test, y_test)))
print('Accuracy Bagging: ' + str(pool_classifiers.score(X_test, y_test)))
|
StarcoderdataPython
|
1617007
|
<reponame>Formulka/django-GDPR<gh_stars>10-100
from typing import Callable
from django.contrib.contenttypes.models import ContentType
from django.db.models import Model
from django.test import TestCase
from gdpr.models import AnonymizedData
from germanium.tools import assert_false, assert_true
class NotImplementedMixin(TestCase):
def assertNotImplemented(self, func: Callable, *args, **kwargs) -> None:
try:
func(*args, **kwargs)
except AssertionError as exc:
print("NOT IMPLEMENTED:", self.id(), exc)
else:
raise AssertionError("Function Implemented successfully!!")
def assertNotImplementedNotEqual(self, *args, **kwargs):
self.assertNotImplemented(self.assertNotEqual, *args, **kwargs)
class AnonymizedDataMixin(TestCase):
def assertAnonymizedDataExists(self, obj: Model, field: str):
content_type = ContentType.objects.get_for_model(obj.__class__)
assert_true(
AnonymizedData.objects.filter(content_type=content_type, object_id=str(obj.pk), field=field).exists())
def assertAnonymizedDataNotExists(self, obj: Model, field: str):
content_type = ContentType.objects.get_for_model(obj.__class__)
assert_false(
AnonymizedData.objects.filter(content_type=content_type, object_id=str(obj.pk), field=field).exists())
|
StarcoderdataPython
|
1774304
|
<reponame>PolicyStat/terrarium<filename>tests/tests.py
# new tests should be added to test_cli.py, not here
from __future__ import absolute_import
import copy
import hashlib
import os
import shlex
import shutil
import subprocess
import sys
import tempfile
import unittest
class TerrariumTester(unittest.TestCase):
def setUp(self):
_, requirements = tempfile.mkstemp(prefix='test_terrarium_req-')
target = tempfile.mkdtemp(prefix='test_terrarium_target-')
self.initial_config = {
'target': target,
'storage_dir': tempfile.mkdtemp(prefix='test_terrarium_storage-'),
'python': os.path.join(target, 'bin', 'python'),
'terrarium': 'terrarium',
'requirements': requirements,
'environ': {},
'opts': '',
}
self.configs = []
self.config_push(initial=True)
@property
def config(self):
return self.configs[0]
@property
def target(self):
return self.config['target']
@property
def storage_dir(self):
return self.config['storage_dir']
@property
def python(self):
return self.config['python']
@property
def terrarium(self):
return self.config['terrarium']
@property
def environ(self):
return self.config['environ']
@property
def requirements(self):
return self.config['requirements']
@property
def opts(self):
return self.config['opts']
def config_pop(self):
return self.configs.pop()
def config_push(self, initial=True):
if initial:
config = copy.deepcopy(self.initial_config)
else:
config = copy.deepcopy(self.configs[0])
self.configs.insert(0, config)
return config
def tearDown(self):
for config in self.configs:
if os.path.exists(config['target']):
shutil.rmtree(config['target'])
if os.path.exists('%s.bak' % config['target']):
shutil.rmtree('%s.bak' % config['target'])
if os.path.exists(config['storage_dir']):
shutil.rmtree(config['storage_dir'])
if os.path.exists(config['requirements']):
os.unlink(config['requirements'])
def _run(self, command, **kwargs):
defaults = {
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
}
defaults.update(kwargs)
env = {}
if self.environ:
env.update(os.environ)
env.update(self.environ)
defaults['env'] = env
kwargs = defaults
sys.stdout.write('Executing "%s"\n' % command)
params = shlex.split(command)
result = subprocess.Popen(params, **kwargs)
stdout, stderr = result.communicate()
return (stdout, stderr), result.returncode
def _get_path(self, *paths):
paths = list(paths)
paths.insert(
0,
os.path.dirname(
os.path.abspath(__file__)
),
)
return os.path.abspath(
os.path.join(*paths)
)
def _get_path_terrarium(self):
return self._get_path('..')
def _python(self, command='', **kwargs):
output, return_code = self._run(
'%s %s' % (
self.python,
command,
)
)
return output, return_code
def _terrarium(self, command='', call_using_python=False, **kwargs):
options = []
for key, value in kwargs.items():
options.append('--%s' % key.replace('_', '-'))
if value is not None and value is not True:
options.append(value)
command = ' '.join([
self.terrarium,
' '.join(options),
self.opts,
command,
])
if call_using_python:
output, return_code = self._python(command)
else:
output, return_code = self._run(
command,
)
return output, return_code
def _install(self, call_using_python=False, **kwargs):
command = 'install %s' % (
self.requirements,
)
output, return_code = self._terrarium(
command,
target=self.target,
call_using_python=call_using_python,
**kwargs
)
return output, return_code
def _key(self, **kwargs):
command = 'key %s' % (
self.requirements,
)
(stdout, stderr), return_code = self._terrarium(command)
self.assertEqual(return_code, 0)
self.assertEqual(stderr, '')
requirements_key = stdout.strip()
return requirements_key
def _add_requirements(self, *requirements):
with open(self.requirements, 'a') as f:
f.writelines('\n'.join(requirements))
f.write('\n')
def _add_test_requirement(self):
test_requirement = self._get_path('fixtures', 'test_requirement')
self._add_requirements(test_requirement)
def _add_terrarium_requirement(self):
import virtualenv
self._add_requirements(
os.environ['TOX_PACKAGE'],
'virtualenv==%s' % virtualenv.virtualenv_version
)
def _clear_requirements(self, *requirements):
with open(self.requirements, 'w'):
pass
def _can_import_requirements(self, *requirements):
imported = []
for r in requirements:
output, return_code = self._python(
'-c "import %s"' % r
)
if return_code == 0:
imported.append(r)
return imported
def assertInstall(self, *args, **kwargs):
expected_return_code = kwargs.pop('return_code', 0)
(stdout, stderr), return_code = self._install(*args, **kwargs)
# Print output so it is displayed in the event of an error
sys.stdout.write('\n---------- stdout ----------\n')
sys.stdout.write(stdout)
sys.stdout.write('\n---------- stderr ----------\n')
sys.stdout.write(stderr)
sys.stdout.write('\n---------- ------ ----------\n')
self.assertEqual(return_code, expected_return_code)
return stdout, stderr
def assertExists(self, path):
self.assertTrue(os.path.exists(path))
def assertNotExists(self, path):
self.assertFalse(os.path.exists(path))
class TestTerrarium(TerrariumTester):
def test_install_requirements_with_dependency(self):
# This test involves a requirements file with two items,
# test_requirement and foo_requirement. foo_requirement has
# test_requirement as a dependency. We check that, if test_requirement
# comes first in the requirements, the install of foo_requirement will
# be successful.
self._add_requirements(
self._get_path('fixtures', 'test_requirement'),
self._get_path('fixtures', 'foo_requirement'),
)
self.assertInstall()
actual = self._can_import_requirements(
'test_requirement',
'foo_requirement',
)
expected = ['test_requirement', 'foo_requirement']
self.assertEqual(actual, expected)
def test_install_with_requirement_comments(self):
# Verify that a requirement file with comment lines can be used.
self._add_requirements(
self._get_path('fixtures', 'test_requirement'),
'# This is a comment line in the requirements file.',
)
self.assertInstall()
actual = self._can_import_requirements(
'test_requirement',
)
expected = ['test_requirement']
self.assertEqual(actual, expected)
def test_install_editable_with_hash_egg_name(self):
# Verify that a requirement file with a hash egg name can be used and
# is not confused with a comment
# If the #egg=foobar is removed, pip will fail
self._add_requirements(
'-e git+git://github.com/PolicyStat/terrarium.git#egg=foobar',
)
self.assertInstall()
actual = self._can_import_requirements(
'terrarium',
)
expected = ['terrarium']
self.assertEqual(actual, expected)
def test_hash_default_empty_requirements(self):
# Verify that the hash of an empty requirements file is predictable
command = 'hash %s' % (
self.requirements,
)
(stdout, stderr), return_code = self._terrarium(command)
expected_digest = hashlib.md5('').hexdigest()
self.assertEqual(return_code, 0)
self.assertEqual(stdout.strip(), expected_digest)
self.assertEqual(stderr, '')
def test_install_old_backup_symlink(self):
# Create a scenario where the backup (from a previous install) is
# actually a symlink instead of a directory
os.symlink(self.target, '%s.bak' % self.target)
self.assertInstall()
self.assertInstall()
def test_install_replace_activate_virtualenv_path(self):
# Verify that when replacing an existing virtualenv, the VIRTUAL_ENV
# path in the activate script matches the original path of the
# replaced environment
self.assertInstall()
self.assertInstall()
activate = os.path.join(self.target, 'bin', 'activate')
with open(activate) as f:
contents = f.read()
self.assertTrue(
'VIRTUAL_ENV="%s"' % self.target
in contents
)
def test_install_storage_dir_archive(self):
# Verify that the --storage-dir option causes terrarium to create an
# archive for the given requirement set
self.assertInstall(storage_dir=self.storage_dir)
requirements_key = self._key()
archive = os.path.join(self.storage_dir, requirements_key)
self.assertExists(archive)
# Verify that the environment is returned to a usable state
activate = os.path.join(self.target, 'bin', 'activate')
with open(activate) as f:
contents = f.read()
self.assertTrue(
'VIRTUAL_ENV="%s"' % self.target
in contents
)
def test_install_storage_dir_archive_by_environ(self):
# Verify that the --storage-dir option causes terrarium to create an
# archive for the given requirement set
self.environ['TERRARIUM_STORAGE_DIR'] = self.storage_dir
self.assertInstall()
requirements_key = self._key()
archive = os.path.join(self.storage_dir, requirements_key)
self.assertExists(archive)
# Verify that the environment is returned to a usable state
activate = os.path.join(self.target, 'bin', 'activate')
with open(activate) as f:
contents = f.read()
self.assertTrue(
'VIRTUAL_ENV="%s"' % self.target
in contents
)
def test_install_storage_dir_no_archive(self):
# Verify that the --no-upload option causes terrarium to not create an
# archive for the given requirement set
self.assertInstall(
storage_dir=self.storage_dir,
no_upload=True,
)
requirements_key = self._key()
archive = os.path.join(self.storage_dir, requirements_key)
self.assertNotExists(archive)
def test_install_storage_dir_archive_extracted(self):
# Verify that an archived terrarium can be later extracted and used
# Build an archive
self._add_test_requirement()
self.assertInstall(storage_dir=self.storage_dir)
requirements_key = self._key()
archive = os.path.join(self.storage_dir, requirements_key)
self.assertExists(archive)
# Just install a blank environment
self._clear_requirements()
# Replace the environment with something else
self.assertInstall(no_backup=True)
actual = self._can_import_requirements(
'test_requirement', # Should not exist in the replacement
)
expected = []
self.assertEqual(actual, expected)
# Now attempt to install from the archive
self._add_test_requirement()
stdout, stderr = self.assertInstall(
no_backup=True,
storage_dir=self.storage_dir,
verbose=True,
)
self.assertNotEqual(stdout, '')
self.assertEqual(stderr, '')
actual = self._can_import_requirements(
'test_requirement', # Should exist now
)
expected = ['test_requirement']
self.assertEqual(actual, expected)
def test_install_with_terrarium_in_environment(self):
# Verify that terrarium can replace an existing environment, the one
# that terrarium executes from
self._add_test_requirement()
self._add_terrarium_requirement()
self.assertInstall()
actual = self._can_import_requirements(
'test_requirement',
'terrarium',
)
expected = [
'test_requirement',
'terrarium',
]
self.assertEqual(actual, expected)
# Use terrarium contained in the new environment
config = self.config_push()
config['terrarium'] = os.path.join(
self.target,
'bin',
'terrarium',
)
output = self.assertInstall(
no_backup=True,
call_using_python=True,
)
self.assertFalse('Requirement already satisfied' in output[0])
actual = self._can_import_requirements(
'test_requirement',
'terrarium',
)
expected = [
'test_requirement',
'terrarium',
]
self.assertEqual(actual, expected)
def test_extract_with_terrarium_in_environment(self):
# Verify that terrarium can install after being extracted from an
# archive that was previously installed
self._add_terrarium_requirement()
self.assertInstall(storage_dir=self.storage_dir)
# Use terrarium contained in the new environment
config = self.config_push()
config['terrarium'] = os.path.join(
self.target,
'bin',
'terrarium',
)
config['opts'] = '-VV'
self.assertInstall(
no_backup=True,
storage_dir=self.storage_dir,
)
self.assertExists(self.python)
def test_logging_output_default(self):
self._add_test_requirement()
self._add_terrarium_requirement()
stdout, stderr = self.assertInstall()
self.assertEqual('', stdout)
self.assertEqual('', stderr)
def test_logging_output_verbose(self):
self._add_test_requirement()
self._add_terrarium_requirement()
stdout, stderr = self.assertInstall(verbose=True)
self.assertNotEqual('', stdout)
self.assertEqual('', stderr)
def test_sensitive_arguments_are_sensitive(self):
command = 'hash %s' % (
self.requirements,
)
self.config['opts'] = '-VV'
(stdout, stderr), return_code = self._terrarium(
command,
s3_secret_key='should_not_appear',
s3_access_key='do_not_show_me',
)
self.assertEqual('', stderr)
self.assertEqual(return_code, 0)
self.assertTrue(
stdout.startswith('[DEBUG] Initialized with Namespace')
)
self.assertTrue('s3_secret_key' in stdout)
self.assertTrue('s3_access_key' in stdout)
self.assertTrue('should_not_appear' not in stdout)
self.assertTrue('do_not_show_me' not in stdout)
def test_restore_previously_backed_up_environment(self):
output, return_code = self._terrarium(
'revert',
target=self.target,
)
self.assertEqual(return_code, 1)
self._add_test_requirement()
self.assertInstall()
with open(os.path.join(self.target, 'foo'), 'w') as f:
f.write('bar')
self.assertInstall()
with open(os.path.join(self.target, 'moo'), 'w') as f:
f.write('cow')
self.assertExists('%s.bak' % self.target)
output, return_code = self._terrarium(
'revert',
target=self.target,
)
self.assertEqual(return_code, 0)
self.assertNotExists('%s.bak' % self.target)
self.assertExists(os.path.join(self.target, 'foo'))
self.assertNotExists(os.path.join(self.target, 'moo'))
|
StarcoderdataPython
|
1606951
|
<reponame>vishalbelsare/h2o-3<filename>h2o-py/tests/testdir_algos/glm/pyunit_PUBDEV_7481_lambda_search_alpha_array_validation_large.py<gh_stars>1000+
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator as glm
# with lambda_search=True and an alpha array and warm start, we provide a validation dataset here.
def glm_alpha_lambda_arrays():
# compare coefficients and deviance when a validation dataset is also provided
train = h2o.import_file(path=pyunit_utils.locate("smalldata/glm_test/binomial_20_cols_10KRows.csv"))
for ind in range(10):
train[ind] = train[ind].asfactor()
train["C21"] = train["C21"].asfactor()
frames = train.split_frame(ratios=[0.8],seed=12345)
d = frames[0]
d_test = frames[1]
regKeys = ["alphas", "lambdas", "explained_deviance_valid", "explained_deviance_train"]
# compare results when validation dataset is present
mLVal = glm(family='binomial',alpha=[0.1,0.5], lambda_search=True, solver='COORDINATE_DESCENT', nlambdas=3) # train with validations set
mLVal.train(training_frame=d,x=list(range(20)),y=20, validation_frame=d_test)
rVal = glm.getGLMRegularizationPath(mLVal)
best_submodel_indexVal = mLVal._model_json["output"]["best_submodel_index"]
m2Val = glm.makeGLMModel(model=mLVal,coefs=rVal['coefficients'][best_submodel_indexVal])
dev1Val = rVal['explained_deviance_valid'][best_submodel_indexVal]
p2Val = m2Val.model_performance(d_test)
dev2Val = 1-p2Val.residual_deviance()/p2Val.null_deviance()
assert abs(dev1Val - dev2Val) < 1e-6
for l in range(0,len(rVal['lambdas'])):
m = glm(family='binomial',alpha=[rVal['alphas'][l]],Lambda=rVal['lambdas'][l],solver='COORDINATE_DESCENT')
m.train(training_frame=d,x=list(range(20)),y=20, validation_frame=d_test)
mr = glm.getGLMRegularizationPath(m)
p = m.model_performance(d_test);
cs = rVal['coefficients'][l]
cs_norm = rVal['coefficients_std'][l]
print("Comparing submodel index {0}".format(l))
pyunit_utils.assertEqualCoeffDicts(cs, m.coef(), tol=1e-1)
pyunit_utils.assertEqualCoeffDicts(cs_norm, m.coef_norm(), tol=1e-1)
pyunit_utils.assertEqualRegPaths(regKeys, rVal, l, mr, tol=1e-3)
dVal = 1-p.residual_deviance()/p.null_deviance()
if l == best_submodel_indexVal: # check validation metrics, should be equal for the best submodel index
pyunit_utils.assertEqualModelMetrics(m._model_json["output"]["validation_metrics"],
mLVal._model_json["output"]["validation_metrics"],tol=1e-2)
else: # other submodels should have a worse residual deviance than the best submodel
    assert dVal <= dev2Val, "Best submodel does not have the highest explained_deviance_valid for submodel {0}!".format(l)
if __name__ == "__main__":
pyunit_utils.standalone_test(glm_alpha_lambda_arrays)
else:
glm_alpha_lambda_arrays()
|
StarcoderdataPython
|
3311598
|
################################################################
# Implemented by <NAME> (<EMAIL>) #
# #
# PyTorch-compatible implementation of Integrated Gradients    #
# proposed in "Axiomatic Attribution for Deep Networks"        #
# (https://arxiv.org/abs/1703.01365). #
# #
# Keywords: Shapley values, interpretable machine learning #
################################################################
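#
# Note added for reference: integrated_gradients() below approximates
#     IG_i(x) = (x_i - x0_i) * integral_{a=0..1} dF(x0 + a * (x - x0)) / dx_i da
# where x is the input, x0 the baseline and F the network. The integral is
# approximated by a left Riemann sum over `steps` points along the straight-line
# path from x0 to x (the scaled_inputs list): the endpoint gradient is dropped via
# grads[:-1], the remaining gradients are averaged and then scaled by (x - x0).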
import torch
import numpy as np
import torch.nn.functional as F
import cv2
import argparse
import os
import sys
import random
# integrated gradients
def integrated_gradients(inputs, model, target_label_idx, predict_and_gradients, baseline, steps=50, cuda=False):
if baseline is None:
baseline = 0 * inputs
# scale inputs and compute gradients
scaled_inputs = [baseline + (float(i) / steps) * (inputs - baseline) for i in range(0, steps + 1)]
grads, _ = predict_and_gradients(scaled_inputs, model, target_label_idx, cuda)
avg_grads = np.average(grads[:-1], axis=0)
avg_grads = np.transpose(avg_grads, (1, 2, 0))
delta_X = (pre_processing(inputs, cuda) - pre_processing(baseline, cuda)).detach().squeeze(0).cpu().numpy()
delta_X = np.transpose(delta_X, (1, 2, 0))
integrated_grad = delta_X * avg_grads
return integrated_grad
def random_baseline_integrated_gradients(inputs, model, target_label_idx, predict_and_gradients, steps, num_random_trials, cuda):
all_intgrads = []
for i in range(num_random_trials):
integrated_grad = integrated_gradients(inputs, model, target_label_idx, predict_and_gradients, \
baseline=255.0 *np.random.random(inputs.shape), steps=steps, cuda=cuda)
all_intgrads.append(integrated_grad)
print('the trial number is: {}'.format(i))
avg_intgrads = np.average(np.array(all_intgrads), axis=0)
return avg_intgrads
def calculate_outputs_and_gradients(inputs, model, target_label_idx, cuda=False):
# do the pre-processing
predict_idx = None
gradients = []
for input in inputs:
input = pre_processing(input, cuda)
output = model(input)
output = F.softmax(output, dim=1)
if target_label_idx is None:
target_label_idx = torch.argmax(output, 1).item()
index = np.ones((output.size()[0], 1)) * target_label_idx
index = torch.tensor(index, dtype=torch.int64)
if cuda:
index = index.cuda()
output = output.gather(1, index)
# clear grad
model.zero_grad()
output.backward()
gradient = input.grad.detach().cpu().numpy()[0]
gradients.append(gradient)
gradients = np.array(gradients)
return gradients, target_label_idx
def pre_processing(obs, cuda):
mean = np.array([0.485, 0.456, 0.406]).reshape([1, 1, 3])
std = np.array([0.229, 0.224, 0.225]).reshape([1, 1, 3])
obs = obs / 255
obs = (obs - mean) / std
obs = np.transpose(obs, (2, 0, 1))
obs = np.expand_dims(obs, 0)
obs = np.array(obs)
if cuda:
torch_device = torch.device('cuda:0')
else:
torch_device = torch.device('cpu')
obs_tensor = torch.tensor(obs, dtype=torch.float32, device=torch_device, requires_grad=True)
return obs_tensor
# assemble the individual result panels into one composite image
def generate_entrie_images(img_origin, img_grad, img_grad_overlay, img_integrad, img_integrad_overlay):
blank = np.ones((img_grad.shape[0], 10, 3), dtype=np.uint8) * 255
blank_hor = np.ones((10, 20 + img_grad.shape[0] * 3, 3), dtype=np.uint8) * 255
upper = np.concatenate([img_origin[:, :, (2, 1, 0)], blank, img_grad_overlay, blank, img_grad], 1)
down = np.concatenate([img_origin[:, :, (2, 1, 0)], blank, img_integrad_overlay, blank, img_integrad], 1)
total = np.concatenate([upper, blank_hor, down], 0)
total = cv2.resize(total, (550, 364))
return total
G = [0, 255, 0]
R = [255, 0, 0]
def convert_to_gray_scale(attributions):
return np.average(attributions, axis=2)
def linear_transform(attributions, clip_above_percentile=99.9, clip_below_percentile=70.0, low=0.2, plot_distribution=False):
m = compute_threshold_by_top_percentage(attributions, percentage=100-clip_above_percentile, plot_distribution=plot_distribution)
e = compute_threshold_by_top_percentage(attributions, percentage=100-clip_below_percentile, plot_distribution=plot_distribution)
transformed = (1 - low) * (np.abs(attributions) - e) / (m - e) + low
transformed *= np.sign(attributions)
transformed *= (transformed >= low)
transformed = np.clip(transformed, 0.0, 1.0)
return transformed
def compute_threshold_by_top_percentage(attributions, percentage=60, plot_distribution=True):
if percentage < 0 or percentage > 100:
raise ValueError('percentage must be in [0, 100]')
if percentage == 100:
return np.min(attributions)
flat_attributions = attributions.flatten()
attribution_sum = np.sum(flat_attributions)
sorted_attributions = np.sort(np.abs(flat_attributions))[::-1]
cum_sum = 100.0 * np.cumsum(sorted_attributions) / attribution_sum
threshold_idx = np.where(cum_sum >= percentage)[0][0]
threshold = sorted_attributions[threshold_idx]
if plot_distribution:
raise NotImplementedError
return threshold
def polarity_function(attributions, polarity):
if polarity == 'positive':
return np.clip(attributions, 0, 1)
elif polarity == 'negative':
return np.clip(attributions, -1, 0)
else:
raise NotImplementedError
def overlay_function(attributions, image):
return np.clip(0.7 * image + 0.5 * attributions, 0, 255)
def visualize(attributions, image, positive_channel=G, negative_channel=R, polarity='positive', \
clip_above_percentile=99.9, clip_below_percentile=0, morphological_cleanup=False, \
structure=np.ones((3, 3)), outlines=False, outlines_component_percentage=90, overlay=True, \
mask_mode=False, plot_distribution=False):
if polarity == 'both':
raise NotImplementedError
elif polarity == 'positive':
attributions = polarity_function(attributions, polarity=polarity)
channel = positive_channel
# convert the attributions to the gray scale
attributions = convert_to_gray_scale(attributions)
attributions = linear_transform(attributions, clip_above_percentile, clip_below_percentile, 0.0, plot_distribution=plot_distribution)
attributions_mask = attributions.copy()
if morphological_cleanup:
raise NotImplementedError
if outlines:
raise NotImplementedError
attributions = np.expand_dims(attributions, 2) * channel
if overlay:
if mask_mode == False:
attributions = overlay_function(attributions, image)
else:
attributions = np.expand_dims(attributions_mask, 2)
attributions = np.clip(attributions * image, 0, 255)
attributions = attributions[:, :, (2, 1, 0)]
return attributions
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--use-cuda', action='store_true', default=False,
help='Use NVIDIA GPU acceleration')
parser.add_argument('--model-path', type=str, default=os.path.join('../DSC180B-Face-Mask-Detection/models', 'model_resnet_best_val_acc_0.955.pt'),
help='Load model path')
parser.add_argument('--custom-image-path', type=str, default=None,
help='the custom image path')
parser.add_argument('--img-load-path', type=str, help='the image loading path')
parser.add_argument('--img-save-path', type=str, help='the image saving path')
args = parser.parse_args()
args.use_cuda = args.use_cuda and torch.cuda.is_available()
if args.use_cuda:
print("Using GPU for acceleration")
else:
print("Using CPU for computation")
return args
if __name__ == '__main__':
args = get_args()
model_path = args.model_path
img_load_path = args.img_load_path
img_save_path = args.img_save_path
custom_image_path = args.custom_image_path
try:
if args.use_cuda:
model = torch.load(model_path)
else:
model = torch.load(model_path, map_location="cpu")
except:
print("invalid model path, please check your parameter again")
sys.exit(0)
device = torch.device("cuda:0" if args.use_cuda else "cpu")
model.to(device)
# read the image
if custom_image_path is not None:
img = cv2.imread(custom_image_path)
else:
img = cv2.imread(img_load_path)
try:
img = cv2.resize(img, (224, 224))
except:
print("invalid image path, please check your parameter again")
sys.exit(0)
img = img.astype(np.float32)
img = img[:, :, (2, 1, 0)]
# calculate the gradient and the label index
gradients, label_index = calculate_outputs_and_gradients([img], model, None, args.use_cuda)
gradients = np.transpose(gradients[0], (1, 2, 0))
img_gradient_overlay = visualize(gradients, img, clip_above_percentile=99, clip_below_percentile=0, overlay=True, mask_mode=True)
img_gradient = visualize(gradients, img, clip_above_percentile=99, clip_below_percentile=0, overlay=False)
# calculate the integrated gradients
attributions = random_baseline_integrated_gradients(img, model, label_index, calculate_outputs_and_gradients, \
steps=50, num_random_trials=10, cuda=args.use_cuda)
img_integrated_gradient_overlay = visualize(attributions, img, clip_above_percentile=99, clip_below_percentile=0, \
overlay=True, mask_mode=True)
img_integrated_gradient = visualize(attributions, img, clip_above_percentile=99, clip_below_percentile=0, overlay=False)
output_img = generate_entrie_images(img, img_gradient, img_gradient_overlay, img_integrated_gradient, \
img_integrated_gradient_overlay)
if custom_image_path is not None:
cv2.imwrite("results/integrated_gradient/custom_{0}.jpg".format(random.randint(1, 10000)), np.uint8(output_img))
else:
cv2.imwrite("results/integrated_gradient/" + img_save_path, np.uint8(output_img))
|
StarcoderdataPython
|
1750404
|
<reponame>nahidupa/grr
#!/usr/bin/env python
"""Tests for validating the configs we have."""
import glob
import os
import logging
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import test_lib
from grr.lib import utils
def ValidateConfig(config_file=None):
"""Iterate over all the sections in the config file and validate them."""
logging.debug("Processing %s", config_file)
if isinstance(config_file, config_lib.GrrConfigManager):
conf_obj = config_file
else:
conf_obj = config_lib.CONFIG
conf_obj.Initialize(config_file, reset=True)
all_sections = conf_obj.GetSections()
errors = conf_obj.Validate(sections=all_sections)
return errors
class BuildConfigTests(test_lib.GRRBaseTest):
"""Tests for config functionality."""
# Server configuration files do not normally have valid client keys.
exceptions = ["Client.private_key",
"PrivateKeys.executable_signing_private_key",
"PrivateKeys.server_key", "PrivateKeys.ca_key",
"PrivateKeys.driver_signing_private_key"]
# The executables dir may be missing
exceptions.append("ClientBuilder.executables_dir")
disabled_filters = [
]
def testAllConfigs(self):
"""Go through all our config files looking for errors."""
# Test the current loaded configuration.
configs = [config_lib.CONFIG]
# Test all the other configs in the server config dir (/etc/grr by default)
glob_path = os.path.join(config_lib.CONFIG["Config.directory"], "*.yaml")
for cfg_file in glob.glob(glob_path):
if os.access(cfg_file, os.R_OK):
configs.append(cfg_file)
else:
logging.info(
"Skipping checking %s, you probably need to be root" % cfg_file)
test_filter_map = config_lib.ConfigFilter.classes_by_name
for filter_name in self.disabled_filters:
test_filter_map[filter_name] = config_lib.ConfigFilter
with utils.Stubber(config_lib.ConfigFilter, "classes_by_name",
test_filter_map):
for config_file in configs:
errors = ValidateConfig(config_file)
for exception in self.exceptions:
errors.pop(exception, None)
if errors:
self.fail("Validation of %s returned errors: %s" % (
config_file, errors))
def main(argv):
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
|
StarcoderdataPython
|
3311434
|
<reponame>pisskidney/leetcode
#!/usr/bin/python
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def kthSmallest(self, root, k):
res, cnt = self.inorder(root, k, 0)
return res
def inorder(self, node, k, n):
if not node:
return None, n
left_result, left_count = self.inorder(node.left, k, n)
if left_result is not None:
return left_result, 0
current_count = left_count + 1
if current_count == k:
return node.val, current_count
return self.inorder(node.right, k, current_count)
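# Note added for clarity: an in-order traversal of a BST visits values in ascending
# order, so the k-th node visited is the k-th smallest. The Solution above threads the
# running visit count through the recursion and returns (value, count); a non-None
# value signals that the answer was found and short-circuits the remaining calls.
# The variant below reaches the same result by mutating a one-element list, which
# acts as an integer passed by reference.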
class SolutionIntByReferenceByContaineringIt(object):
def kthSmallest(self, root, k):
return self.inorder(root, k, [0])
def inorder(self, node, k, n):
if not node:
return None
left = self.inorder(node.left, k, n)
if left is not None:
return left
n[0] += 1
if n[0] == k:
return node.val
right = self.inorder(node.right, k, n)
if right is not None:
return right
return None
t10 = TreeNode(10)
t5 = TreeNode(5)
t15 = TreeNode(15)
t4 = TreeNode(4)
t7 = TreeNode(7)
t3 = TreeNode(3)
t2 = TreeNode(0)
t6 = TreeNode(6)
t9 = TreeNode(9)
t12 = TreeNode(12)
t16 = TreeNode(16)
t10.left = t5
t10.right = t15
t5.left = t4
t5.right = t7
t4.left = t3
t3.left = t2
t7.left = t6
t7.right = t9
t15.left = t12
t15.right = t16
s = Solution()
print(s.kthSmallest(t10, 17))
|
StarcoderdataPython
|
79486
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import abc
import numpy as np
from typing import Union, Dict, Optional, Any, List
from .knob import BaseKnob
KnobConfig = Dict[str, BaseKnob]
Knobs = Dict[str, Any]
Params = Dict[str, Union[str, int, float, np.ndarray]]
class BaseModel(abc.ABC):
'''
Rafiki's base model class that Rafiki models must extend.
Rafiki models must implement all abstract methods below, according to the specification of its associated task (see :ref:`tasks`).
They configure how this model template will be trained, evaluated, tuned, serialized and served on Rafiki.
In the model's ``__init__`` method, call ``super().__init__(**knobs)`` as the first line,
followed by the model's initialization logic. The model should be initialize itself with ``knobs``,
a set of generated knob values for the created model instance.
These knob values are chosen by Rafiki based on the model's knob configuration (defined in :meth:`rafiki.model.BaseModel.get_knob_config`).
For example:
::
def __init__(self, **knobs):
super().__init__(**knobs)
self.__dict__.update(knobs)
...
self._build_model(self.knob1, self.knob2)
:param knobs: Dictionary mapping knob names to knob values
:type knobs: :obj:`rafiki.model.Knobs`
'''
def __init__(self, **knobs: Knobs):
pass
@abc.abstractstaticmethod
def get_knob_config() -> KnobConfig:
'''
Return a dictionary that defines the search space for this model template's knobs
(i.e. knobs' names, their types & their ranges).
Over the course of training, your model will be initialized with different values of knobs within this search space
to maximize this model’s performance.
Refer to :ref:`model-tuning` to understand more about how this works.
:returns: Dictionary mapping knob names to knob specifications
'''
raise NotImplementedError()
@abc.abstractmethod
def train(self, dataset_path: str, shared_params: Optional[Params] = None, **train_args):
'''
Train this model instance with the given training dataset and initialized knob values.
Additional keyword arguments could be passed depending on the task's specification.
Additionally, trained parameters shared from previous trials could be passed,
as part of the ``SHARE_PARAMS`` policy (see :ref:`model-policies`).
Subsequently, the model is considered *trained*.
:param dataset_path: File path of the train dataset file in the *local filesystem*, in a format specified by the task
:param shared_params: Dictionary mapping parameter names to values, as produced by your model's :meth:`rafiki.model.BaseModel.dump_parameters`.
'''
raise NotImplementedError()
@abc.abstractmethod
def evaluate(self, dataset_path: str) -> float:
'''
Evaluate this model instance with the given validation dataset after training.
        This will be called only when the model is *trained*.
:param dataset_path: File path of the validation dataset file in the *local filesystem*, in a format specified by the task
:returns: A score associated with the validation performance for the trained model instance, the higher the better e.g. classification accuracy.
'''
raise NotImplementedError()
@abc.abstractmethod
def predict(self, queries: List[Any]) -> List[Any]:
'''
Make predictions on a batch of queries after training.
        This will be called only when the model is *trained*.
:param queries: List of queries, where a query is in the format specified by the task
:returns: List of predictions, in an order corresponding to the queries, where a prediction is in the format specified by the task
'''
raise NotImplementedError()
@abc.abstractmethod
def dump_parameters(self) -> Params:
'''
Returns a dictionary of model parameters that *fully define the trained state of the model*.
This dictionary must conform to the format :obj:`rafiki.model.Params`.
This will be used to save the trained model in Rafiki.
Additionally, trained parameters produced by this method could be shared with future trials, as
part of the ``SHARE_PARAMS`` policy (see :ref:`model-policies`).
        This will be called only when the model is *trained*.
:returns: Dictionary mapping parameter names to values
'''
raise NotImplementedError()
@abc.abstractmethod
def load_parameters(self, params: Params):
'''
Loads this model instance with previously trained model parameters produced by your model's :meth:`rafiki.model.BaseModel.dump_parameters`.
*This model instance's initialized knob values will match those during training*.
Subsequently, the model is considered *trained*.
'''
raise NotImplementedError()
def destroy(self):
'''
Destroy this model instance, freeing any resources held by this model instance.
No other instance methods will be called subsequently.
'''
pass
@staticmethod
def teardown():
'''
Runs class-wide teardown logic (e.g. close a training session shared across trials).
'''
pass
class PandaModel(BaseModel):
def __init__(self, **knobs: Knobs):
super().__init__(**knobs)
@abc.abstractmethod
def local_explain(self, queries, params: Params):
raise NotImplementedError()
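# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original module) of a concrete model that
# fills in the abstract methods above. ``IntegerKnob`` is an assumption about
# the knob classes exposed by ``.knob``, and the "training" below is a
# stand-in for real logic; it only illustrates the contract.
class _ExampleModel(BaseModel):
    @staticmethod
    def get_knob_config() -> KnobConfig:
        from .knob import IntegerKnob  # assumed knob type, not confirmed by this file
        return {'hidden_units': IntegerKnob(8, 128)}
    def __init__(self, **knobs: Knobs):
        super().__init__(**knobs)
        self._knobs = knobs
        self._weights = None
    def train(self, dataset_path: str, shared_params: Optional[Params] = None, **train_args):
        # Pretend to "train" by allocating a weight vector sized by the knob value.
        self._weights = np.zeros(int(self._knobs.get('hidden_units', 8)))
    def evaluate(self, dataset_path: str) -> float:
        return 0.0
    def predict(self, queries: List[Any]) -> List[Any]:
        return [None for _ in queries]
    def dump_parameters(self) -> Params:
        return {'weights': self._weights}
    def load_parameters(self, params: Params):
        self._weights = params['weights']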
|
StarcoderdataPython
|
3242761
|
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.utils.np_utils import to_categorical
from keras.optimizers import Adam
model = Sequential()
model.add(Dense(8, activation='relu', input_dim=4))
model.add(Dense(16, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(3, activation='softmax'))
opt = Adam(lr=0.001)
model.compile(
optimizer=opt,
loss='categorical_crossentropy',
metrics=['accuracy']
)
# 0 = Iris-setosa
# 1 = Iris-versicolor
# 2 = Iris-virginica
data = np.genfromtxt('iris.csv', delimiter=',')
x_train = data[1:, :4]
y_train = to_categorical(data[1:, 4])
perm = np.random.permutation(y_train.shape[0])
x_train = x_train[perm]
y_train = y_train[perm]
model.fit(
x_train,
y_train,
epochs=100,
validation_split=0.2
)
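# An inference sketch (not part of the original script): map the argmax of the
# softmax output back to the class names noted in the comments above.
labels = ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica']
for probs in model.predict(x_train[:5]):
    print(labels[int(np.argmax(probs))])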
|
StarcoderdataPython
|
1726577
|
# coding=utf-8
# Author: <NAME> <<EMAIL>>
"""
The :mod:`perturbation_classifiers.subconcept` provides the implementation of
subconcept Perturbation-based Classifier (sPerC) algorithm.
"""
from .sperc import sPerC
__all__ = ['sPerC', 'clustering']
|
StarcoderdataPython
|
3366697
|
import tensorflow as tf
import numpy as np
import callbacks
import pruning
import data
import os
import models
import argparse
from effective_masks import *
from utils import *
import logging
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
parser=argparse.ArgumentParser()
parser.add_argument('--sample',type=str,default='0',help='seed code')
parser.add_argument('--path_to_data',type=str,help='path to tinyimagenet folder')
parser.add_argument('--save',type=int,default=1,help='whether to save output files (choose one of: 0, 1)')
parser.add_argument('--architecture',type=str,default='lenet300100',help='network type (choose one of: lenet300100, lenet5, vgg16, vgg19, resnet18)')
parser.add_argument('--pruner',type=str,default='snip_global',help='pruner (choose one of: dense, lamp, snip, snip/iterative, synflow, random/uniform, random/erk, random/igq, random/uniform_plus, random/synflow, magnitude/global, magnitude/uniform, magnitude/erk, magnitude/igq, magnitude/uniform_plus)')
parser.add_argument('--com_exp',type=float,default=None,help='target compression = 10 ** com_exp (overwrites --target_sparsity)')
parser.add_argument('--target_sparsity',type=float,default=0.9,help='target sparsity (overwritten by --com_exp if given)')
parser.add_argument('--pruning_type',type=str,default='direct',help='choose one of: direct, effective')
parser.add_argument('--train',type=int,default=1,help='whether to train a subnetwork (choose one of: 0, 1)')
parser.add_argument('--out_path',type=str,default='EffectiveSparsity',help='path to directory for outputs')
args=parser.parse_args()
args.target_sparsity=0 if args.pruner=='dense' else args.target_sparsity
args.pruning_type='' if args.pruner=='dense' else args.pruning_type
lenet300100_config={'data':'mnist','lr':0.1,'batch_size_train':100,'iterations':96000,'weight_decay':0.0005,'batchnorm':True,'momentum':0.9,'lr_decay':[25000,50000,75000,100000],'batch_size_snip':100} # source: Lee et al., 2018
lenet5_config={'data':'cifar10','lr':0.1,'batch_size_train':128,'iterations':120000,'weight_decay':0.0005,'batchnorm':True,'momentum':0.9,'lr_decay':[30000,60000,90000,120000],'batch_size_snip':128} # source: Lee et al., 2018
vgg16_config={'data':'cifar10','lr':0.1,'batch_size_train':128,'iterations':62500,'weight_decay':0.0001,'batchnorm':True,'momentum':0.9,'lr_decay':[31250,46875],'batch_size_snip':128} # source: Frankle et al., 2020
vgg19_config={'data':'cifar100','lr':0.1,'batch_size_train':128,'iterations':62500,'weight_decay':0.0001,'batchnorm':True,'momentum':0.9,'lr_decay':[31250,46875],'batch_size_snip':1280} # source: Wang et al., 2020
resnet18_config={'data':'tinyimagenet','lr':0.2,'batch_size_train':256,'iterations':78200,'weight_decay':0.0001,'batchnorm':True,'momentum':0.9,'lr_decay':[39100,58650],'batch_size_snip':2560} # source: Frankle et al., 2020
if args.architecture=='lenet300100':
config=lenet300100_config
if args.architecture=='lenet5':
config=lenet5_config
if args.architecture=='vgg16':
config=vgg16_config
if args.architecture=='vgg19':
config=vgg19_config
if args.architecture=='resnet18':
config=resnet18_config
def main(args):
target_compression=10**args.com_exp if args.com_exp is not None else 1./(1-args.target_sparsity)
extension=f'{args.sample}_{round(target_compression)}_'
path_to_dense=os.path.join(args.out_path,args.architecture,'dense')
args.out_path=os.path.join(args.out_path,args.architecture,args.pruner,args.pruning_type)
if not os.path.exists(args.out_path):
os.makedirs(args.out_path)
if not os.path.exists(path_to_dense):
os.makedirs(path_to_dense)
logging.basicConfig(filename=os.path.join(args.out_path,extension+'info.log'),level=logging.INFO,filemode='w')
datagen,train_X,train_y,test_X,test_y=data.get_data(config['data'],path_to_data=args.path_to_data)
epochs=int(config['batch_size_train']*config['iterations']/len(train_X))
model,tensors=models.get_model(shape=train_X[0].shape,architecture=args.architecture,batchnorm=config['batchnorm'],decay=config['weight_decay'],output_classes=len(train_y[0]))
values=[config['lr']*(0.1**i) for i in range(len(config['lr_decay'])+1)]
learningrate=tf.keras.optimizers.schedules.PiecewiseConstantDecay(config['lr_decay'],values)
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=learningrate,momentum=config['momentum']),loss='categorical_crossentropy',metrics=['accuracy'])
log_list=np.arange(0,config['iterations'],1000)
pruner=pruning.Pruner(args.pruner)
masks=pruner.prune(model,tensors,1-1./target_compression,args.pruning_type,train_X=train_X,train_y=train_y,out_path=os.path.join(args.out_path,extension),config=config,sample=args.sample,path_to_dense=path_to_dense)
inits=[model.layers[layer].get_weights()[0] for layer in tensors]
log_cb=callbacks.LogCallback(model,tensors,masks,log_list,(test_X,test_y))
fit_callbacks=[callbacks.SubnetworkCallback(model,tensors,masks),log_cb]
eff_masks_custom=effective_masks_custom(model.name,masks)
eff_masks_synflow=effective_masks_synflow(model,tensors,masks)
logging.info(f'<main> [direct sparsity: {get_overall_direct_sparsity(masks):.6f}][effective sparsity: {get_overall_direct_sparsity(eff_masks_synflow):.6f}][epochs to train: {epochs}][iterations to train: {config["iterations"]}][pruner: {args.pruner}][sample: {args.sample}]')
if args.save and args.pruner!='dense':
np.save(os.path.join(args.out_path,extension)+'sparsities_effective_synflow.npy',get_direct_sparsity(eff_masks_synflow))
np.save(os.path.join(args.out_path,extension)+'sparsities_effective_custom.npy',get_direct_sparsity(eff_masks_custom))
np.save(os.path.join(args.out_path,extension)+'sparsities_direct.npy',get_direct_sparsity(masks))
np.save(os.path.join(path_to_dense,'counts.npy'),[np.prod(model.layers[layer].get_weights()[0].shape) for layer in tensors])
if args.train:
model.fit(datagen.flow(train_X,train_y,batch_size=config['batch_size_train']),steps_per_epoch=len(train_X)//config['batch_size_train'],epochs=epochs,shuffle=True,verbose=False,validation_data=(test_X,test_y),callbacks=fit_callbacks)
np.save(os.path.join(args.out_path,extension)+'accuracies.npy',log_cb.accuracies)
np.save(os.path.join(args.out_path,extension)+'losses.npy',log_cb.losses)
if args.save and args.train and args.pruner=='dense':
np.save(os.path.join(args.out_path,extension)+'inits.npy',inits)
np.save(os.path.join(args.out_path,extension)+'final_weights.npy',log_cb.final_weights)
if __name__=="__main__":
main(args)
|
StarcoderdataPython
|
3222075
|
class CSVFileMixin(object):
"""
Mixin which allows the override of the filename being
passed back to the user when the spreadsheet is downloaded.
"""
def finalize_response(self, request, response, *args, **kwargs):
response = super().finalize_response(request, response, *args, **kwargs)
if response.accepted_renderer.format == "csv":
response["Content-Disposition"] = f"attachment; filename=data.csv"
return response
def get_renderer_context(self):
context = super().get_renderer_context()
context['header'] = (
'id', 'creation_date_utc', 'branch', 'service',
'wait_time_mins', 'num_waiting',
)
return context
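# A usage sketch (not part of the original mixin), assuming a Django REST
# Framework viewset and a CSV renderer such as djangorestframework-csv's
# CSVRenderer; the model and serializer names below are hypothetical.
#
#     class WaitTimeViewSet(CSVFileMixin, viewsets.ReadOnlyModelViewSet):
#         queryset = WaitTime.objects.all()
#         serializer_class = WaitTimeSerializer
#         renderer_classes = [CSVRenderer, JSONRenderer]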
|
StarcoderdataPython
|
46471
|
import os
from hashlib import sha224
def search_duplicates(target, options=None):
    """Walk ``target`` and group file paths that share the same content digest."""
    digests = dict()
    for root, dirs, files in os.walk(target):
        for item in files:
            path = '%s/%s' % (root, item)
            hash_digest = _create_digest(path)
            if hash_digest not in digests:
                digests[hash_digest] = set()
            digests[hash_digest].add(path)
            if options is not None and getattr(options, 'verbose', False) and len(digests[hash_digest]) > 1:
                print('Duplicates found by hash: %s' % hash_digest)
                for file_path in digests[hash_digest]:
                    print(' %s' % file_path)
    return [value for key, value in digests.items() if len(value) > 1]
def _create_digest(target, buffer_size=50000000):
    hash_sum = sha224()
    # Read in binary mode so the hash is fed bytes, in fixed-size chunks.
    with open(target, 'rb') as f:
        while True:
            data = f.read(buffer_size)
            if not data:
                break
            hash_sum.update(data)
    return '[%s]->%s' % (os.path.getsize(target), hash_sum.hexdigest())
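# A usage sketch (not part of the original module), assuming an argparse-style
# options object that carries a ``verbose`` flag.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Find files with duplicate content under a directory')
    parser.add_argument('target')
    parser.add_argument('-v', '--verbose', action='store_true')
    opts = parser.parse_args()
    for group in search_duplicates(opts.target, opts):
        print(sorted(group))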
|
StarcoderdataPython
|
1684132
|
import json
from com.huawei.iotplatform.client.invokeapi.Authentication import Authentication
from com.huawei.iotplatform.client.invokeapi.BatchProcess import BatchProcess
from com.huawei.iotplatform.client.dto.AuthOutDTO import AuthOutDTO
from com.huawei.iotplatform.client.dto.BatchTaskCreateInDTO import BatchTaskCreateInDTO
from com.huawei.iotplatform.client.dto.BatchTaskCreateOutDTO import BatchTaskCreateOutDTO
from com.huawei.iotplatform.client.dto.QueryTaskDetailsInDTO import QueryTaskDetailsInDTO
from com.huawei.iotplatform.constant.Constant import Constant
class BatchProcessTest(object):
def createBatchTaskInfo(self):
btcInDTO = BatchTaskCreateInDTO()
btcInDTO.appId = "3RQ9UnhymV409MfKPuiin75XroQa"
btcInDTO.timeout = 100
btcInDTO.taskName = "a1"
btcInDTO.taskType = "DeviceCmd"
return btcInDTO
def queryTaskDetailsInfo(self):
qtdInDTO = QueryTaskDetailsInDTO()
qtdInDTO.taskID = "5bf8f8567dd2d86eab87edd9"
return qtdInDTO
if __name__ == "__main__":
bpTest = BatchProcessTest()
authentication = Authentication()
batchProcess = BatchProcess()
# get accessToken at first
result = authentication.getAuthToken(Constant().clientInfo())
authOutDTO = AuthOutDTO()
authOutDTO.setAccessToken(json.loads(result)['accessToken'])
accessToken = authOutDTO.getAccessToken()
# create a task begin
bc = batchProcess.createBatchTask(bpTest.createBatchTaskInfo(), accessToken)
print("====== create a task begin ======")
print("result:", bc + "\n")
# get taskID
btcOutDTO = BatchTaskCreateOutDTO()
btcOutDTO.setTaskID(json.loads(bc)['taskID'])
taskID = btcOutDTO.getTaskID()
# taskID = "11"
print("taskID==", taskID+ "\n")
# query a specified task
bq = batchProcess.queryOneTask(taskID, None, None, accessToken)
print("====== query a specified task ======")
print("result:", bq + "\n")
# query a specified task detail
bq = batchProcess.queryTaskDetails(bpTest.queryTaskDetailsInfo(), accessToken)
print("====== query a specified task detail ======")
print("result:", bq + "\n")
|
StarcoderdataPython
|
1629501
|
"""website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from examples.views import examples_index
from trips.views import trips_home, trips_post_detail
from forms.views import forms_home, forms_thanks
from Simple_CMS.views import CMS_home, CMS_add, CMS_detail, CMS_edit, CMS_delete
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', examples_index),
url(r'^trips/$', trips_home),
url(r'^trips/post/(?P<pk>\d+)/$', trips_post_detail, name='post_detail'),
url(r'^forms/$', forms_home),
url(r'^forms/thanks/$', forms_thanks),
url(r'^Simple_CMS/$', CMS_home),
url(r'^Simple_CMS/add$', CMS_add),
url(r'^Simple_CMS/article/(?P<id>\d+)/$', CMS_detail, name = 'CMS_detail'),
url(r'^Simple_CMS/article/(?P<id>\d+)/edit$', CMS_edit, name = 'CMS_edit'),
url(r'^Simple_CMS/article/(?P<id>\d+)/delete$', CMS_delete, name = 'CMS_delete'),
]
|
StarcoderdataPython
|
1709497
|
<filename>SAR/Proyecto/SAR_indexer.py
from Compendium import *
import os
import sys
if __name__ == '__main__':
    if len(sys.argv) > 2:  # both the corpus path and the output name must be given
path = sys.argv[1]
savedCompName = sys.argv[2]
else:
path = "./mini_enero"
savedCompName = "newsComp"
collection = sorted(os.listdir(path))
newsCompendium = Compendium()
for volume in collection:
print(path + "/" + volume)
newsCompendium.addToCompendium(path + "/" + volume, True, False)
newsCompendium.saveCompendium(savedCompName)
print('Successfully saved '+path+" to "+savedCompName+".")
|
StarcoderdataPython
|
3325683
|
<filename>logics/upgrade_firmware.py<gh_stars>0
"""Reference: https://medium.com/@keagileageek/paramiko-how-to-ssh-and-file-transfers-with-python-75766179de73
"""
import os
from default_cfg import APP_CFG as dflt_cfg
from .app_constants import *
from logics.sessions import Session
# todo: cmd arguments 1:
# HOSTNAME = "10.100.57.99"
# todo: cmd arguments 2:
# local files path directory
# todo: give the remote download path of firmware which then downloads the files in files_container.
# also, replace the existing files if files_container has any with the same name.
# todo: verify the input of the user coming through argparse
# todo: add feature to keep files in files_container first. Once that's done, use argparse to work on any local dir as per user's convenience.
# todo: release a package.
class UpgradeFirmware:
"""
    Uploads local firmware files to a remote device over SSH (via the project's Session helper) and runs the upgrade commands.
"""
def __init__(self):
self._session = Session.get_session_instance()
@classmethod
def find_local_files_abs_paths(
cls,
local_files_dir: str = dflt_cfg[DEFAULT_LOCAL_FILES_DIR],
file_names: list = dflt_cfg[UPLOAD_FILE_NAMES]
) -> list:
"""
Find the absolute file paths of the files that needs to be uploaded
:param local_files_dir: absolute directory path where uploading files are present
:type local_files_dir: str
:param file_names: Name of the files
:type file_names: list
:return: absolute file path of the uploading files
:rtype: list
"""
# check user input directory is valid or not
if local_files_dir != dflt_cfg[DEFAULT_LOCAL_FILES_DIR]:
if not os.path.isdir(local_files_dir):
raise Exception("Directory of files does not exist")
# construct absolute addresses of these files
files_abs_paths = []
for f_name in file_names:
f_abs_path = os.path.join(local_files_dir, f_name)
files_abs_paths.append(f_abs_path)
# check if all the files exist or not
for f_abs_path in files_abs_paths:
if not os.path.isfile(f_abs_path):
raise Exception(f"{str(f_abs_path)} is not available")
print(f"Success: All the required files {file_names} are present")
return files_abs_paths
# todo: employ ctrl + c to terminate program
# todo: argparse features
def upgrade_firmware(self, remote_file_dir: str = dflt_cfg[REMOTE_DIR_PATH]):
"""
        Upload the firmware files to the remote device and run the upgrade command over the active session.
        :param remote_file_dir: remote directory to upload the firmware files into
        :type remote_file_dir: str
        :return: None
        :rtype: None
"""
upload_file_paths = self.find_local_files_abs_paths(
local_files_dir=dflt_cfg[DEFAULT_LOCAL_FILES_DIR])
self._session.start_session()
self._session.upload_files(upload_file_paths, remote_file_dir)
self._session.exec_cmd()
self._session.close_session()
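# A usage sketch (not part of the original module): the remote directory below
# is illustrative; in practice it comes from this project's default_cfg.
if __name__ == "__main__":
    firmware_upgrader = UpgradeFirmware()
    firmware_upgrader.upgrade_firmware(remote_file_dir="/tmp/firmware_upload")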
|
StarcoderdataPython
|
1788283
|
from argparse import ArgumentParser
from rkd.api.contract import ExecutionContext
from .base import BaseTask
from ..encryption import EncryptionService
from ..exception import CryptographyKeysAlreadyCreated
class CryptographyKeysSetupTask(BaseTask):
"""Generates OpenGPG keys required for encryption.
Takes Backup Definition as input, then generates keys for connected Access to that Backup Definition.
Notice: Multiple Backup Definitions could share same Accesses"""
def get_name(self) -> str: return ':generate-keys'
def get_group_name(self) -> str: return ':crypto'
def configure_argparse(self, parser: ArgumentParser, with_definition: bool = True):
super().configure_argparse(parser, with_definition=with_definition)
parser.add_argument('--ignore-errors', action='store_true',
help='Do not panic when the keys are already created')
def execute(self, context: ExecutionContext) -> bool:
if not super().execute(context):
return False
definition_name = context.get_arg('definition')
definition = self.config.get_definition(definition_name)
ignore_errors = bool(context.get_arg('--ignore-errors'))
try:
EncryptionService(self._io).create_keys(definition.encryption())
except CryptographyKeysAlreadyCreated as e:
self.io().error_msg(e) if not ignore_errors else self.io().info_msg(e)
return ignore_errors
return True
class ListCryptoKeys(BaseTask):
"""List all cryptographic keys"""
def get_name(self) -> str: return ':list-keys'
def get_group_name(self) -> str: return ':crypto'
def configure_argparse(self, parser: ArgumentParser, with_definition: bool = False):
super().configure_argparse(parser, with_definition=with_definition)
parser.add_argument('--ignore-errors', action='store_true',
help='Do not panic when the keys are already created')
parser.add_argument('--definition', dest='definition', required=False,
help='Backup definition name to limit the list by (optional)')
def execute(self, context: ExecutionContext) -> bool:
if not super().execute(context):
return False
definition_name = context.get_arg('--definition')
definitions = [self.config.get_definition(definition_name)] if definition_name else self.config.definitions()\
.values()
body = []
for definition in definitions:
keys = EncryptionService(self._io).list_keys(definition.encryption())
body += list(map(lambda key: [
definition.name(),
definition.encryption().name(),
key['fingerprint'],
key['email'],
key['gpg_home']
], keys))
self.io().outln(self.table(
header=['Backup definition', 'Access name', 'Fingerprint', 'Details', 'GPG Directory'],
body=body))
return len(body) > 0
|
StarcoderdataPython
|
72477
|
<reponame>zjzh/nova<filename>nova/notifications/objects/compute_task.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.notifications.objects import base
from nova.notifications.objects import request_spec as reqspec_payload
from nova.objects import base as nova_base
from nova.objects import fields
@nova_base.NovaObjectRegistry.register_notification
class ComputeTaskPayload(base.NotificationPayloadBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'instance_uuid': fields.UUIDField(),
# There are some cases that request_spec is None.
# e.g. Old instances can still have no RequestSpec object
# attached to them.
'request_spec': fields.ObjectField('RequestSpecPayload',
nullable=True),
'state': fields.InstanceStateField(nullable=True),
'reason': fields.ObjectField('ExceptionPayload')
}
def __init__(self, instance_uuid, request_spec, state, reason):
super(ComputeTaskPayload, self).__init__()
self.instance_uuid = instance_uuid
self.request_spec = reqspec_payload.RequestSpecPayload(
request_spec) if request_spec is not None else None
self.state = state
self.reason = reason
@base.notification_sample('compute_task-build_instances-error.json')
@base.notification_sample('compute_task-migrate_server-error.json')
@base.notification_sample('compute_task-rebuild_server-error.json')
@nova_base.NovaObjectRegistry.register_notification
class ComputeTaskNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('ComputeTaskPayload')
}
|
StarcoderdataPython
|
3233857
|
# -*- coding: utf-8 -*-
import pyvistaqt as pv
def plot_mesh(self, indices=[]):
"""Plot the mesh using pyvista plotter.
Parameters
----------
self : MeshVTK
a MeshVTK object
indices : list
list of the points to extract (optional)
Returns
-------
"""
mesh = self.get_mesh(indices)
# Configure plot
p = pv.BackgroundPlotter()
p.set_background("white")
p.add_mesh(
mesh, color="grey", opacity=1, show_edges=True, edge_color="white", line_width=1
)
p.show()
|
StarcoderdataPython
|
3393081
|
<filename>QUIS SISTER 1/1194021_M. RIZKY_D4 TI - 3A/Semaphore.py
import logging
import threading
import time
import random
LOG_FORMAT = '%(asctime)s %(threadName)-17s %(levelname)-8s %(message)s'
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
semaphore = threading.Semaphore(0)
room = 0
exist_room = [1, 4, 7, 2, 9, 13, 15, 20]
def reservation():
logging.info('Melakukan reservasi hotel')
semaphore.acquire()
def resepsionis():
global room
time.sleep(1)
logging.info('Memproses hotel, silahkan menunggu')
time.sleep(3)
room = random.randint(1, 25)
if room not in exist_room:
exist_room.append(room)
logging.info('Kamar No. {} kosong, proses dilanjutkan'.format(room))
semaphore.release()
else:
logging.info('Kamar No. {} sudah terisi, pilih kamar lainnya'.format(room))
semaphore.release()
def main():
for i in range(10):
t1 = threading.Thread(target=reservation)
t2 = threading.Thread(target=resepsionis)
t1.start()
t2.start()
t1.join()
t2.join()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3277227
|
import cv2
import numpy as np
img_path = "Resources/doctor strange.jpg"
width , height = 500,500
img = cv2.imread(img_path)
img_resized = cv2.resize(img ,(width,height) )
image_Cropped = img[0:900, 400:1000] # img[height, width] , y,x
print(img.shape) # (900,1600, 3)
cv2.imshow("Original", img)
cv2.imshow("Image Resized", img_resized)
cv2.imshow("Image Cropped", image_Cropped)
cv2.waitKey(0)
|
StarcoderdataPython
|
1700036
|
"""Helpers to facilitate the creation of macros that instantiate rules.
When writing a macro that instantiates multiple rules, one common problem
is how to forward additional kwargs to each rule instantiated.
For example, how to correctly forward "//visibility", or how to forward
"tags" or exec environment.
Additionally, when defining macros that create deploy rules as well as
test rules, it's common to have some "base parameters" that are shared
in a BUILD file, together with some parameters that are customized per
rule.
The functions and data types in this file help provide a generic
framework to handle those cases.
"""
load("//bazel/utils:merge_kwargs.bzl", "merge_kwargs")
def mconfig(*args, **kwargs):
"""Creates or overrides a dict representing rule configs.
This macro is normally used in BUILD.bazel files to define the
attributes of a rule or another macro.
This macro takes a list of dictionaries (*args) and key value
pairs (as **kwargs) and overlay them on top of each other, using
the semantics defined for merge_kwargs (scalars replace, dicts are
merged, lists are appended - uniquely).
For example:
>>> d1 = mconfig(foo = 1, bar = [2], baz = {'a': 1})
{"foo": 1, "bar": [2], "baz": {'a': 1}}
>>> mconfig(d1, foo = 2, bar = [3], baz = {'b': 2})
{"foo": 2, "bar": [2, 3], "baz": {'a': 1, 'b': 2}}
"""
args = list(args) + [kwargs]
if len(args) <= 1:
return args[0]
result = args[0]
for arg in args[1:]:
result = merge_kwargs(result, arg)
return result
def mconfig_get(defs, *args, default = {}):
"""Returns the value of the key supplied as *args in a config object.
This macro is normally used from within other macros defined in
*.bzl files, to retrieve the value from a dict, recursively.
Args:
*args: keys, used one after another.
default: default value to return if the key is not found.
Example:
>>> d1 = mconfig(srcs = [...], astore = mconfig(url = "uuu"), f = {'a': 1})
{"srcs": [...], "astore": {"url": "uuu"}, f: {'a': 1}}
>>> mconfig_get(d1, "astore", "url")
"uuu"
>>> mconfig_get(d1, "f", "a")
1
>>> mconfig_get(d1, "invalid")
{}
Returns:
For example, if *args is ("config", "astore", "root"), the code will
return the equivalent of defs["config"]["astore"]["root"].
"""
if defs == None:
return default
for a in args[:-1]:
defs = defs.get(a, {})
return defs.get(args[-1], default)
def mcreate_rule(current_name, rule_type, suffix, arg, *args):
if type(arg) == "string":
return arg
rargs = {}
for cfg in list(args) + [arg]:
rargs = merge_kwargs(rargs, cfg)
name = current_name
if suffix:
name = name + "-" + suffix
rule_type(
name = name,
**rargs
)
return ":" + name
|
StarcoderdataPython
|
1617944
|
__author__ = 'magus0219'
import re
import datetime
str_text = """
Tag name: {name}
Tag author: {author}
Tag author email: {email}
Tag commit: {commit_id}
Tag time: {tag_time}
Tag description:
{desc}
"""
class Tag():
def __init__(self, name, author, email, commit_id, desc, tag_time):
self.name = name
self.author = author
self.email = email
self.commit_id = commit_id
self.desc = desc
self.tag_time = tag_time
@staticmethod
def create_by_tag_info(tag_info):
"""Create a tag object by parsing output of command git show tagname
We only recognize annotation tag here, example of output of git show tagname here:
================================================
tag r0.0.3
Tagger: Arthur.Qin <<EMAIL>>
Date: Wed May 6 11:42:55 2015 +0800
release3
commit 04591b7527b85182dc517e1068e4cc94bd7d38d4
Merge: 32eff1d 9d9b243
Author: Arthur.Qin <<EMAIL>>
Date: Wed May 6 10:55:19 2015 +0800
Merge pull request #6 from baixing/master
merger baixing/master
================================================
"""
name = None
author = None
email = None
commit_id = None
desc = None
tag_time = None
        pattern_name = r'tag\s+(r\d+\.\d+\.\d+)\n'
rst = re.search(pattern_name, tag_info)
if rst:
name = rst.group(1)
        pattern_auther_email = r'Tagger:\s(.+)\s<(.+)>'
rst = re.search(pattern_auther_email, tag_info)
if rst:
author = rst.group(1)
email = rst.group(2)
        pattern_tag_time = r'Tagger:.+\nDate:\s+(.+)\n'
rst = re.search(pattern_tag_time, tag_info)
if rst:
tag_time = rst.group(1)
tag_time = datetime.datetime.strptime(tag_time, '%a %b %d %H:%M:%S %Y %z')
        pattern_desc = r'Date:.+\n\n((.|\n)+)\ncommit'
rst = re.search(pattern_desc, tag_info)
if rst:
desc = rst.group(1)
        pattern_commit_id = r'commit\s(.+)\n'
rst = re.search(pattern_commit_id, tag_info)
if rst:
commit_id = rst.group(1)
if name:
return Tag(name, author, email, commit_id, desc, tag_time)
else:
return None
def __repr__(self):
return self.name
def __str__(self):
return str_text.format(name=self.name,
author=self.author,
email=self.email,
commit_id=self.commit_id,
tag_time=self.tag_time,
desc=self.desc)
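# A usage sketch (not part of the original module): feed the raw output of
# ``git show <tag>`` into the parser; the tag name below is illustrative.
if __name__ == '__main__':
    import subprocess
    raw_tag_info = subprocess.run(['git', 'show', 'r0.0.3'],
                                  capture_output=True, text=True).stdout
    parsed = Tag.create_by_tag_info(raw_tag_info)
    if parsed:
        print(parsed)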
|
StarcoderdataPython
|
51614
|
<reponame>le717/flask-google-fonts<filename>flask_google_fonts.py
from typing import Optional
from flask import Flask
from jinja2 import Markup
__all__ = ["GoogleFonts"]
class GoogleFonts:
"""Add fast-rendering Google Fonts to your Flask app.
Uses the techniques outlined in <NAME>'s post.
https://csswizardry.com/2020/05/the-fastest-google-fonts/
"""
def __init__(self, app: Optional[Flask] = None):
if app is not None:
self.init_app(app)
def init_app(self, app: Optional[Flask]):
if app is None:
raise TypeError("Parameter 'app' must be a Flask instance")
@app.context_processor
def context_processor() -> dict:
"""Register the extension with the app."""
return {"GoogleFonts": self.render}
def render(self, url: str) -> str:
"""Render fast-loading Google Fonts markup.
        :param url: A base Google Fonts v1/2 url, without any query params.
        :return: str HTML code to go in the <head>.
"""
# Copied from https://csswizardry.com/2020/05/the-fastest-google-fonts/
# - 1. Preemptively warm up the fonts’ origin.
# - 2. Initiate a high-priority, asynchronous fetch for the CSS file.
# - Works in most modern browsers.
# - 3. Initiate a low-priority, asynchronous fetch that gets applied
# - to the page only after it’s arrived. Works in all browsers
# - with JavaScript enabled.
# - 4. In the unlikely event that a visitor has intentionally disabled
# - JavaScript, fall back to the original method. The good news is
# - that, although this is a render-blocking request, it can still
# - make use of the preconnect which makes it marginally faster
# - than the default.
html = """<link rel="preconnect" crossorigin href="https://fonts.gstatic.com">
<link rel="preload" as="style" href="{url}&display=swap">
<link rel="stylesheet" media="print" onload="this.media='all'" href="{url}&display=swap">
<noscript>
<link rel="stylesheet" href="{url}&display=swap">
</noscript>""".format(
url=url
)
return Markup(html)
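# A wiring sketch (not part of the original module): register the extension on
# an app, then call ``GoogleFonts(...)`` from a Jinja template, e.g.
#   {{ GoogleFonts("https://fonts.googleapis.com/css2?family=Roboto") }}
if __name__ == "__main__":
    demo_app = Flask(__name__)
    GoogleFonts(demo_app)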
|
StarcoderdataPython
|
3389274
|
<gh_stars>1-10
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
x = np.arange(-1,9)
y = np.array([2, 1, .1, .05, .5, .1, -.15, .2, 1, 2])
xnew = np.linspace(x[0], x[-1], num=len(x)*100, endpoint=True)
f1 = interp1d(x, y)
f3 = interp1d(x, y, kind='cubic')
plt.figure(figsize=(5,3.5))
plt.plot(x, np.zeros(x.shape), '-', color='gray')
h0 = plt.plot(xnew, f3(xnew), '-k')[0]
plt.plot(x[:2], y[:2], '--k')
plt.plot(x[-2:], y[-2:], '--k')
h1 = plt.plot(x[1:-1], y[1:-1], '--ok')[0]
h2 = plt.plot(x[5:8], np.fabs(y[5:8]), ':ok', markerfacecolor='none')[0]
plt.xlim(x[[0,-1]])
plt.xticks([],[])
plt.yticks([0],[0])
plt.rc('text', usetex=True)
plt.legend([h0,h1,h2],
[r"$\alpha(\theta)$",r"$\alpha^{(k)}$",r"$|\alpha^{(k)}|$"],
loc='upper center',fontsize=16)
plt.xlabel(r'$\theta$', fontsize=16)
plt.ylabel(r'$\alpha$',rotation=0, fontsize=16)
plt.show()
|
StarcoderdataPython
|
96827
|
<filename>datasets/numeric_fused_head/numeric_fused_head.py
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NFH: Numeric Fused-Heads."""
import csv
import json
import datasets
_CITATION = """\
@article{elazar_head,
author = {<NAME> and <NAME>},
title = {Where’s My Head? Definition, Data Set, and Models for Numeric Fused-Head Identification and Resolution},
journal = {Transactions of the Association for Computational Linguistics},
volume = {7},
number = {},
pages = {519-535},
year = {2019},
doi = {10.1162/tacl\\_a\\_00280},
URL = {https://doi.org/10.1162/tacl_a_00280},
}
"""
_DESCRIPTION = """\
Fused Head constructions are noun phrases in which the head noun is \
missing and is said to be "fused" with its dependent modifier. This \
missing information is implicit and is important for sentence understanding.\
The missing heads are easily filled in by humans, but pose a challenge for \
computational models.
For example, in the sentence: "I bought 5 apples but got only 4.", 4 is a \
Fused-Head, and the missing head is apples, which appear earlier in the sentence.
This is a crowd-sourced dataset of 10k numerical fused head examples (1M tokens).
"""
_HOMEPAGE = "https://nlp.biu.ac.il/~lazary/fh/"
_LICENSE = "MIT"
_URLs = {
"identification": {
"train": "https://raw.githubusercontent.com/yanaiela/num_fh/master/data/identification/processed/train.tsv",
"test": "https://raw.githubusercontent.com/yanaiela/num_fh/master/data/identification/processed/test.tsv",
"dev": "https://raw.githubusercontent.com/yanaiela/num_fh/master/data/identification/processed/dev.tsv",
},
"resolution": {
"train": "https://raw.githubusercontent.com/yanaiela/num_fh/master/data/resolution/processed/nfh_train.jsonl",
"test": "https://raw.githubusercontent.com/yanaiela/num_fh/master/data/resolution/processed/nfh_test.jsonl",
"dev": "https://raw.githubusercontent.com/yanaiela/num_fh/master/data/resolution/processed/nfh_dev.jsonl",
},
}
class NumericFusedHead(datasets.GeneratorBasedBuilder):
"""NFH: Numeric Fused-Heads"""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="identification", description="Identify NFH anchors in a sentence"),
datasets.BuilderConfig(name="resolution", description="Identify the head for the numeric anchor"),
]
def _info(self):
if self.config.name == "identification":
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"start_index": datasets.Value("int32"),
"end_index": datasets.Value("int32"),
"label": datasets.features.ClassLabel(names=["neg", "pos"]),
}
)
else:
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"line_indices": datasets.Sequence(datasets.Value("int32")),
"head": datasets.Sequence(datasets.Value("string")),
"speakers": datasets.Sequence(datasets.Value("string")),
"anchors_indices": datasets.Sequence(datasets.Value("int32")),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
data_files = dl_manager.download_and_extract(_URLs[self.config.name])
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
]
def _generate_examples(self, filepath):
""" Yields examples. """
with open(filepath, encoding="utf-8") as f:
if self.config.name == "identification":
r = csv.DictReader(f, delimiter="\t")
for id_, row in enumerate(r):
data = {
"tokens": row["text"].split("_SEP_"),
"start_index": row["ind_s"],
"end_index": row["ind_e"],
"label": "neg" if row["y"] == "0" else "pos",
}
yield id_, data
else:
for id_, row in enumerate(f):
data = json.loads(row)
yield id_, {
"tokens": data["tokens"],
"line_indices": data["line_indices"],
"head": [str(s) for s in data["head"]],
"speakers": [str(s) for s in data["speakers"]],
"anchors_indices": data["anchors_indices"],
}
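# A loading sketch (not part of the original script): with this script available
# to the ``datasets`` library, either config can be pulled through the standard
# API; network access is needed for the underlying TSV/JSONL files.
if __name__ == "__main__":
    from datasets import load_dataset
    nfh = load_dataset("numeric_fused_head", "identification")
    print(nfh["train"][0])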
|
StarcoderdataPython
|
1679283
|
import logging
from os import environ
log = logging.getLogger(__name__)
class _Secrets:
"""Runtime abstraction exposing environment variables."""
def __init__(self) -> None:
"""Load attributes from environment."""
log.info("Loading secrets from environment")
keys = ("BOT_TOKEN", "TIKTOK_ID")
if any(key not in environ.keys() for key in keys):
raise Exception(f"Environment lacks required variables: {keys}")
self.bot_token = str(environ.get("BOT_TOKEN")) # Discord login token
self.tiktok_id = int(environ.get("TIKTOK_ID")) # TikTok application ID
Secrets = _Secrets()
|
StarcoderdataPython
|
3377450
|
<filename>per/migrations/0031_auto_20201106_1222.py
# Generated by Django 2.2.13 on 2020-11-06 12:22
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('per', '0030_auto_20201106_1205'),
]
operations = [
migrations.AlterField(
model_name='overview',
name='date_of_current_capacity_assessment',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='date of current capacity assessment'),
preserve_default=False,
),
]
|
StarcoderdataPython
|