content (string, 7-928k) | avg_line_length (float64, 3.5-33.8k) | max_line_length (int64, 6-139k) | alphanum_fraction (float64, 0.08-0.96) | licenses (sequence) | repository_name (string, 7-104) | path (string, 4-230) | size (int64, 7-928k) | lang (string, 1 class) |
---|---|---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0026_event_image'),
]
operations = [
migrations.CreateModel(
name='EventParticipation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ranking', models.PositiveIntegerField(null=True, verbose_name='Ranking', blank=True)),
('song_ranking', models.PositiveIntegerField(null=True, verbose_name='Song Ranking', blank=True)),
('points', models.PositiveIntegerField(null=True, verbose_name='Points', blank=True)),
('account', models.ForeignKey(related_name='events', to='api.Account')),
('event', models.ForeignKey(related_name='participations', to='api.Event')),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='eventparticipation',
unique_together=set([('event', 'account')]),
),
migrations.AlterField(
model_name='card',
name='event',
field=models.ForeignKey(related_name='cards', blank=True, to='api.Event', null=True),
preserve_default=True,
),
]
| 36.589744 | 114 | 0.585144 | [
"Apache-2.0"
] | MagiCircles/SchoolIdolAPI | api/migrations/0027_auto_20150227_2321.py | 1,427 | Python |
# import the definition of the steps and input files:
from Configuration.PyReleaseValidation.relval_steps import *
# here only define the workflows as a combination of the steps defined above:
workflows = Matrix()
# each workflow defines a name and a list of steps to be done.
# if no explicit name/label given for the workflow (first arg),
# the name of step1 will be used
from Configuration.PyReleaseValidation.relval_upgrade import workflows as _upgrade_workflows
#just define all of them
#WFs to run in IB:
# 2017 (ele guns 10, 35, 1000; pho guns 10, 35; mu guns 1, 10, 100, 1000, QCD 3TeV, QCD Flat)
# (ZMM, TTbar, ZEE, MinBias, TTbar PU, ZEE PU, TTbar design)
# (TTbar trackingOnly, trackingRun2, trackingOnlyRun2, trackingLowPU, pixelTrackingOnly)
# 2018 (ele guns 10, 35, 1000; pho guns 10, 35; mu guns 1, 10, 100, 1000, QCD 3TeV, QCD Flat)
# 2018 (ZMM, TTbar, ZEE, MinBias, TTbar PU, ZEE PU, TTbar design)
# (TTbar trackingOnly, pixelTrackingOnly)
# (HE collapse: TTbar, TTbar PU, TTbar design)
# (ParkingBPH: TTbar)
# (Patatrack pixel-only: ZMM - on CPU, on GPU, both, auto)
# (Patatrack pixel-only: TTbar - on CPU, on GPU, both, auto)
# (Patatrack ECAL-only: TTbar - on CPU, on GPU, both, auto)
# (Patatrack HCAL-only: TTbar - on CPU, on GPU, both, auto)
# 2021 (ZMM, TTbar, ZEE, MinBias, TTbar PU, TTbar PU premix, ZEE PU, TTbar design)
# (TTbar trackingMkFit)
# (Patatrack pixel-only: ZMM - on CPU, on GPU, both, auto)
# (Patatrack pixel-only: TTbar - on CPU, on GPU, both, auto)
# (Patatrack ECAL-only: TTbar - on CPU, on GPU, both, auto)
# (Patatrack HCAL-only: TTbar - on CPU, on GPU, both, auto)
# 2023 (TTbar, TTbar PU, TTbar PU premix)
# 2024 (TTbar, TTbar PU, TTbar PU premix)
numWFIB = [10001.0,10002.0,10003.0,10004.0,10005.0,10006.0,10007.0,10008.0,10009.0,10059.0,10071.0,
10042.0,10024.0,10025.0,10026.0,10023.0,10224.0,10225.0,10424.0,
10024.1,10024.2,10024.3,10024.4,10024.5,
10801.0,10802.0,10803.0,10804.0,10805.0,10806.0,10807.0,10808.0,10809.0,10859.0,10871.0,
10842.0,10824.0,10825.0,10826.0,10823.0,11024.0,11025.0,11224.0,
10824.1,10824.5,
10824.6,11024.6,11224.6,
10824.8,
10842.501,10842.502, # 10842.503,10842.504,
10824.501,10824.502, # 10824.503,10824.504,
# 10824.511,10824.512,10824.513,10824.514,
# 10824.521,10824.522,10824.523,10824.524,
11650.0,11634.0,11646.0,11640.0,11834.0,11834.99,11846.0,12024.0,
11634.7,
11650.501,11650.502, # 11650.503,11650.504,
11634.501,11634.502, # 11634.503,11634.504,
# 11634.511,11634.512,11634.513,11634.514,
# 11634.521,11634.522,11634.523,11634.524,
12434.0,12634.0,12634.99,
12834.0,13034.0,13034.99]
for numWF in numWFIB:
    if numWF not in _upgrade_workflows: continue
workflows[numWF] = _upgrade_workflows[numWF]
| 50.5 | 99 | 0.649175 | [
"Apache-2.0"
] | AlionaD/cmssw | Configuration/PyReleaseValidation/python/relval_2017.py | 3,030 | Python |
import MPS_class as MPS
import MPO_class as MPO
from ncon import ncon
import numpy as np
from scipy.linalg import expm
#%%
def TEBD_evo(MPS_,Lx,Ly,J=1,epsilon=0.1,etrunc=0,chiMAX=256,chiMAXswap=256,info=True):
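    # Uprop is the two-site gate exp(-1j * sigma_z (x) (-J*sigma_z)) and flip_op is a
    # single-site rotation about X by theta = pi + 2*epsilon (a slightly imperfect spin
    # flip).  The two sweeps below (forward, then backward over the Lx x Ly grid) apply
    # the coupling gate to every pair of lattice neighbours, using MPS swap gates to
    # bring non-adjacent sites next to each other in the chain, and then apply flip_op
    # to the current site, truncating to chiMAX after each SVD.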
L = Lx*Ly
config = np.arange(0,L).reshape(Lx,Ly)
theta = (np.pi+2*epsilon)
flip_op = np.eye(2)*np.cos(theta/2) - 1j*np.sin(theta/2)*np.array([[0,1],[1,0]])
sigma_z = np.array([[1,0],[0,-1]])
Uprop = expm(-1j*np.kron(sigma_z,-J*sigma_z)).reshape(2,2,2,2)
nn_list_forward = [[] for x in range(L)]
for x in range(L):
i,j = np.where(config == x)
if j != Ly-1: nn_list_forward[x].append( config[i,j+1])
if i != Lx-1: nn_list_forward[x].append( config[i+1,j])
nn_list_forward[x] = np.array(nn_list_forward[x]).ravel()
nn_list_backward = [[] for x in range(L)]
for x in reversed(range(L)):
i,j = np.where(config == x)
if j != 0: nn_list_backward[x].append( config[i,j-1])
if i != 0: nn_list_backward[x].append( config[i-1,j])
nn_list_backward[x] = np.array(nn_list_backward[x]).ravel()
for x in range(L):
for nn in nn_list_forward[x]:
# If they are nearest neighbours
if nn == x+1:
shpM1,shpM2 = MPS_.M[x].shape, MPS_.M[nn].shape
Theta = ncon([MPS_.M[x],MPS_.M[nn],Uprop],[[-1,2,1],[1,3,-4],[2,3,-2,-3]])
Theta = Theta.reshape(shpM1[0]*shpM1[1], shpM2[1]*shpM2[2])
U,S,V = MPS.svdtruncate(Theta, etrunc, chiMAX,info=info)
MPS_.M[x] = U.reshape(shpM1[0],shpM1[1],S.size)
MPS_.M[nn] = (np.diag(S)@V).reshape(S.size,shpM2[1],shpM2[2])
else:
for index in range(x,nn-1):
MPS_.swap(index,chiMAX=chiMAXswap,info=info)
shpM1,shpM2 = MPS_.M[nn-1].shape, MPS_.M[nn].shape
Theta = ncon([MPS_.M[nn-1],MPS_.M[nn],Uprop],[[-1,2,1],[1,3,-4],[2,3,-2,-3]])
Theta = Theta.reshape(shpM1[0]*shpM1[1], shpM2[1]*shpM2[2])
U,S,V = MPS.svdtruncate(Theta, etrunc, chiMAX,info=info)
MPS_.M[nn-1] = ([email protected](S)).reshape(shpM1[0],shpM1[1],S.size)
MPS_.M[nn] = V.reshape(S.size,shpM2[1],shpM2[2])
for index in reversed(range(x,nn-1)):
MPS_.swap(index,chiMAX=chiMAXswap,info=info)
MPS_.M[x] = ncon([MPS_.M[x],flip_op],[[-1,1,-3],[1,-2]])
for x in reversed(range(L)):
for nn in nn_list_backward[x]:
# If they are nearest neighbours
if nn == x-1:
shpM1,shpM2 = MPS_.M[nn].shape, MPS_.M[x].shape
Theta = ncon([MPS_.M[nn],MPS_.M[x],Uprop],[[-1,2,1],[1,3,-4],[2,3,-2,-3]])
Theta = Theta.reshape(shpM1[0]*shpM1[1], shpM2[1]*shpM2[2])
U,S,V = MPS.svdtruncate(Theta, etrunc, chiMAX,info=info)
MPS_.M[nn] = ([email protected](S)).reshape(shpM1[0],shpM1[1],S.size)
MPS_.M[x] = (V).reshape(S.size,shpM2[1],shpM2[2])
else:
for index in range(x-1,nn,-1):
MPS_.swap(index,chiMAX=chiMAXswap,center='i',info=info)
shpM1,shpM2 = MPS_.M[nn].shape, MPS_.M[nn+1].shape
Theta = ncon([MPS_.M[nn],MPS_.M[nn+1],Uprop],[[-1,2,1],[1,3,-4],[2,3,-2,-3]])
Theta = Theta.reshape(shpM1[0]*shpM1[1], shpM2[1]*shpM2[2])
U,S,V = MPS.svdtruncate(Theta, etrunc, chiMAX,info=info)
MPS_.M[nn] = U.reshape(shpM1[0],shpM1[1],S.size)
MPS_.M[nn+1] = (np.diag(S)@V).reshape(S.size,shpM2[1],shpM2[2])
for index in reversed(range(x-1,nn,-1)):
MPS_.swap(index,chiMAX=chiMAXswap,center='i',info=info)
MPS_.M[x] = ncon([MPS_.M[x],flip_op],[[-1,1,-3],[1,-2]])
Lx = 5
Ly = Lx
L = Lx*Ly
psi_state = MPS.getAllUp(L)
mag = []
err = 0.
info = True
mag.append(MPO.return_LocalMz(psi_state).real.reshape(Lx,Ly))
for k in range(20):
print('k',k,np.max(mag[k]-mag[k].T))
for x in psi_state.M:
print(x.shape)
TEBD_evo(psi_state, Lx, Ly,J = -1j,chiMAX=256,chiMAXswap=256,etrunc=1e-12,info=info)
mag.append(MPO.return_LocalMz(psi_state).real.reshape(Lx,Ly))
mag = np.array(mag)
#%%
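# Exact (sparse) evolution section: build the spin Hamiltonians explicitly and evolve
# small systems with expm_multiply, for comparison with the MPS simulation above.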
from scipy.sparse.linalg import expm_multiply
import scipy.sparse as sps
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib as mpl
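# Helpers that build many-body operators as sparse Kronecker products: nested_tensor()
# krons a list of 2x2 matrices together, spin_op_construct() places a single-site
# operator `sigma` at site j (identities elsewhere), and int_spin_op_construct() places
# two operators at sites i1 and i2.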
def nested_tensor(lst):
if len(lst) == 2:
return sps.kron(lst[0],lst[1],format='csc')
else:
return sps.kron(lst[0], nested_tensor(lst[1:]),format='csc')
def spin_op_construct(sigma, j, L):
before = [sps.eye(2) for _ in range(j)]
mid = [sigma]
after = [sps.eye(2) for _ in range(j+1,L)]
return nested_tensor(before+mid+after)
def int_spin_op_construct(sigma1,sigma2,i1,i2,L):
if i2 < i1:
i1,i2 = i2,i1
before1 = [sps.eye(2) for _ in range(i1)]
mid1 = [sigma1]
after1 = [sps.eye(2) for _ in range(i1+1,i2)]
mid2 = [sigma2]
after2 = [sps.eye(2) for _ in range(i2+1,L)]
return nested_tensor(before1+mid1+after1+mid2+after2)
def H1(L, epsilon):
sigma_x = sps.csc_matrix(np.array([[0,1],[1,0]]))
op1 = 0
for i in range(L):
op1 += spin_op_construct(-sigma_x*(np.pi/2+epsilon),i,L)
return op1
def H2(Lx, Ly, J=1):
L = Lx*Ly
sigma_z = sps.csc_matrix(np.array([[1,0],[0,-1]]))
op2 = 0
config = np.arange(L).reshape(Lx,Ly)
for i in range(Lx):
for j in range(Ly):
nn = []
if i != Lx-1:
nn.append(config[i+1,j])
if j != Ly-1:
nn.append(config[i,j+1])
op = 0
for x in nn:
op += int_spin_op_construct(-J*sigma_z,sigma_z,config[i,j],x,L)
op2 += op
return op2
def H2_pbc(Lx, Ly, J=1):
L = Lx*Ly
sigma_z = sps.csc_matrix(np.array([[1,0],[0,-1]]))
op2 = 0
config = np.arange(L).reshape(Lx,Ly)
for i in range(Lx):
for j in range(Ly):
nn = []
nn.append(config[(i+1)%(Lx-1),j])
nn.append(config[i,(j+1)%(Ly-1)])
op = 0
for x in nn:
op += int_spin_op_construct(-J*sigma_z,sigma_z,config[i,j],x,L)
op2 += op
return op2
def H2_pbc1D(L, J=1):
sigma_z = sps.csc_matrix(np.array([[1,0],[0,-1]]))
op2 = 0
for i in range(L):
op2 += int_spin_op_construct(-J*sigma_z,sigma_z,i,(i+1)%L,L)
return op2
def H2_pbc1D_var(L, J=1):
sigma_z = sps.csc_matrix(np.array([[1,0],[0,-1]]))
op2 = 0
for i in range(1,L-1):
op2 += int_spin_op_construct(-J*sigma_z,sigma_z,i,(i+1),L)
op2 += spin_op_construct(-J*0.5*np.eye(2), L-1, L)
op2 += spin_op_construct(-J*0.5*np.eye(2), 0, L)
return op2
Lx = 4;
Ly = Lx;
L = Lx*Ly;
D = 2**L
en = []
mz_config = np.zeros(D)
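# Average magnetization per site of each computational basis state:
# (L - 2 * number of '1' bits) / L, i.e. '0' bits contribute +1 and '1' bits -1.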
for i,state in enumerate(np.vectorize(np.binary_repr)(np.arange(2**L),L)):
mz_config[i] = (L-2*state.count('1'))/L
Hdouble_1d = H2_pbc1D(L)
Hdouble_1dv = H2_pbc1D_var(L)
epsilon = 0.1
Hsingle = H1(L,epsilon)
psi0 = np.zeros(D)
psi0[0] = 1
psi1dv = [psi0]
psi1d = [psi0]
for n in range(200):
print(n,' ',end='')
psi1dv.append(expm_multiply(-1j*Hsingle,expm_multiply(-1j*Hdouble_1dv,psi1dv[-1])))
psi1d.append(expm_multiply(-1j*Hsingle,expm_multiply(-1j*Hdouble_1d,psi1d[-1])))
#%%
psi1dv = np.array(psi1dv)
psi1d = np.array(psi1d)
mag_ED = np.abs(psi1dv)**2@mz_config.reshape(D,1)
mag_ED = mag_ED.reshape(mag_ED.size)
mag_ED1d = np.abs(psi1d)**2@mz_config.reshape(D,1)
mag_ED1d = mag_ED1d.reshape(mag_ED1d.size)
plt.plot(np.abs(mag_ED))
plt.plot(np.abs(mag_ED1d))
#%%
L1d = np.zeros(psi1d.shape[0])
L2d = np.zeros(psi1d.shape[0])
for x in range(psi1d.shape[0]):
if x%2 == 0: k = 0
else: k=-1
L1d[x] = np.abs(psi1d[x,k])**2
    L2d[x] = np.abs(psi1dv[x,k])**2  # NOTE: original referenced undefined `psi`; assuming psi1dv was intended
#%%
K = (sps.eye(2**L)-1j*Hsingle)
U = (sps.eye(2**L)-1j*Hdouble_1d)  # NOTE: original referenced undefined `Hdouble`; assuming the 1D Hamiltonian was intended
psi_1 = []
psi_1.append(psi0)
for n in range(100):
print(n,' ',end='')
psi_1.append(K.dot(U.dot(psi_1[-1])))
psi_1[-1] /= np.linalg.norm(psi_1[-1])
psi_1 = np.array(psi_1)
mag_ED1 = np.abs(psi_1)**2@mz_config.reshape(D,1)
plt.plot(mag_ED)
plt.plot(mag_ED1) | 34.27686 | 94 | 0.563472 | [
"MIT"
] | alessandro-santini/Tensor-Network | tebd_floquet.py | 8,295 | Python |
"""
link: https://leetcode-cn.com/problems/smallest-rectangle-enclosing-black-pixels
problem: 给定 0, 1 矩阵,以及一个矩阵中为 1 的点坐标,求包含矩阵中所有的1的最小矩形面积
solution: 暴搜。忽略坐标,直接遍历所有节点,找到上下左右四个边界点,时间O(nm)。
solution-fix: 二分。将x轴投影到y轴,y轴投影到x轴,形成两个一维数组。显然数组形如下图。而 x, y 坐标为界,两侧各为非严格递增和递减
1: +------+
0: -----+ +-----
四次二分找到递增递减边界,时间复杂度 O(nlogn*mlogm)
"""
class Solution:
def minArea(self, image: List[List[str]], x: int, y: int) -> int:
if not image or not image[0]:
return 0
n, m = len(image), len(image[0])
a, b, c, d = n, m, 0, 0
for i in range(n):
for j in range(m):
if image[i][j] == '1':
a = min(a, i)
b = min(b, j)
c = max(c, i)
d = max(d, j)
return (c + 1 - a) * (d + 1 - b)
# ---
class Solution:
def minArea(self, image: List[List[str]], x: int, y: int) -> int:
if not image or not image[0]:
return 0
n, m = len(image), len(image[0])
def search_column(l: int, r: int, up: bool) -> int:
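            # Binary search over row indices in [l, r] for the boundary row that
            # contains a '1': with up=True it returns the topmost such row, with
            # up=False the bottommost (mid_k records whether row `mid` has any '1').
            # search_row below does the same over column indices.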
k = r if up else l
while l <= r:
mid, mid_k = (l + r) >> 1, 0
for i in range(m):
if image[mid][i] == '1':
mid_k = 1
break
if mid_k:
k = min(k, mid) if up else max(k, mid)
if mid_k ^ up:
l = mid + 1
else:
r = mid - 1
return k
def search_row(l: int, r: int, up: bool) -> int:
k = r if up else l
while l <= r:
mid, mid_k = (l + r) >> 1, 0
for i in range(n):
if image[i][mid] == '1':
mid_k = 1
break
if mid_k:
k = min(k, mid) if up else max(k, mid)
if mid_k ^ up:
l = mid + 1
else:
r = mid - 1
return k
a = search_column(0, x, True)
b = search_row(0, y, True)
c = search_column(x, n - 1, False)
d = search_row(y, m - 1, False)
return (c + 1 - a) * (d + 1 - b)
| 31.053333 | 80 | 0.396737 | [
"Apache-2.0"
] | windniw/just-for-fun | leetcode/302.py | 2,601 | Python |
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import node
import config
import command
LEADER = 1
ROUTER1 = 2
DUT_ROUTER2 = 3
ROUTER3 = 4
MED1 = 5
MED1_TIMEOUT = 3
class Cert_5_3_3_AddressQuery(unittest.TestCase):
def setUp(self):
self.simulator = config.create_default_simulator()
self.nodes = {}
for i in range(1,6):
self.nodes[i] = node.Node(i, (i == MED1), simulator=self.simulator)
self.nodes[LEADER].set_panid()
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[DUT_ROUTER2].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER3].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[ROUTER1].set_panid()
self.nodes[ROUTER1].set_mode('rsdn')
self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER1].enable_whitelist()
self.nodes[ROUTER1].set_router_selection_jitter(1)
self.nodes[DUT_ROUTER2].set_panid()
self.nodes[DUT_ROUTER2].set_mode('rsdn')
self.nodes[DUT_ROUTER2].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[DUT_ROUTER2].add_whitelist(self.nodes[ROUTER3].get_addr64())
self.nodes[DUT_ROUTER2].add_whitelist(self.nodes[MED1].get_addr64())
self.nodes[DUT_ROUTER2].enable_whitelist()
self.nodes[DUT_ROUTER2].set_router_selection_jitter(1)
self.nodes[ROUTER3].set_panid()
self.nodes[ROUTER3].set_mode('rsdn')
self.nodes[ROUTER3].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER3].add_whitelist(self.nodes[DUT_ROUTER2].get_addr64())
self.nodes[ROUTER3].enable_whitelist()
self.nodes[ROUTER3].set_router_selection_jitter(1)
self.nodes[MED1].set_panid()
self.nodes[MED1].set_mode('rsn')
self.nodes[MED1].add_whitelist(self.nodes[DUT_ROUTER2].get_addr64())
self.nodes[MED1].set_timeout(MED1_TIMEOUT)
self.nodes[MED1].enable_whitelist()
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
del self.nodes
del self.simulator
def test(self):
# 1
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER1].start()
self.nodes[DUT_ROUTER2].start()
self.nodes[ROUTER3].start()
self.nodes[MED1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.assertEqual(self.nodes[DUT_ROUTER2].get_state(), 'router')
self.assertEqual(self.nodes[ROUTER3].get_state(), 'router')
self.assertEqual(self.nodes[MED1].get_state(), 'child')
# 2
# Flush the message queue to avoid possible impact on follow-up verification.
dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
router3_mleid = self.nodes[ROUTER3].get_ip6_address(config.ADDRESS_TYPE.ML_EID)
self.assertTrue(self.nodes[MED1].ping(router3_mleid))
# Verify DUT_ROUTER2 sent an Address Query Request to the Realm local address.
dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
msg = dut_messages.next_coap_message('0.02', '/a/aq')
command.check_address_query(msg, self.nodes[DUT_ROUTER2], config.REALM_LOCAL_ALL_ROUTERS_ADDRESS)
# 3
        # Wait for the address resolution traffic triggered by the previous ping to finish.
self.simulator.go(5)
# Flush the message queue to avoid possible impact on follow-up verification.
dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
med1_mleid = self.nodes[MED1].get_ip6_address(config.ADDRESS_TYPE.ML_EID)
self.assertTrue(self.nodes[ROUTER1].ping(med1_mleid))
# Verify DUT_ROUTER2 responded with an Address Notification.
dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
msg = dut_messages.next_coap_message('0.02', '/a/an')
command.check_address_notification(msg, self.nodes[DUT_ROUTER2], self.nodes[ROUTER1])
# 4
        # Wait for the address resolution traffic triggered by the previous ping to finish.
self.simulator.go(5)
# Flush the message queue to avoid possible impact on follow-up verification.
dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
self.assertTrue(self.nodes[MED1].ping(router3_mleid))
# Verify DUT_ROUTER2 didn't send an Address Query Request.
dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
msg = dut_messages.next_coap_message('0.02', '/a/aq', False)
assert msg is None, "The Address Query Request is not expected."
# 5
self.nodes[ROUTER3].stop()
# Wait for the Leader to expire its Router ID.
# MAX_NEIGHBOR_AGE + INFINITE_COST_TIMEOUT + ID_REUSE_DELAY + propagation time + transmission time ~ 580s.
self.simulator.go(580)
# Flush the message queue to avoid possible impact on follow-up verification.
dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
self.assertFalse(self.nodes[MED1].ping(router3_mleid))
# Verify DUT_ROUTER2 sent an Address Query Request to the Realm local address.
dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
msg = dut_messages.next_coap_message('0.02', '/a/aq')
command.check_address_query(msg, self.nodes[DUT_ROUTER2], config.REALM_LOCAL_ALL_ROUTERS_ADDRESS)
# 6
self.nodes[MED1].stop()
self.simulator.go(MED1_TIMEOUT)
# Flush the message queue to avoid possible impact on follow-up verification.
dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
self.assertFalse(self.nodes[ROUTER1].ping(med1_mleid))
self.assertFalse(self.nodes[ROUTER1].ping(med1_mleid))
# Verify DUT_ROUTER2 didn't respond with an Address Notification.
dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
msg = dut_messages.next_coap_message('0.02', '/a/an', False)
assert msg is None, "The Address Notification is not expected."
if __name__ == '__main__':
unittest.main()
| 43.037838 | 114 | 0.706104 | [
"BSD-3-Clause"
] | VictorAtPL/openthread | tests/scripts/thread-cert/Cert_5_3_03_AddressQuery.py | 7,962 | Python |
import re
from haystack.inputs import Exact, Clean, BaseInput
from api.helpers.parse_helper import has_balanced_parentheses, matched_parens
class ElasticSearchExtendedAutoQuery(BaseInput):
"""
A convenience class that handles common user queries.
In addition to cleaning all tokens, it handles double quote bits as
exact matches & terms with '-' in front as NOT queries.
"""
input_type_name = 'auto_query'
post_process = False
exact_match_re = re.compile(r'"(?P<phrase>.*?)"')
uncleaned_tokens = [
'OR',
'AND',
'NOT',
'TO',
]
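    # str.translate() table mapping every character of `matched_parens` to None,
    # used below to strip those characters when the parentheses are unbalanced.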
to_be_removed_special_chars_translation_table = {ord(c): None for c in matched_parens}
def prepare(self, query_obj):
query_string = super(ElasticSearchExtendedAutoQuery, self).prepare(query_obj)
# Remove parens if they are not balanced
if not has_balanced_parentheses(query_string):
query_string = query_string.translate(self.to_be_removed_special_chars_translation_table)
exacts = self.exact_match_re.findall(query_string)
tokens = []
query_bits = []
for rough_token in self.exact_match_re.split(query_string):
if not rough_token:
continue
elif rough_token not in exacts:
# We have something that's not an exact match but may have more
                # than one word in it.
tokens.extend(rough_token.split(' '))
else:
tokens.append(rough_token)
for token in tokens:
if not token:
continue
if token in exacts:
query_bits.append(Exact(token, clean=True).prepare(query_obj))
elif token in self.uncleaned_tokens:
query_bits.append(token)
else:
query_bits.append(Clean(token).prepare(query_obj))
return u' '.join(query_bits)
| 34.5 | 101 | 0.630952 | [
"MIT"
] | geometalab/G4SE-Compass | compass-api/G4SE/api/helpers/input.py | 1,932 | Python |
"""Mark channels in an existing BIDS dataset as "bad".
example usage:
$ mne_bids mark_bad_channels --ch_name="MEG 0112" --description="noisy" \
--ch_name="MEG 0131" --description="flat" \
--subject_id=01 --task=experiment --session=test \
--bids_root=bids_root --overwrite
"""
# Authors: Richard Höchenberger <[email protected]>
#
# License: BSD-3-Clause
from mne.utils import logger
import mne_bids
from mne_bids.config import reader
from mne_bids import BIDSPath, mark_bad_channels
def run():
"""Run the mark_bad_channels command."""
from mne.commands.utils import get_optparser
parser = get_optparser(__file__, usage="usage: %prog options args",
prog_prefix='mne_bids',
version=mne_bids.__version__)
parser.add_option('--ch_name', dest='ch_names', action='append',
default=[],
help='The names of the bad channels. If multiple '
'channels are bad, pass the --ch_name parameter '
'multiple times.')
parser.add_option('--description', dest='descriptions', action='append',
default=[],
help='Descriptions as to why the channels are bad. '
'Must match the number of bad channels provided. '
'Pass multiple times to supply more than one '
'value in that case.')
parser.add_option('--bids_root', dest='bids_root',
help='The path of the folder containing the BIDS '
'dataset')
parser.add_option('--subject_id', dest='subject',
help=('Subject name'))
parser.add_option('--session_id', dest='session',
help='Session name')
parser.add_option('--task', dest='task',
help='Task name')
parser.add_option('--acq', dest='acquisition',
help='Acquisition parameter')
parser.add_option('--run', dest='run',
help='Run number')
parser.add_option('--proc', dest='processing',
help='Processing label.')
parser.add_option('--rec', dest='recording',
help='Recording name')
parser.add_option('--type', dest='datatype',
help='Recording data type, e.g. meg, ieeg or eeg')
parser.add_option('--suffix', dest='suffix',
help='The filename suffix, i.e. the last part before '
'the extension')
parser.add_option('--ext', dest='extension',
help='The filename extension, including the leading '
'period, e.g. .fif')
parser.add_option('--overwrite', dest='overwrite', action='store_true',
help='Replace existing channel status entries')
parser.add_option('--verbose', dest='verbose', action='store_true',
help='Whether do generate additional diagnostic output')
opt, args = parser.parse_args()
if args:
parser.print_help()
parser.error(f'Please do not specify arguments without flags. '
f'Got: {args}.\n')
if opt.bids_root is None:
parser.print_help()
parser.error('You must specify bids_root')
if opt.ch_names is None:
parser.print_help()
parser.error('You must specify some --ch_name parameters.')
ch_names = [] if opt.ch_names == [''] else opt.ch_names
bids_path = BIDSPath(subject=opt.subject, session=opt.session,
task=opt.task, acquisition=opt.acquisition,
run=opt.run, processing=opt.processing,
recording=opt.recording, datatype=opt.datatype,
suffix=opt.suffix, extension=opt.extension,
root=opt.bids_root)
bids_paths = bids_path.match()
# Only keep data we can actually read & write.
allowed_extensions = list(reader.keys())
bids_paths = [p for p in bids_paths
if p.extension in allowed_extensions]
if not bids_paths:
logger.info('No matching files found. Please consider using a less '
'restrictive set of entities to broaden the search.')
return # XXX should be return with an error code?
logger.info(f'Marking channels {", ".join(ch_names)} as bad in '
f'{len(bids_paths)} recording(s) …')
for bids_path in bids_paths:
logger.info(f'Processing: {bids_path.basename}')
mark_bad_channels(ch_names=ch_names, descriptions=opt.descriptions,
bids_path=bids_path, overwrite=opt.overwrite,
verbose=opt.verbose)
if __name__ == '__main__':
run()
| 43.690265 | 79 | 0.569374 | [
"BSD-3-Clause"
] | adam2392/mne-bids | mne_bids/commands/mne_bids_mark_bad_channels.py | 4,940 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import json
import os
from pymisp import ExpandedPyMISP
from settings import url, key, ssl, outputdir, filters, valid_attribute_distribution_levels
try:
from settings import with_distribution
except ImportError:
with_distribution = False
try:
from settings import include_deleted
except ImportError:
include_deleted = False
try:
from settings import exclude_attribute_types
except ImportError:
exclude_attribute_types = []
valid_attribute_distributions = []
def init():
# If we have an old settings.py file then this variable won't exist
global valid_attribute_distributions
try:
valid_attribute_distributions = [int(v) for v in valid_attribute_distribution_levels]
except Exception:
valid_attribute_distributions = [0, 1, 2, 3, 4, 5]
return ExpandedPyMISP(url, key, ssl)
def saveEvent(event):
try:
with open(os.path.join(outputdir, f'{event["Event"]["uuid"]}.json'), 'w') as f:
json.dump(event, f, indent=2)
except Exception as e:
print(e)
sys.exit('Could not create the event dump.')
def saveHashes(hashes):
try:
with open(os.path.join(outputdir, 'hashes.csv'), 'w') as hashFile:
for element in hashes:
hashFile.write('{},{}\n'.format(element[0], element[1]))
except Exception as e:
print(e)
sys.exit('Could not create the quick hash lookup file.')
def saveManifest(manifest):
try:
manifestFile = open(os.path.join(outputdir, 'manifest.json'), 'w')
manifestFile.write(json.dumps(manifest))
manifestFile.close()
except Exception as e:
print(e)
sys.exit('Could not create the manifest file.')
if __name__ == '__main__':
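    # Export each matching MISP event to its own JSON file in `outputdir`,
    # collecting the per-event hashes and the manifest along the way.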
misp = init()
try:
events = misp.search_index(minimal=True, **filters, pythonify=False)
except Exception as e:
print(e)
sys.exit("Invalid response received from MISP.")
if len(events) == 0:
sys.exit("No events returned.")
manifest = {}
hashes = []
counter = 1
total = len(events)
for event in events:
try:
e = misp.get_event(event['uuid'], deleted=include_deleted, pythonify=True)
if exclude_attribute_types:
for i, attribute in enumerate(e.attributes):
if attribute.type in exclude_attribute_types:
e.attributes.pop(i)
e_feed = e.to_feed(valid_distributions=valid_attribute_distributions, with_meta=True, with_distribution=with_distribution)
except Exception as err:
print(err, event['uuid'])
continue
if not e_feed:
print(f'Invalid distribution {e.distribution}, skipping')
continue
hashes += [[h, e.uuid] for h in e_feed['Event'].pop('_hashes')]
manifest.update(e_feed['Event'].pop('_manifest'))
saveEvent(e_feed)
print("Event " + str(counter) + "/" + str(total) + " exported.")
counter += 1
saveManifest(manifest)
print('Manifest saved.')
saveHashes(hashes)
print('Hashes saved. Feed creation completed.')
| 31.431373 | 134 | 0.639426 | [
"BSD-2-Clause"
] | JMoretS21Sec/PyMISP | examples/feed-generator/generate.py | 3,206 | Python |
#!/usr/bin/env python3
from hopla.hoplalib.user.usermodels import HabiticaUser
class TestHabiticaUser:
def test_get_stats(self):
user_test_stat_values = {
"buffs": {
"str": 50, "int": 50, "per": 3206, "con": 50, "stealth": 0, "streaks": False,
"snowball": False, "spookySparkles": False, "shinySeed": False, "seafoam": False
},
"training": {"int": 0, "per": 0, "str": 0, "con": 0},
"hp": 50, "mp": 65.7, "exp": 2501, "gp": 1072327.9, "lvl": 121,
"class": "wizard", "points": 0, "str": 0, "con": 0, "int": 12, "per": 88,
"toNextLevel": 5010, "maxHealth": 50, "maxMP": 304
}
user = HabiticaUser(user_dict={"stats": user_test_stat_values})
assert user.get_stats() == user_test_stat_values
def test_get_auth(self):
user_test_auth_values = {
"local": {"username": "hopla", "lowerCaseUsername": "hopla",
"email": "[email protected]"
},
"timestamps": {"created": "2022-03-22T24:23:38.119Z",
"loggedin": "2022-09-18T08:47:45.286Z",
"updated": "2022-09-18T14:20:55.530Z"
},
"facebook": {}, "google": {}, "apple": {}
}
user = HabiticaUser(user_dict={"auth": user_test_auth_values})
assert user.get_auth() == user_test_auth_values
def test_get_inventory(self):
inventory = {
"gear": {"equipped": {"back": "back_special_aetherCloak"},
"costume": {"armor": "armor_armoire_bluePartyDress", "body": "body_base_0"},
"owned": {"armor_special_fall2019Healer": True}},
"special": {"goodluck": 9000},
"lastDrop": {"count": 80, "date": "2021-10-12T15:45:30.384Z"},
"pets": {"Cactus-Golden": 0, "Unicorn-Red": -1, "Wolf-CottonCandyPink": 5},
"eggs": {"Dragon": 338, "Nudibranch": 3, "TRex": 0},
"hatchingPotions": {"Desert": 456, "MossyStone": 1},
"food": {"RottenMeat": 846},
"mounts": {"Fox-RoyalPurple": True, "Dragon-Skeleton": None, "Wolf-MossyStone": True},
"quests": {"trex_undead": 0},
"currentPet": "Egg-Base",
"currentMount": "Aether-Invisible"
}
user = HabiticaUser(user_dict={"items": inventory})
assert user.get_inventory() == inventory
def test_get_gp(self):
gp = 12.0
user = HabiticaUser(user_dict={"stats": {"gp": gp}})
assert user.get_gp() == gp
def test_get_mp(self):
mp = 112.0
user = HabiticaUser(user_dict={"stats": {"mp": mp}})
assert user.get_mp() == mp
def test_get_pets(self):
pets = {"Spider-Base": -1, "TRex-Skeleton": 5}
user = HabiticaUser(user_dict={"items": {"pets": pets}})
assert user.get_pets() == pets
def test_get_mounts(self):
mounts = {"Spider-Base": None, "TRex-Skeleton": True}
user = HabiticaUser(user_dict={"items": {"mounts": mounts}})
assert user.get_mounts() == mounts
def test_get_food(self):
food = {"CottonCandyBlue": 10, "Fish": 830}
user = HabiticaUser(user_dict={"items": {"food": food}})
assert user.get_food() == food
def test_get_hatch_potions(self):
hatch_potions = {"Base": 10, "SolarSystem": 1009}
user = HabiticaUser(user_dict={"items": {"hatchingPotions": hatch_potions}})
assert user.get_hatch_potions() == hatch_potions
def test_get_eggs(self):
eggs = {"Fox": 1001, "Nudibranch": 9}
user = HabiticaUser(user_dict={"items": {"eggs": eggs}})
assert user.get_eggs() == eggs
| 41.086957 | 98 | 0.544709 | [
"Apache-2.0"
] | rickie/hopla | src/tests/hoplalib/user/test_usermodels.py | 3,780 | Python |
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myshop.settings')
app = Celery('myshop')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
| 28.583333 | 67 | 0.781341 | [
"MIT"
] | BendalPrathmesh/E-commerce--site | myshop/celery.py | 343 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
币安推荐码: 返佣10%
https://www.binancezh.pro/cn/register?ref=AIR1GC70
币安合约推荐码: 返佣10%
https://www.binancezh.com/cn/futures/ref/51bitquant
if you don't have a binance account, you can use the invitation link to register one:
https://www.binancezh.com/cn/futures/ref/51bitquant
or use the inviation code: 51bitquant
网格交易: 适合币圈的高波动率的品种,适合现货, 如果交易合约,需要注意防止极端行情爆仓。
服务器购买地址: https://www.ucloud.cn/site/global.html?invitation_code=C1x2EA81CD79B8C#dongjing
"""
from gateway import BinanceSpotHttp, OrderStatus, OrderType, OrderSide
from utils import config
from utils import utility, round_to
from enum import Enum
import logging
from datetime import datetime
class BinanceTrader(object):
def __init__(self):
"""
:param api_key:
:param secret:
        :param trade_type: the type of trading; only future and spot are supported.
"""
self.http_client = BinanceSpotHttp(api_key=config.api_key, secret=config.api_secret, proxy_host=config.proxy_host, proxy_port=config.proxy_port)
        self.buy_orders = []  # open buy orders.
        self.sell_orders = []  # open sell orders.
def get_bid_ask_price(self):
ticker = self.http_client.get_ticker(config.symbol)
bid_price = 0
ask_price = 0
if ticker:
bid_price = float(ticker.get('bidPrice', 0))
ask_price = float(ticker.get('askPrice', 0))
return bid_price, ask_price
def grid_trader(self):
"""
执行核心逻辑,网格交易的逻辑.
:return:
"""
bid_price, ask_price = self.get_bid_ask_price()
print(f"bid_price: {bid_price}, ask_price: {ask_price}")
quantity = round_to(float(config.quantity), float(config.min_qty))
        self.buy_orders.sort(key=lambda x: float(x['price']), reverse=True)  # from highest to lowest price.
        self.sell_orders.sort(key=lambda x: float(x['price']), reverse=True)  # from highest to lowest price.
print(f"buy orders: {self.buy_orders}")
print("------------------------------")
print(f"sell orders: {self.sell_orders}")
        buy_delete_orders = []  # buy orders to be removed
        sell_delete_orders = []  # sell orders to be removed
        # Buy-order logic: check whether the open buy orders have been filled.
for buy_order in self.buy_orders:
check_order = self.http_client.get_order(buy_order.get('symbol', config.symbol),client_order_id=buy_order.get('clientOrderId'))
if check_order:
if check_order.get('status') == OrderStatus.CANCELED.value:
buy_delete_orders.append(buy_order)
print(f"buy order status was canceled: {check_order.get('status')}")
elif check_order.get('status') == OrderStatus.FILLED.value:
                    # The buy order was filled: place a sell order above it.
                    logging.info(f"Buy order filled at: {datetime.now()}, price: {check_order.get('price')}, quantity: {check_order.get('origQty')}")
sell_price = round_to(float(check_order.get("price")) * (1 + float(config.gap_percent)), float(config.min_price))
if 0 < sell_price < ask_price:
                        # avoid placing the sell order below the current best ask.
sell_price = round_to(ask_price, float(config.min_price))
new_sell_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.SELL, order_type=OrderType.LIMIT, quantity=quantity, price=sell_price)
if new_sell_order:
buy_delete_orders.append(buy_order)
self.sell_orders.append(new_sell_order)
buy_price = round_to(float(check_order.get("price")) * (1 - float(config.gap_percent)),
config.min_price)
if buy_price > bid_price > 0:
buy_price = round_to(bid_price, float(config.min_price))
new_buy_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.BUY, order_type=OrderType.LIMIT, quantity=quantity, price=buy_price)
if new_buy_order:
self.buy_orders.append(new_buy_order)
elif check_order.get('status') == OrderStatus.NEW.value:
print("buy order status is: New")
else:
print(f"buy order status is not above options: {check_order.get('status')}")
        # Remove expired or rejected orders.
for delete_order in buy_delete_orders:
self.buy_orders.remove(delete_order)
        # Sell-order logic: check whether the open sell orders have been filled.
for sell_order in self.sell_orders:
check_order = self.http_client.get_order(sell_order.get('symbol', config.symbol),
client_order_id=sell_order.get('clientOrderId'))
if check_order:
if check_order.get('status') == OrderStatus.CANCELED.value:
sell_delete_orders.append(sell_order)
print(f"sell order status was canceled: {check_order.get('status')}")
elif check_order.get('status') == OrderStatus.FILLED.value:
logging.info(
f"卖单成交时间: {datetime.now()}, 价格: {check_order.get('price')}, 数量: {check_order.get('origQty')}")
# 卖单成交,先下买单.
buy_price = round_to(float(check_order.get("price")) * (1 - float(config.gap_percent)), float(config.min_price))
if buy_price > bid_price > 0:
buy_price = round_to(bid_price, float(config.min_price))
new_buy_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.BUY,
order_type=OrderType.LIMIT, quantity=quantity, price=buy_price)
if new_buy_order:
sell_delete_orders.append(sell_order)
self.buy_orders.append(new_buy_order)
sell_price = round_to(float(check_order.get("price")) * (1 + float(config.gap_percent)), float(config.min_price))
if 0 < sell_price < ask_price:
                        # avoid placing the sell order below the current best ask.
sell_price = round_to(ask_price, float(config.min_price))
new_sell_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.SELL,
order_type=OrderType.LIMIT, quantity=quantity,
price=sell_price)
if new_sell_order:
self.sell_orders.append(new_sell_order)
elif check_order.get('status') == OrderStatus.NEW.value:
print("sell order status is: New")
else:
print(f"sell order status is not in above options: {check_order.get('status')}")
        # Remove expired or rejected orders.
for delete_order in sell_delete_orders:
self.sell_orders.remove(delete_order)
        # When there are no open buy orders.
if len(self.buy_orders) <= 0:
if bid_price > 0:
price = round_to(bid_price * (1 - float(config.gap_percent)), float(config.min_price))
buy_order = self.http_client.place_order(symbol=config.symbol,order_side=OrderSide.BUY, order_type=OrderType.LIMIT, quantity=quantity,price=price)
if buy_order:
self.buy_orders.append(buy_order)
        elif len(self.buy_orders) > int(config.max_orders):  # maximum number of open orders allowed.
            # Too many open buy orders: cancel the lowest-priced one.
            self.buy_orders.sort(key=lambda x: float(x['price']), reverse=False)  # from lowest to highest price
delete_order = self.buy_orders[0]
order = self.http_client.cancel_order(delete_order.get('symbol'), client_order_id=delete_order.get('clientOrderId'))
if order:
self.buy_orders.remove(delete_order)
        # When there are no open sell orders.
if len(self.sell_orders) <= 0:
if ask_price > 0:
price = round_to(ask_price * (1 + float(config.gap_percent)), float(config.min_price))
order = self.http_client.place_order(symbol=config.symbol,order_side=OrderSide.SELL, order_type=OrderType.LIMIT, quantity=quantity,price=price)
if order:
self.sell_orders.append(order)
        elif len(self.sell_orders) > int(config.max_orders):  # maximum number of open orders allowed.
            # Too many open sell orders: cancel the highest-priced one.
            self.sell_orders.sort(key=lambda x: float(x['price']), reverse=True)  # from highest to lowest price
delete_order = self.sell_orders[0]
order = self.http_client.cancel_order(delete_order.get('symbol'),
client_order_id=delete_order.get('clientOrderId'))
if order:
self.sell_orders.remove(delete_order)
| 44.110553 | 179 | 0.586238 | [
"MIT"
] | xgy560501/binance_grid_trader | trader/binance_trader.py | 9,326 | Python |
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for running Daisy builds on Google Container Builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import time
from apitools.base.py import encoding
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.api_lib.cloudbuild import logs as cb_logs
from googlecloudsdk.api_lib.cloudresourcemanager import projects_api
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.services import enable_api as services_api
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.cloudbuild import execution
from googlecloudsdk.command_lib.compute.sole_tenancy import util as sole_tenancy_util
from googlecloudsdk.command_lib.projects import util as projects_util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
import six
_IMAGE_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_import:{}'
_IMAGE_EXPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_export:{}'
_OVF_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_ovf_import:{}'
_DEFAULT_BUILDER_VERSION = 'release'
SERVICE_ACCOUNT_ROLES = [
'roles/iam.serviceAccountUser',
'roles/iam.serviceAccountTokenCreator']
class FilteredLogTailer(cb_logs.LogTailer):
"""Subclass of LogTailer that allows for filtering."""
def _PrintLogLine(self, text):
"""Override PrintLogLine method to use self.filter."""
if self.filter:
output_lines = text.splitlines()
for line in output_lines:
for match in self.filter:
if line.startswith(match):
self.out.Print(line)
break
else:
self.out.Print(text)
class CloudBuildClientWithFiltering(cb_logs.CloudBuildClient):
"""Subclass of CloudBuildClient that allows filtering."""
def StreamWithFilter(self, build_ref, backoff, output_filter=None):
"""Stream the logs for a build using whitelist filter.
Args:
build_ref: Build reference, The build whose logs shall be streamed.
backoff: A function that takes the current elapsed time
and returns the next sleep length. Both are in seconds.
output_filter: List of strings, The output will only be shown if the line
starts with one of the strings in the list.
Raises:
NoLogsBucketException: If the build does not specify a logsBucket.
Returns:
Build message, The completed or terminated build as read for the final
poll.
"""
build = self.GetBuild(build_ref)
log_tailer = FilteredLogTailer.FromBuild(build)
log_tailer.filter = output_filter
statuses = self.messages.Build.StatusValueValuesEnum
working_statuses = [
statuses.QUEUED,
statuses.WORKING,
]
seconds_between_poll = backoff(0)
seconds_elapsed = 0
while build.status in working_statuses:
log_tailer.Poll()
time.sleep(seconds_between_poll)
build = self.GetBuild(build_ref)
seconds_elapsed += seconds_between_poll
seconds_between_poll = backoff(seconds_elapsed)
# Poll the logs one final time to ensure we have everything. We know this
# final poll will get the full log contents because GCS is strongly
# consistent and Container Builder waits for logs to finish pushing before
# marking the build complete.
log_tailer.Poll(is_last=True)
return build
class FailedBuildException(exceptions.Error):
"""Exception for builds that did not succeed."""
def __init__(self, build):
super(FailedBuildException,
self).__init__('build {id} completed with status "{status}"'.format(
id=build.id, status=build.status))
class SubnetException(exceptions.Error):
"""Exception for subnet related errors."""
class ImageOperation(object):
"""Enum representing image operation."""
IMPORT = 'import'
EXPORT = 'export'
def AddCommonDaisyArgs(parser, add_log_location=True):
"""Common arguments for Daisy builds."""
if add_log_location:
parser.add_argument(
'--log-location',
help='Directory in Cloud Storage to hold build logs. If not '
'set, ```gs://<project num>.cloudbuild-logs.googleusercontent.com/``` '
'is created and used.',
)
parser.add_argument(
'--timeout',
type=arg_parsers.Duration(),
default='2h',
help="""\
Maximum time a build can last before it fails as "TIMEOUT".
For example, specifying `2h` fails the process after 2 hours.
See $ gcloud topic datetimes for information about duration formats.
""")
base.ASYNC_FLAG.AddToParser(parser)
def AddExtraCommonDaisyArgs(parser):
"""Extra common arguments for Daisy builds."""
parser.add_argument(
'--docker-image-tag',
default=_DEFAULT_BUILDER_VERSION,
hidden=True,
help="""\
Specify which docker image tag (of tools from compute-image-tools)
should be used for this command. By default it's "release", while
"latest" is supported as well. There may be more versions supported in
the future.
"""
)
def _CheckIamPermissions(project_id):
"""Check for needed IAM permissions and prompt to add if missing.
Args:
project_id: A string with the name of the project.
"""
project = projects_api.Get(project_id)
# If the user's project doesn't have cloudbuild enabled yet, then the service
# account won't even exist. If so, then ask to enable it before continuing.
# Also prompt them to enable Stackdriver Logging if they haven't yet.
expected_services = ['cloudbuild.googleapis.com', 'logging.googleapis.com']
for service_name in expected_services:
if not services_api.IsServiceEnabled(project.projectId, service_name):
# TODO(b/112757283): Split this out into a separate library.
prompt_message = (
'The "{0}" service is not enabled for this project. '
'It is required for this operation.\n').format(service_name)
console_io.PromptContinue(
prompt_message,
'Would you like to enable this service?',
throw_if_unattended=True,
cancel_on_no=True)
services_api.EnableService(project.projectId, service_name)
# Now that we're sure the service account exists, actually check permissions.
service_account = 'serviceAccount:{0}@cloudbuild.gserviceaccount.com'.format(
project.projectNumber)
expected_permissions = {'roles/compute.admin': service_account}
for role in SERVICE_ACCOUNT_ROLES:
expected_permissions[role] = service_account
permissions = projects_api.GetIamPolicy(project_id)
for binding in permissions.bindings:
if expected_permissions.get(binding.role) in binding.members:
del expected_permissions[binding.role]
if expected_permissions:
ep_table = [
'{0} {1}'.format(role, account)
for role, account in expected_permissions.items()
]
prompt_message = (
'The following IAM permissions are needed for this operation:\n'
'[{0}]\n'.format('\n'.join(ep_table)))
console_io.PromptContinue(
message=prompt_message,
prompt_string='Would you like to add the permissions',
throw_if_unattended=True,
cancel_on_no=True)
for role, account in expected_permissions.items():
log.info('Adding [{0}] to [{1}]'.format(account, role))
projects_api.AddIamPolicyBinding(project_id, account, role)
def _CreateCloudBuild(build_config, client, messages):
"""Create a build in cloud build.
Args:
build_config: A cloud build Build message.
client: The cloud build api client.
messages: The cloud build api messages module.
Returns:
Tuple containing a cloud build build object and the resource reference
for that build.
"""
log.debug('submitting build: {0}'.format(repr(build_config)))
op = client.projects_builds.Create(
messages.CloudbuildProjectsBuildsCreateRequest(
build=build_config, projectId=properties.VALUES.core.project.Get()))
json = encoding.MessageToJson(op.metadata)
build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build
build_ref = resources.REGISTRY.Create(
collection='cloudbuild.projects.builds',
projectId=build.projectId,
id=build.id)
log.CreatedResource(build_ref)
if build.logUrl:
log.status.Print('Logs are available at [{0}].'.format(build.logUrl))
else:
log.status.Print('Logs are available in the Cloud Console.')
return build, build_ref
def GetDaisyBucketName(bucket_location=None):
"""Determine bucket name for daisy.
Args:
bucket_location: str, specified bucket location.
Returns:
str, bucket name for daisy.
"""
project = properties.VALUES.core.project.GetOrFail()
safe_project = project.replace(':', '-')
safe_project = safe_project.replace('.', '-')
bucket_name = '{0}-daisy-bkt'.format(safe_project)
if bucket_location:
bucket_name = '{0}-{1}'.format(bucket_name, bucket_location).lower()
safe_bucket_name = _GetSafeBucketName(bucket_name)
# TODO (b/117668144): Make Daisy scratch bucket ACLs same as
# source/destination bucket
return safe_bucket_name
def _GetSafeBucketName(bucket_name):
# Rules are from https://cloud.google.com/storage/docs/naming.
# Bucket name can't contain "google".
bucket_name = bucket_name.replace('google', 'go-ogle')
# Bucket name can't start with "goog". Workaround for b/128691621
bucket_name = bucket_name[:4].replace('goog', 'go-og') + bucket_name[4:]
return bucket_name
def GetSubnetRegion():
"""Gets region from global properties/args that should be used for subnet arg.
Returns:
str, region
Raises:
SubnetException: if region couldn't be inferred.
"""
if properties.VALUES.compute.zone.Get():
return utils.ZoneNameToRegionName(properties.VALUES.compute.zone.Get())
elif properties.VALUES.compute.region.Get():
return properties.VALUES.compute.region.Get()
raise SubnetException('Region or zone should be specified.')
def AppendNetworkAndSubnetArgs(args, builder_args):
"""Extracts network/subnet out of CLI args and append for importer.
Args:
args: list of str, CLI args that might contain network/subnet args.
builder_args: list of str, args for builder.
"""
if args.subnet:
AppendArg(builder_args, 'subnet', args.subnet.lower())
if args.network:
AppendArg(builder_args, 'network', args.network.lower())
def RunImageImport(args, import_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_import on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
import_args: A list of key-value pairs to pass to importer.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_IMPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, import_args, tags, output_filter)
def RunImageExport(args, export_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_export on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
export_args: A list of key-value pairs to pass to exporter.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_EXPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, export_args, tags, output_filter)
def RunImageCloudBuild(args, builder, builder_args, tags, output_filter):
"""Run a build related to image on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
builder: Path to builder image.
builder_args: A list of key-value pairs to pass to builder.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
return _RunCloudBuild(args, builder, builder_args,
['gce-daisy'] + tags, output_filter, args.log_location)
def GetDaisyTimeout(args):
# Make Daisy time out before gcloud by shaving off 2% from the timeout time,
# up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
daisy_timeout = args.timeout - min(two_percent, 300)
return daisy_timeout
def _RunCloudBuild(args,
builder,
build_args,
build_tags=None,
output_filter=None,
log_location=None,
backoff=lambda elapsed: 1):
"""Run a build with a specific builder on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
builder: path to builder image
build_args: args to be sent to builder
build_tags: tags to be attached to the build
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
log_location: GCS path to directory where logs will be stored.
backoff: A function that takes the current elapsed time and returns
the next sleep length. Both are in seconds.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
client = cloudbuild_util.GetClientInstance()
messages = cloudbuild_util.GetMessagesModule()
# Create the build request.
build_config = messages.Build(
steps=[
messages.BuildStep(
name=builder,
args=build_args,
),
],
tags=build_tags,
timeout='{0}s'.format(args.timeout),
)
if log_location:
gcs_log_dir = resources.REGISTRY.Parse(
args.log_location, collection='storage.objects')
build_config.logsBucket = ('gs://{0}/{1}'.format(gcs_log_dir.bucket,
gcs_log_dir.object))
# Start the build.
build, build_ref = _CreateCloudBuild(build_config, client, messages)
# If the command is run --async, we just print out a reference to the build.
if args.async_:
return build
mash_handler = execution.MashHandler(
execution.GetCancelBuildHandler(client, messages, build_ref))
# Otherwise, logs are streamed from GCS.
with execution_utils.CtrlCSection(mash_handler):
build = CloudBuildClientWithFiltering(client, messages).StreamWithFilter(
build_ref, backoff, output_filter=output_filter)
if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
log.status.Print(
'Your build timed out. Use the [--timeout=DURATION] flag to change '
'the timeout threshold.')
if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
raise FailedBuildException(build)
return build
def RunOVFImportBuild(args, compute_client, instance_name, source_uri,
no_guest_environment, can_ip_forward, deletion_protection,
description, labels, machine_type, network, network_tier,
subnet, private_network_ip, no_restart_on_failure, os,
tags, zone, project, output_filter,
compute_release_track):
"""Run a OVF import build on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
compute_client: Google Compute Engine client.
instance_name: Name of the instance to be imported.
source_uri: A GCS path to OVA or OVF package.
no_guest_environment: If set to True, Google Guest Environment won't be
installed on the boot disk of the VM.
can_ip_forward: If set to True, allows the instances to send and receive
packets with non-matching destination or source IP addresses.
deletion_protection: Enables deletion protection for the instance.
description: Specifies a textual description of the instances.
labels: List of label KEY=VALUE pairs to add to the instance.
machine_type: Specifies the machine type used for the instances.
network: Specifies the network that the instances will be part of.
network_tier: Specifies the network tier of the interface. NETWORK_TIER must
be one of: PREMIUM, STANDARD.
subnet: Specifies the subnet that the instances will be part of.
private_network_ip: Specifies the RFC1918 IP to assign to the instance.
no_restart_on_failure: The instances will NOT be restarted if they are
terminated by Compute Engine.
os: Specifies the OS of the boot disk being imported.
tags: A list of strings for adding tags to the Argo build.
zone: The GCP zone to tell Daisy to do work in. If unspecified, defaults to
wherever the Argo runner happens to be.
project: The Google Cloud Platform project name to use for OVF import.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
compute_release_track: release track to be used for Compute API calls. One
of - "alpha", "beta" or ""
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
# Make OVF import time-out before gcloud by shaving off 2% from the timeout
# time, up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
ovf_import_timeout = args.timeout - min(two_percent, 300)
ovf_importer_args = []
AppendArg(ovf_importer_args, 'instance-names', instance_name)
AppendArg(ovf_importer_args, 'client-id', 'gcloud')
AppendArg(ovf_importer_args, 'ovf-gcs-path', source_uri)
AppendBoolArg(ovf_importer_args, 'no-guest-environment',
no_guest_environment)
AppendBoolArg(ovf_importer_args, 'can-ip-forward', can_ip_forward)
AppendBoolArg(ovf_importer_args, 'deletion-protection', deletion_protection)
AppendArg(ovf_importer_args, 'description', description)
if labels:
AppendArg(ovf_importer_args, 'labels',
','.join(['{}={}'.format(k, v) for k, v in labels.items()]))
AppendArg(ovf_importer_args, 'machine-type', machine_type)
AppendArg(ovf_importer_args, 'network', network)
AppendArg(ovf_importer_args, 'network-tier', network_tier)
AppendArg(ovf_importer_args, 'subnet', subnet)
AppendArg(ovf_importer_args, 'private-network-ip', private_network_ip)
AppendBoolArg(ovf_importer_args, 'no-restart-on-failure',
no_restart_on_failure)
AppendArg(ovf_importer_args, 'os', os)
if tags:
AppendArg(ovf_importer_args, 'tags', ','.join(tags))
AppendArg(ovf_importer_args, 'zone', zone)
AppendArg(ovf_importer_args, 'timeout', ovf_import_timeout, '-{0}={1}s')
AppendArg(ovf_importer_args, 'project', project)
_AppendNodeAffinityLabelArgs(ovf_importer_args, args, compute_client.messages)
if compute_release_track:
AppendArg(ovf_importer_args, 'release-track', compute_release_track)
build_tags = ['gce-ovf-import']
backoff = lambda elapsed: 2 if elapsed < 30 else 15
return _RunCloudBuild(args, _OVF_IMPORT_BUILDER.format(args.docker_image_tag),
ovf_importer_args, build_tags, output_filter,
backoff=backoff)
def _AppendNodeAffinityLabelArgs(
ovf_importer_args, args, compute_client_messages):
node_affinities = sole_tenancy_util.GetSchedulingNodeAffinityListFromArgs(
args, compute_client_messages)
for node_affinity in node_affinities:
AppendArg(ovf_importer_args, 'node-affinity-label',
_BuildOvfImporterNodeAffinityFlagValue(node_affinity))
def _BuildOvfImporterNodeAffinityFlagValue(node_affinity):
node_affinity_flag = node_affinity.key + ',' + six.text_type(
node_affinity.operator)
for value in node_affinity.values:
node_affinity_flag += ',' + value
return node_affinity_flag
def AppendArg(args, name, arg, format_pattern='-{0}={1}'):
if arg:
args.append(format_pattern.format(name, arg))
def AppendBoolArg(args, name, arg=True):
AppendArg(args, name, arg, '-{0}')
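# Illustrative only (flag names and values below are hypothetical): the helpers
# above serialize command-line flags for the importer binaries, e.g.
#
#   flags = []
#   AppendArg(flags, 'zone', 'us-central1-b')       # -> ['-zone=us-central1-b']
#   AppendBoolArg(flags, 'no-guest-environment')    # adds '-no-guest-environment'
#   AppendArg(flags, 'timeout', 300, '-{0}={1}s')   # adds '-timeout=300s'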
def MakeGcsUri(uri):
obj_ref = resources.REGISTRY.Parse(uri)
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
def MakeGcsObjectOrPathUri(uri):
"""Creates Google Cloud Storage URI for an object or a path.
Raises storage_util.InvalidObjectNameError if a path contains only bucket
name.
Args:
uri: a string to a Google Cloud Storage object or a path. Can be a gs:// or
an https:// variant.
Returns:
Google Cloud Storage URI for an object or a path.
"""
obj_ref = resources.REGISTRY.Parse(uri)
if hasattr(obj_ref, 'object'):
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
else:
raise storage_util.InvalidObjectNameError(uri, 'Missing object name')
| 36.762128 | 85 | 0.722277 | [
"Apache-2.0"
] | bopopescu/cndw | mac/google-cloud-sdk/lib/googlecloudsdk/api_lib/compute/daisy_utils.py | 23,491 | Python |
# ----------------------------------------------------------------------
# Distributed Lock
# ----------------------------------------------------------------------
# Copyright (C) 2007-2021 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from typing import Optional, List
import datetime
import time
import random
from logging import getLogger
# Third-party modules
import pymongo
from pymongo.collection import Collection
from bson import ObjectId
# NOC modules
from noc.core.mongo.connection import get_db
from noc.core.perf import metrics
from .base import BaseLock, DEFAULT_TTL
DEFAULT_LOCK_WAIT = 1.0
DEFAULT_LOCK_WAIT_JITTER = 0.1
logger = getLogger(__name__)
class DistributedLock(BaseLock):
"""
Distributed locking primitive.
    Allows exclusive access to all requested items within a category
    across a group of processes.
Example
-------
```
lock = DistributedLock("test", "test:12")
with lock.acquire(["obj1", "obj2"]):
...
```
"""
def __init__(self, category: str, owner: str, ttl: Optional[float] = None):
"""
:param category: Lock category name
:param owner: Lock owner id
:param ttl: Default lock ttl in seconds
"""
super().__init__(category, owner, ttl=ttl)
self.collection = self.get_collection()
self.release_all()
def release_all(self):
"""
Release all locks held by owner
"""
self.collection.delete_many({"owner": self.owner})
def get_collection_name(self) -> str:
"""
Get name of the lock collection
"""
return f"locks.{self.category}"
def get_collection(self) -> Collection:
"""
        Ensure the collection exists and is indexed properly
"""
coll = get_db()[self.get_collection_name()]
coll.create_index([("items", pymongo.ASCENDING)], unique=True)
coll.create_index([("expires", pymongo.ASCENDING)], expireAfterSeconds=0)
return coll
def acquire_by_items(self, items: List[str], ttl: Optional[float] = None) -> str:
"""
Acquire lock by list of items
"""
lock_id = ObjectId()
ttl = ttl or self.ttl or DEFAULT_TTL
metrics[f"lock_{self.category}_requests"] += 1
logger.debug(
"[%s|%s] Acquiring lock for %s (%s seconds)",
self.category,
self.owner,
", ".join(items),
ttl,
)
while True:
try:
self.collection.insert_one(
{
"_id": lock_id,
"items": items,
"owner": self.owner,
"expire": datetime.datetime.now() + datetime.timedelta(seconds=ttl),
}
)
return str(lock_id)
except pymongo.errors.DuplicateKeyError:
metrics[f"lock_{self.category}_misses"] += 1
jitter = random.random() * DEFAULT_LOCK_WAIT_JITTER * DEFAULT_LOCK_WAIT
timeout = DEFAULT_LOCK_WAIT + jitter
logger.debug(
"[%s|%s] Cannnot get lock. Waiting %s seconds",
self.category,
self.owner,
timeout,
)
time.sleep(timeout)
def release_by_lock_id(self, lock_id: str):
"""
Release lock by id
"""
self.collection.delete_one({"_id": ObjectId(lock_id)})
| 30.495798 | 92 | 0.532929 | [
"BSD-3-Clause"
] | sbworth/getnoc | core/lock/distributed.py | 3,629 | Python |
# Copyright (c) 2020 Aiven, Helsinki, Finland. https://aiven.io/
from .object_storage.gcs import GCSProvider
from argparse import ArgumentParser
from tempfile import TemporaryDirectory
import codecs
import datetime
import dateutil.parser
import gzip
import json
import kafka
import logging
import os
import re
class KafkaRestore:
def __init__(self, *, config):
self.log = logging.getLogger(self.__class__.__name__)
self.config = config
object_storage_config = self.config.get("object_storage", {})
object_storage_type = object_storage_config.get("type")
if object_storage_type == "gcs":
self.object_storage = GCSProvider(config=object_storage_config)
else:
raise ValueError(f"Unknown object storage type: {object_storage_type}")
kafka_config = self.config.get("kafka", {})
if ("ssl_cafile" in kafka_config and
"ssl_access_certificate_file" in kafka_config and
"ssl_access_key_file" in kafka_config):
self.kafka_producer = kafka.KafkaProducer(
bootstrap_servers=kafka_config["kafka_url"],
security_protocol="SSL",
ssl_cafile=kafka_config["ssl_ca_file"],
ssl_certfile=kafka_config["ssl_access_certificate_file"],
ssl_keyfile=kafka_config["ssl_access_key_file"],
)
else:
self.kafka_producer = kafka.KafkaProducer(
bootstrap_servers=kafka_config["kafka_url"],
)
def list_topic_data_files(self, *, topic):
topic_re = re.compile(
(
r"(?P<topic>" + re.escape(topic) + r")"
r"-(?P<partition>[0-9]+)"
r"-(?P<offset>[0-9]+)"
r"(?P<suffix>[.a-z]*)"
)
)
topic_partition_files = {}
for item in self.object_storage.list_items():
matches = topic_re.match(item.name)
if matches:
partition = int(matches.group("partition"))
if partition not in topic_partition_files:
topic_partition_files[partition] = []
begin_offset = matches.group("offset")
record = {
"begin_offset": int(begin_offset),
"last_modified": item.last_modified,
"object_name": item.name,
}
if matches.group("suffix") == ".gz":
record["compression"] = "gzip"
topic_partition_files[partition].append(record)
for partition in topic_partition_files:
topic_partition_files[partition] = sorted(topic_partition_files[partition], key=lambda x: x["begin_offset"])
return topic_partition_files
def parse_record(self, record_line):
fields = record_line.split(",")
if fields[0]:
key = codecs.decode(codecs.encode(fields[0], "ascii"), "base64")
else:
key = None
if fields[1]:
value = codecs.decode(codecs.encode(fields[1], "ascii"), "base64")
else:
value = None
offset = int(fields[2])
if fields[3]:
timestamp = int(fields[3])
else:
timestamp = None
return key, value, offset, timestamp
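    # Illustrative backup line as consumed by parse_record() above (the payload
    # is made up): "a2V5,dmFsdWU=,42,1580000000000" decodes to key=b"key",
    # value=b"value", offset=42 and timestamp=1580000000000 (milliseconds).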
def restore(self, *, topic):
topic_partition_files = self.list_topic_data_files(topic=topic)
partition_offset_records = {}
since = self.config.get("since")
with TemporaryDirectory() as working_directory:
while True:
progress = False
for partition in topic_partition_files:
if topic_partition_files[partition]:
object_record = topic_partition_files[partition][0]
topic_partition_files[partition] = topic_partition_files[partition][1:]
progress = True
object_name = object_record["object_name"]
if since is not None and since > object_record["last_modified"]:
self.log.info("Skipping object %r due to timestamp", object_name)
continue
local_name = f"{working_directory}/{topic}-{partition}"
self.object_storage.get_contents_to_file(object_name, local_name)
if object_record.get("compression") == "gzip":
fh = gzip.open(local_name, "rt")
else:
fh = open(local_name, "r")
nrecords = 0
for line in fh.readlines():
key, value, offset, timestamp = self.parse_record(line.strip())
future_record = self.kafka_producer.send(
topic,
partition=partition,
key=key,
value=value,
timestamp_ms=timestamp,
)
nrecords += 1
partition_offset_records[partition] = {
"last_original_offset": offset,
"last_produced_record": future_record,
}
self.log.info("Restored %d messages from object %r", nrecords, object_name)
fh.close()
os.unlink(local_name)
if not progress:
self.kafka_producer.flush()
break
for partition in sorted(partition_offset_records):
self.log.info(
"Partition %d original offset %d new offset %d",
partition,
partition_offset_records[partition]["last_original_offset"],
partition_offset_records[partition]["last_produced_record"].get().offset,
)
def main():
logging.basicConfig(level=logging.INFO, format="%(name)-20s %(levelname)-8s %(message)s")
parser = ArgumentParser()
parser.add_argument("-c", "--config", required=True, help="Path to config file")
parser.add_argument("-t", "--topic", required=True, help="Topic name")
parser.add_argument("--since", help="Skip objects that are older than given timestamp")
args = parser.parse_args()
with open(args.config) as fh:
restore_config = json.load(fh)
if args.since:
dt = dateutil.parser.parse(args.since)
if dt.tzinfo is None:
# assume UTC if no timezone is present
dt = dt.replace(tzinfo=datetime.timezone.utc)
restore_config["since"] = dt
kafka_restore = KafkaRestore(config=restore_config)
kafka_restore.restore(topic=args.topic)
if __name__ == "__main__":
main()
| 37.462366 | 120 | 0.548794 | [
"Apache-2.0"
] | AlexAmin/aiven-kafka-restore | kafka_restore/__main__.py | 6,968 | Python |
# Generated by Django 4.0 on 2022-01-10 10:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('funblog', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='fblog',
name='DOC',
field=models.DateTimeField(auto_now_add=True, verbose_name='Date of creating'),
),
migrations.AlterField(
model_name='fblog',
name='DOU',
field=models.DateTimeField(auto_now=True, verbose_name='Date of updating'),
),
migrations.AlterField(
model_name='fblog',
name='comment',
field=models.TextField(max_length=128, verbose_name='Comment'),
),
]
| 26.689655 | 91 | 0.582687 | [
"MIT"
] | larryw3i/osp | fun/funblog/migrations/0002_alter_fblog_doc_alter_fblog_dou_alter_fblog_comment.py | 774 | Python |
try:
    xrange
except NameError:
    # Python 3 has no xrange; fall back to the built-in range
    xrange = range
def totalvalue(comb):
' Totalise a particular combination of items'
totwt = totval = 0
for item, wt, val in comb:
totwt += wt
totval += val
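    # Weight is negated so that tuple comparison favours higher total value
    # first and lighter total weight second; combinations over the 400-unit
    # limit score (0, 0) and can never win.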
return (totval, -totwt) if totwt <= 400 else (0, 0)
items = (
("map", 9, 150), ("compass", 13, 35), ("water", 153, 200), ("sandwich", 50, 160),
("glucose", 15, 60), ("tin", 68, 45), ("banana", 27, 60), ("apple", 39, 40),
("cheese", 23, 30), ("beer", 52, 10), ("suntan cream", 11, 70), ("camera", 32, 30),
("t-shirt", 24, 15), ("trousers", 48, 10), ("umbrella", 73, 40),
("waterproof trousers", 42, 70), ("waterproof overclothes", 43, 75),
("note-case", 22, 80), ("sunglasses", 7, 20), ("towel", 18, 12),
("socks", 4, 50), ("book", 30, 10),
)
def knapsack01_dp(items, limit):
table = [[0 for w in range(limit + 1)] for j in range(len(items) + 1)]
for j in range(1, len(items) + 1):
item, wt, val = items[j-1]
for w in range(1, limit + 1):
if wt > w:
table[j][w] = table[j-1][w]
else:
table[j][w] = max(table[j-1][w],
table[j-1][w-wt] + val)
result = []
w = limit
for j in range(len(items), 0, -1):
was_added = table[j][w] != table[j-1][w]
if was_added:
item, wt, val = items[j-1]
result.append(items[j-1])
w -= wt
return result
bagged = knapsack01_dp(items, 400)
print(("Bagged the following items\n " +
'\n '.join(sorted(item for item,_,_ in bagged))))
val, wt = totalvalue(bagged)
print(("for a total value of %i and a total weight of %i" % (val, -wt)))
| 31.518519 | 87 | 0.509988 | [
"MIT"
] | ethansaxenian/RosettaDecode | lang/Python/knapsack-problem-0-1-2.py | 1,702 | Python |
#
# voice-skill-sdk
#
# (C) 2020, Deutsche Telekom AG
#
# This file is distributed under the terms of the MIT license.
# For details see the file LICENSE in the top directory.
#
#
# Circuit breaker for skills requesting external services
#
from .config import config
from circuitbreaker import CircuitBreaker
from requests.exceptions import RequestException
class SkillCircuitBreaker(CircuitBreaker):
""" Circuit breaker's defaults from skill config """
FAILURE_THRESHOLD = config.getint('circuit_breakers', 'threshold', fallback=5)
RECOVERY_TIMEOUT = config.getint('circuit_breakers', 'timeout', fallback=30)
EXPECTED_EXCEPTION = RequestException
# Default circuit breaker will be used if no custom breaker supplied
DEFAULT_CIRCUIT_BREAKER = SkillCircuitBreaker()
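# A minimal usage sketch (not part of the SDK itself): the breaker instance can
# decorate any callable that talks to an external service; the function name and
# URL below are made up.
#
#   import requests
#
#   @DEFAULT_CIRCUIT_BREAKER
#   def call_weather_service():
#       return requests.get("https://example.org/weather", timeout=5)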
| 27.172414 | 82 | 0.777919 | [
"MIT"
] | Anrufliste/voice-skill-sdk | skill_sdk/circuit_breaker.py | 788 | Python |
from itertools import combinations
f = open("input.txt")
d = f.readlines()
nog = []
for l in d:
nog.append(int(l))
combos = 0
min_num = 0
do_break = False
for nog_len in range(len(nog)):
for c in combinations(nog, nog_len):
combo_sum = sum(c)
if combo_sum == 150:
min_num = nog_len
do_break = True
if do_break:
break
if do_break:
break
for c in combinations(nog, min_num):
combo_sum = sum(c)
if combo_sum == 150:
combos += 1
print(combos)
| 18.586207 | 40 | 0.580705 | [
"MIT"
] | pwicks86/adventofcode2015 | day17/p2.py | 539 | Python |
import logging
from angr_platforms.msp430 import arch_msp430, lift_msp430, simos_msp430
import angr
import os
def test_new_orleans():
thebin = str(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../test_programs/msp430/microcorruption_new_orleans/out.elf'))
p = angr.Project(thebin, load_options={'rebase_granularity': 8})
p.hook_symbol('getsn', simos_msp430.MCgetsn())
p.hook_symbol('__stop_progExec__', simos_msp430.MCstopexec())
p.hook_symbol('puts', simos_msp430.MCputs())
simgr = p.factory.simulation_manager()
simgr.explore(find=p.loader.find_symbol('unlock_door').rebased_addr)
stdin_contents = simgr.found[0].posix.dumps(0)
assert '7d493c6a51373f' in stdin_contents.hex()
if __name__ == '__main__':
test_new_orleans()
| 38.571429 | 93 | 0.724691 | [
"BSD-2-Clause"
] | shahinsba/angr-platforms | tests/test_msp430_mc_new_orleans.py | 810 | Python |
import pytest
from django.contrib.auth import get_user_model
from rest_framework.test import APIClient
@pytest.fixture
def client():
return APIClient()
@pytest.fixture
def db_user():
user_data = {
'email': '[email protected]',
'password': 'testpass123',
'first_name': 'Jack',
'last_name': 'Programmer',
}
return get_user_model().objects.create_user(**user_data)
| 20.65 | 60 | 0.670702 | [
"MIT"
] | thehomebrewnerd/react-django-template | backend/user/tests/conftest.py | 413 | Python |
from django.apps import AppConfig
class ActivityFeedConfig(AppConfig):
"""App config for activity_feed."""
name = 'datahub.activity_feed'
| 18.625 | 39 | 0.738255 | [
"MIT"
] | Staberinde/data-hub-api | datahub/activity_feed/apps.py | 149 | Python |
# create a tuple with the product names, followed by their prices.
# show a price listing in tabular form.
lista = ('Lápis', 1.5, 'Borracha', 2.5, 'Caderno', 10.8,
'Estojo', 20, 'Mochila', 100.5)
print('\033[31m--'*20)
print(f'{"LISTAGEM DE PREÇOS":^40}')
print('--'*20, '\033[m')
for i in range(0, len(lista), 2):
print(f'{lista[i]:.<30}R${lista[i+1]:>5.2f}')
print('\033[31m--\033[m'*20)
''' Formatting notes:
print(f'{"LISTAGEM DE PREÇOS":^40}')
centered         = {element:^width}
pad to the right = {:<width}  > fills with spaces
pad to the right = {:.<width} > fills with dots
pad to the left  = {:>width}  > fills with spaces
pad to the left  = {:->width} > fills with -
| 30.347826 | 58 | 0.637536 | [
"MIT"
] | NataliaNasu/cursoemvideo-python3 | PacoteDownload/ex076.py | 711 | Python |
#!/usr/bin/env python
import os
import json
import pprint as pp
from time import time
import torch
import torch.optim as optim
from tensorboard_logger import Logger as TbLogger
from nets.critic_network import CriticNetwork
from options import get_options
from train import train_epoch, validate, get_inner_model
from reinforce_baselines import NoBaseline, ExponentialBaseline, CriticBaseline, RolloutBaseline, WarmupBaseline
from nets.attention_model import AttentionModel
from nets.pointer_network import PointerNetwork, CriticNetworkLSTM
from utils import torch_load_cpu, load_problem
import pickle
# for hyperparameter tuning using wanb
# https://docs.wandb.ai/sweeps/quickstart
import torch.nn.functional as F
import torchvision.datasets as datasets
import torch.nn as nn
import wandb
from torchvision import datasets, transforms
def run(opts):
# start time
start_time = time()
train_run = []
opts.save_hrs.sort()
run_name = opts.run_name
# Pretty print the run args
pp.pprint(vars(opts))
# Set the random seed
torch.manual_seed(opts.seed)
# Optionally configure tensorboard
tb_logger = None
if not opts.no_tensorboard:
tb_logger = TbLogger(os.path.join(opts.log_dir, "{}_{}".format(opts.problem, opts.graph_size), opts.run_name))
os.makedirs(opts.save_dir)
# Save arguments so exact configuration can always be found
with open(os.path.join(opts.save_dir, "args.json"), 'w') as f:
json.dump(vars(opts), f, indent=True)
# Set the device
opts.device = torch.device("cuda:0" if opts.use_cuda else "cpu")
# Figure out what's the problem
problem = load_problem(opts.problem)
# Load data from load_path
load_data = {}
assert opts.load_path is None or opts.resume is None, "Only one of load path and resume can be given"
load_path = opts.load_path if opts.load_path is not None else opts.resume
if load_path is not None:
print(' [*] Loading data from {}'.format(load_path))
load_data = torch_load_cpu(load_path)
# hyperparameter search
# default (user specified) config
config_defaults = {
'batch_size': opts.batch_size,
'lr_model': opts.lr_model,
'lr_critic': opts.lr_critic,
'lr_decay': opts.lr_decay,
}
# determine the parameter space
"""sweep_config = {
'parameters': {
'batch_size': {
'values': [256, 128, 64, 32]
},
'lr_model': {
'values': [1e-2, 1e-3, 1e-4, 3e-4, 3e-5, 1e-5]
},
'lr_critic': {
'values': [1e-2, 1e-3, 1e-4, 3e-4, 3e-5, 1e-5]
},
'lr_decay': {
                'values': [0.9, 0.95, 1.0, 1.05, 1.1, 1.15]
},
}
}"""
# initialize the sweep
# sweep_id = wandb.sweep(sweep_config, project="Pytorch-sweeps")
# Initialize a new wandb run
wandb.init(config=config_defaults)
# Config is a variable that holds and saves hyperparameters and inputs
config = wandb.config
    # NOTE: any code that reads the tuned hyperparameters should use config.parameter
    # instead of opts.parameter, including functions in other files (pass config down to them)
# Initialize model
model_class = {
'attention': AttentionModel,
'pointer': PointerNetwork
}.get(opts.model, None)
assert model_class is not None, "Unknown model: {}".format(model_class)
model = model_class(
opts.embedding_dim,
opts.hidden_dim,
problem,
n_encode_layers=opts.n_encode_layers,
mask_inner=True,
mask_logits=True,
normalization=opts.normalization,
tanh_clipping=opts.tanh_clipping,
checkpoint_encoder=opts.checkpoint_encoder,
shrink_size=opts.shrink_size
).to(opts.device)
if opts.use_cuda and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# Overwrite model parameters by parameters to load
model_ = get_inner_model(model)
model_.load_state_dict({**model_.state_dict(), **load_data.get('model', {})})
# Initialize baseline
if opts.baseline == 'exponential':
baseline = ExponentialBaseline(opts.exp_beta)
elif opts.baseline == 'critic' or opts.baseline == 'critic_lstm':
assert problem.NAME == 'tsp', "Critic only supported for TSP"
baseline = CriticBaseline(
(
CriticNetworkLSTM(
2,
opts.embedding_dim,
opts.hidden_dim,
opts.n_encode_layers,
opts.tanh_clipping
)
if opts.baseline == 'critic_lstm'
else
CriticNetwork(
2,
opts.embedding_dim,
opts.hidden_dim,
opts.n_encode_layers,
opts.normalization
)
).to(opts.device)
)
elif opts.baseline == 'rollout':
baseline = RolloutBaseline(model, problem, opts)
else:
assert opts.baseline is None, "Unknown baseline: {}".format(opts.baseline)
baseline = NoBaseline()
if opts.bl_warmup_epochs > 0:
baseline = WarmupBaseline(baseline, opts.bl_warmup_epochs, warmup_exp_beta=opts.exp_beta)
# Load baseline from data, make sure script is called with same type of baseline
if 'baseline' in load_data:
baseline.load_state_dict(load_data['baseline'])
# Initialize optimizer
optimizer = optim.Adam(
[{'params': model.parameters(), 'lr': config.lr_model}]
+ (
[{'params': baseline.get_learnable_parameters(), 'lr': config.lr_critic}]
if len(baseline.get_learnable_parameters()) > 0
else []
)
)
# Load optimizer state
if 'optimizer' in load_data:
optimizer.load_state_dict(load_data['optimizer'])
for state in optimizer.state.values():
for k, v in state.items():
# if isinstance(v, torch.Tensor):
if torch.is_tensor(v):
state[k] = v.to(opts.device)
# Initialize learning rate scheduler, decay by lr_decay once per epoch!
lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: config.lr_decay ** epoch)
# Start the actual training loop
val_dataset = problem.make_dataset(
size=opts.graph_size, num_samples=opts.val_size, filename=opts.val_dataset, distribution=opts.data_distribution)
if opts.resume:
epoch_resume = int(os.path.splitext(os.path.split(opts.resume)[-1])[0].split("-")[1])
torch.set_rng_state(load_data['rng_state'])
if opts.use_cuda:
torch.cuda.set_rng_state_all(load_data['cuda_rng_state'])
# Set the random states
# Dumping of state was done before epoch callback, so do that now (model is loaded)
baseline.epoch_callback(model, epoch_resume)
print("Resuming after {}".format(epoch_resume))
opts.epoch_start = epoch_resume + 1
torch.save(model, os.path.join('.', 'empty.pt'))
if opts.eval_only:
validate(model, val_dataset, opts)
else:
for epoch in range(opts.epoch_start, opts.epoch_start + opts.n_epochs):
avg_time = train_epoch(
model,
optimizer,
baseline,
lr_scheduler,
epoch,
val_dataset,
problem,
tb_logger,
opts,
start_time,
config
)
train_run.append(avg_time)
for hr in opts.save_hrs:
if (time() - start_time) > hr*3600:
opts.save_hrs.remove(hr)
print('Saving model and state...')
hr_time = int(round((time()-start_time)/3600))
with open('../models/att/hist_{}_{}hr.pickle'.format(run_name,hr_time), 'wb') as handle:
pickle.dump(train_run, handle, protocol=pickle.HIGHEST_PROTOCOL)
torch.save(
{
'model': get_inner_model(model).state_dict(),
'optimizer': optimizer.state_dict(),
'rng_state': torch.get_rng_state(),
'cuda_rng_state': torch.cuda.get_rng_state_all(),
'baseline': baseline.state_dict()
},
os.path.join('../models/att', '{}_{}hr-model-att-only.pt'.format(run_name,hr_time))
)
torch.save(model, os.path.join('../models/att', '{}_{}hr-model.pt'.format(run_name,hr_time)))
if __name__ == "__main__":
run(get_options())
| 36.334694 | 120 | 0.601887 | [
"MIT"
] | angela18199/CORL_hyperparameter_search | hyper_attention/run.py | 8,902 | Python |
# pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
import pytest
from models_library.basic_types import LogLevel
from simcore_service_director_v2.core.settings import (
AppSettings,
BootModeEnum,
DynamicSidecarProxySettings,
DynamicSidecarSettings,
RegistrySettings,
)
def test_settings_with_project_env_devel(project_env_devel_environment):
# loads from environ
settings = AppSettings.create_from_envs()
print("captured settings: \n", settings.json(indent=2))
assert settings.SC_BOOT_MODE == BootModeEnum.DEBUG
assert settings.LOG_LEVEL == LogLevel.DEBUG
assert settings.POSTGRES.dsn == "postgresql://test:test@localhost:5432/test"
def test_settings_with_env_devel(mock_env_devel_environment):
settings = AppSettings.create_from_envs()
print("captured settings: \n", settings.json(indent=2))
assert settings
@pytest.mark.parametrize(
"image",
[
"local/dynamic-sidecar:development",
"local/dynamic-sidecar:production",
"itisfoundation/dynamic-sidecar:merge-github-testbuild-latest",
"itisfoundation/dynamic-sidecar:1.0.0",
"local/dynamic-sidecar:0.0.1",
"dynamic-sidecar:production",
"/dynamic-sidecar:latest",
"/local/dynamic-sidecar:latest",
],
)
def test_dynamic_sidecar_settings(image: str) -> None:
required_kwards = dict(
DYNAMIC_SIDECAR_IMAGE=image,
SIMCORE_SERVICES_NETWORK_NAME="test",
TRAEFIK_SIMCORE_ZONE="",
SWARM_STACK_NAME="",
DYNAMIC_SIDECAR_PROXY_SETTINGS=DynamicSidecarProxySettings(),
REGISTRY=RegistrySettings(
REGISTRY_URL="http://te.st",
REGISTRY_AUTH=True,
REGISTRY_USER="test",
REGISTRY_PW="test",
REGISTRY_SSL=False,
),
)
settings = DynamicSidecarSettings(**required_kwards)
assert settings.DYNAMIC_SIDECAR_IMAGE == image.lstrip("/")
| 31.015625 | 80 | 0.70529 | [
"MIT"
] | ITISFoundation/osparc-simcore | services/director-v2/tests/unit/test_core_settings.py | 1,985 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class MonitoringSettingsOperations(object):
"""MonitoringSettingsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.appplatform.v2020_11_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
service_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.MonitoringSettingResource"
"""Get the Monitoring Setting and its properties.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MonitoringSettingResource, or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MonitoringSettingResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'} # type: ignore
def _update_put_initial(
self,
resource_group_name, # type: str
service_name, # type: str
monitoring_setting_resource, # type: "_models.MonitoringSettingResource"
**kwargs # type: Any
):
# type: (...) -> "_models.MonitoringSettingResource"
cls = kwargs.pop('cls', None) # type: ClsType["_models.MonitoringSettingResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_put_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(monitoring_setting_resource, 'MonitoringSettingResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_put_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'} # type: ignore
def begin_update_put(
self,
resource_group_name, # type: str
service_name, # type: str
monitoring_setting_resource, # type: "_models.MonitoringSettingResource"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.MonitoringSettingResource"]
"""Update the Monitoring Setting.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param monitoring_setting_resource: Parameters for the update operation.
:type monitoring_setting_resource: ~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either MonitoringSettingResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.MonitoringSettingResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_put_initial(
resource_group_name=resource_group_name,
service_name=service_name,
monitoring_setting_resource=monitoring_setting_resource,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_put.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'} # type: ignore
def _update_patch_initial(
self,
resource_group_name, # type: str
service_name, # type: str
monitoring_setting_resource, # type: "_models.MonitoringSettingResource"
**kwargs # type: Any
):
# type: (...) -> "_models.MonitoringSettingResource"
cls = kwargs.pop('cls', None) # type: ClsType["_models.MonitoringSettingResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_patch_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(monitoring_setting_resource, 'MonitoringSettingResource')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_patch_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'} # type: ignore
def begin_update_patch(
self,
resource_group_name, # type: str
service_name, # type: str
monitoring_setting_resource, # type: "_models.MonitoringSettingResource"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.MonitoringSettingResource"]
"""Update the Monitoring Setting.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param monitoring_setting_resource: Parameters for the update operation.
:type monitoring_setting_resource: ~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either MonitoringSettingResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.MonitoringSettingResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_patch_initial(
resource_group_name=resource_group_name,
service_name=service_name,
monitoring_setting_resource=monitoring_setting_resource,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_patch.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'} # type: ignore
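    # Illustrative call site only (client construction and resource names are
    # hypothetical); the poller returned by the begin_* methods blocks in
    # .result() until the long-running operation completes:
    #
    #   poller = client.monitoring_settings.begin_update_put(
    #       resource_group_name="my-rg",
    #       service_name="my-spring-service",
    #       monitoring_setting_resource=resource)
    #   monitoring_setting = poller.result()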
| 51.702703 | 210 | 0.68437 | [
"MIT"
] | AriZavala2/azure-sdk-for-python | sdk/appplatform/azure-mgmt-appplatform/azure/mgmt/appplatform/v2020_11_01_preview/operations/_monitoring_settings_operations.py | 19,130 | Python |
# coding: utf-8
# In[ ]:
def choice():
print("1-create,2-update,3-read,4-delete")
try:
x=int(input("\nEnter your choice:"))
except ValueError:
print("Enter integer choice:....")
choice()
else:
if(x==1):
create()
elif(x==2):
update()
elif(x==3):
read()
else:
delete()
def create():
try:
id=int(input("\nEnter your id:"))
except ValueError:
f=int(input("Enter a valid integer number or press 0 to exit:"))
if(f==0):
choice()
else:
create()
else:
name=str(input("Enter your name:"))
college=str(input("Enter the college name:"))
branch=str(input("Enter the branch:"))
print("\n")
lid.append(id)
lname.append(name)
lcollege.append(college)
lbranch.append(branch)
choice()
def update():
try:
id=int(input("Enter your id:"))
except ValueError:
print("\nEnter valid integer id.......")
update()
else:
if id in lid:
r=lid.index(id)
newname=str(input("Enter the name"))
lname[r]=newname
newcollege=str(input("Enter the college name:"))
lcollege[r]=newcollege
newbranch=str(input("Enter the branch:"))
lbranch[r]=newbranch
else:
print("id didnot match........")
print("please register yourself....")
choice()
def read():
try:
db=int(input("\nTo access database enter id:"))
except ValueError:
print("Enter integer id.....")
read()
else:
if db in lid:
print("ID:-",lid)
print("NAMES:-",lname)
print("COLLEGE:-",lcollege)
print("BRANCH:-",lbranch)
elif(lid==dummy):
print("\nno records......")
else:
print("\nRegister inorder to access database.....")
choice()
def delete():
if(lid==dummy):
print("No records found to delete.....")
else:
try:
id=int(input("\nEnter your id:"))
except ValueError:
print("\nEnter the valid integer id.....")
delete()
else:
if id in lid:
delid.append(id)
d=lid.index(id)
del lid[d]
del lname[d]
del lcollege[d]
del lbranch[d]
print("\ndetails of your id has been deleted sucessfully......")
elif id in delid:
print("\nDetails of this id has been deleted......")
else:
print("\nregister the id... ")
choice()
#creating lists
lid=[]
lname=[]
lcollege=[]
lbranch=[]
dummy=[]
delid=[] #list of deleted id
choice()
| 25.625 | 80 | 0.48223 | [
"MIT"
] | AbhishekSalian/Database | database.py | 2,870 | Python |
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault(
'DJANGO_SETTINGS_MODULE', 'bootstrap_components.tests.settings'
)
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 20.461538 | 65 | 0.793233 | [
"MIT"
] | IshanManchanda/dj-bootstrap-components | manage.py | 266 | Python |
#!/usr/bin/python3
import socket
import re
import time
pattern = re.compile('N=\d+\sC=\d+')
s = socket.socket()
s.connect(('localhost', 9007))
s.recv(1024)  # the first message is just an introduction; we do not need it.
time.sleep(4)
while True:
received = s.recv(1024).decode('ascii')
print(received, end='')
received = received.replace('\n', '')
matches = re.findall(pattern, received)
if len(matches) == 0:
break
match = matches[0].split(' ')
N = int(match[0].replace('N=', ''))
C = int(match[1].replace('C=', ''))
start = 0
end = N
for i in range(0, C):
if end - start == 1:
print(start)
s.send(str(start).encode('ascii') + b'\n')
else:
sd = ' '.join(str(j) for j in range(start, (end + start) // 2))
print(sd)
s.send(sd.encode('ascii') + b'\n')
result = s.recv(1024).decode('ascii')
print(result, end = '')
if result.startswith('Correct'):
break
try:
result = int(result.replace('\n', ''))
        except ValueError:
            exit(-1)
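        # Each genuine coin weighs 10 and the counterfeit weighs 9, so if the
        # reported weight is exactly 10 per coin weighed, the lighter coin must
        # be in the upper half of the current range.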
if result == ((end + start) // 2 - start) * 10:
start = (end + start) // 2
else:
end = (end + start) // 2
s.send(str(start).encode('ascii') + b'\n')
print(s.recv(1024).decode('ascii'), end = '')
| 25.388889 | 78 | 0.504741 | [
"MIT"
] | IdanBanani/Pwnable.kr-CTF-Writeups | pwnable.kr/Toddler's Bottle/coin1/coin1.py | 1,371 | Python |
import pymysql
db = pymysql.connect(host='localhost', user='root', password='password', database='login')
cursor = db.cursor()
def sign_up():
userid = input('Enter UserID : ')
query = "select * from login;"
try:
cursor.execute(query)
fetch = cursor.fetchall()
n = len(fetch)
flag = 0
for i in range(n):
if userid == fetch[i][0]:
print('User already exist!, redirecting to login page')
flag = 1
login()
if flag == 0:
pswd = input('Enter password : ')
re_pswd = input('Enter password again : ')
if pswd == re_pswd:
query = "insert into login values('" + userid + "', '" + pswd + "');"
                try:
                    cursor.execute(query)
                    db.commit()
                    print('Sign up successful')
                except:
                    db.rollback()
                    print('Sign up fail')
else:
print("Password didn't match")
except:
db.rollback()
def login():
userid = input('Enter UserID : ')
query = "select * from login;"
try:
cursor.execute(query)
fetch = cursor.fetchall()
n = len(fetch)
flag = 0
for i in range(n):
if userid == fetch[i][0]:
pswd = input('Enter password : ')
if pswd == fetch[i][1]:
print('Login successfully :)')
flag = 1
else:
print("UserID and password doesn't match")
flag = 1
if flag == 0:
print('NO such user exist!, redirecting to Sign Up page')
sign_up()
except:
db.rollback()
def show():
query = "select * from login;"
try:
cursor.execute(query)
fetch = cursor.fetchall()
n = len(fetch)
for i in range(n):
print(fetch[i][0], fetch[i][1])
except:
db.rollback()
print('Bad')
def change_pswd():
userid = input('Enter UserId : ')
query = 'select * from login;'
try:
cursor.execute(query)
fetch = cursor.fetchall()
n = len(fetch)
flag = 0
for i in range(n):
if userid == fetch[i][0]:
pswd = input('Enter old password : ')
if pswd == fetch[i][1]:
new_pswd = input('Enter new password : ')
query = "update login set password = \'" + new_pswd + "\' where userid = \'" + userid + "\'"
try:
cursor.execute(query)
db.commit()
print('Password changed successfully')
flag = 1
except:
db.rollback()
else:
print('Wrong password')
flag = 1
if flag == 0:
print('No such user exist!')
except:
db.rollback()
print('Bad')
print('1.Sign UP', '2.Login', '3.Show All User', '4.Change Password', sep="\n")
opt = int(input())
if opt == 1:
sign_up()
elif opt == 2:
login()
elif opt == 3:
show()
elif opt == 4:
change_pswd()
| 30.715596 | 113 | 0.433393 | [
"Unlicense"
] | vinay-yadav/Login_system | db_login_system.py | 3,348 | Python |
import os
from pathlib import Path
import numpy as np
import pandas as pd
import tensorflow as tf
from skimage import io
from tensorflow import keras
class BimodalDenoiseDataGen(keras.utils.Sequence):
'''
Generate train/validation/test samples for our multimodal
denoise network. Inputs are static images, spectrograms and
corresponding noisy labels. Outputs are noisy labels. In
    order to decorrelate training samples, we randomly shuffle
    movie sequences, sequentially fetch sel_movies movie clips
    from that sequence, then randomly select sel_frames frames
    from each movie clip.
'''
def __init__(self,
label_file,
length_file,
sample_rate,
video_root,
audio_root,
video_shape,
audio_shape,
video_preproc,
audio_preproc,
sel_movies,
sel_frames,
n_classes,
affective_type,
ret_label_X=True,
ret_label_y=True):
self.__parse_label_file (label_file , affective_type)
self.__parse_length_file(length_file, sample_rate)
self.file_list = list(self.label_dict.keys())
self.video_root = video_root
self.audio_root = audio_root
self.video_preproc = video_preproc
self.audio_preproc = audio_preproc
self.sel_movies = sel_movies
self.sel_frames = sel_frames
self._video_shape = video_shape
self._audio_shape = audio_shape
self._n_classes = n_classes
self._batch_size = self.sel_movies*self.sel_frames
self.ret_label_X = ret_label_X
self.ret_label_y = ret_label_y
self.on_epoch_end()
def on_epoch_end(self):
np.random.shuffle(self.file_list)
def __parse_label_file(self, label_file, affective_type):
label_table = pd.read_table(label_file)
self.label_dict = dict(
zip(
label_table["name"],
label_table["valenceClass"] if affective_type == "val"
else label_table["arousalClass"]
))
def __parse_length_file(self, length_file, sample_rate):
length_table = pd.read_table(length_file)
self.length_dict = dict(
zip(
length_table["name"],
[l//sample_rate for l in length_table["length"]]
))
def __len__(self):
num = len(self.label_dict)
return num // self.sel_movies
def __getitem__(self, i):
batch_file_list = self.file_list[i*self.sel_movies:(i+1)*self.sel_movies]
X, y = self._data_generator(batch_file_list)
return X, y
def _data_generator(self, batch_file_list):
videos = np.zeros((self._batch_size, *self.video_shape), dtype=np.float32)
audios = np.zeros((self._batch_size, *self.audio_shape), dtype=np.float32)
labels = []
for i, filename in enumerate(batch_file_list):
length = self.length_dict[filename]
frame_idx = np.random.choice(length, self.sel_frames)
for j, idx in enumerate(frame_idx):
videos[i*self.sel_frames+j] = io.imread(
Path(self.video_root)/"{}_{}.jpg".format(filename, idx)
)
audios[i*self.sel_frames+j] = np.load(
Path(self.audio_root)/"{}_{}.npy".format(filename, idx)
)[..., None]
labels += [self.label_dict[filename]]*self.sel_frames
if self.video_preproc:
videos = self.video_preproc(videos)
if self.audio_preproc:
audios = self.audio_preproc(audios)
labels = keras.utils.to_categorical(labels, self._n_classes)
X = [videos, audios]
y = []
if self.ret_label_X:
X += [labels]
if self.ret_label_y:
y += [labels]
return X, y
@property
def batch_size(self):
return self._batch_size
@property
def video_shape(self):
return self._video_shape
@property
def audio_shape(self):
return self._audio_shape
@property
def n_classes(self):
return self._n_classes
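# Illustrative construction only (paths, shapes and rates below are made up):
#
#   gen = BimodalDenoiseDataGen(
#       label_file="labels.tsv", length_file="lengths.tsv", sample_rate=5,
#       video_root="frames/", audio_root="specs/",
#       video_shape=(224, 224, 3), audio_shape=(128, 128, 1),
#       video_preproc=None, audio_preproc=None,
#       sel_movies=4, sel_frames=8, n_classes=3, affective_type="val")
#
# Each batch then yields 4 * 8 = 32 (frame, spectrogram, noisy-label) samples.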
class BimodalClassifierDataGen(BimodalDenoiseDataGen):
def __init__(self,
training,
denoise_model=None,
**kwargs):
super(BimodalClassifierDataGen, self).__init__(**kwargs)
self.training = training
if self.training:
assert denoise_model is not None, \
"must specify denoise model in training mode!"
self.denoise_model = denoise_model
def __getitem__(self, i):
batch_file_list = self.file_list[i*self.sel_movies:(i+1)*self.sel_movies]
X, _ = self._data_generator(batch_file_list)
#if self.training == True:
# y = self.denoise_model.predict(X)
#else:
y = X[-1]
X = [X[0], X[1]]
return X, y
class DenoiseDataGen(keras.utils.Sequence):
def __init__(self,
label_file,
length_file,
sample_rate,
video_root,
audio_root,
video_shape,
audio_shape,
video_preproc,
audio_preproc,
sel_movies,
sel_frames,
n_classes,
affective_type,
modality,
ret_label_X=True,
ret_label_y=True):
self.__parse_label_file (label_file , affective_type)
self.__parse_length_file(length_file, sample_rate)
self.file_list = list(self.label_dict.keys())
self.video_root = video_root
self.audio_root = audio_root
self.video_preproc = video_preproc
self.audio_preproc = audio_preproc
self.sel_movies = sel_movies
self.sel_frames = sel_frames
self._video_shape = video_shape
self._audio_shape = audio_shape
self._n_classes = n_classes
self._batch_size = self.sel_movies*self.sel_frames
self.ret_label_X = ret_label_X
self.ret_label_y = ret_label_y
self.modality = modality
assert modality in ["visual", "aural"]
self.on_epoch_end()
def on_epoch_end(self):
np.random.shuffle(self.file_list)
def __parse_label_file(self, label_file, affective_type):
label_table = pd.read_table(label_file)
self.label_dict = dict(
zip(
label_table["name"],
label_table["valenceClass"] if affective_type == "val"
else label_table["arousalClass"]
))
def __parse_length_file(self, length_file, sample_rate):
length_table = pd.read_table(length_file)
self.length_dict = dict(
zip(
length_table["name"],
[l//sample_rate for l in length_table["length"]]
))
def __len__(self):
num = len(self.label_dict)
return num // self.sel_movies
def __getitem__(self, i):
batch_file_list = self.file_list[i*self.sel_movies:(i+1)*self.sel_movies]
X, y = self._data_generator(batch_file_list)
return X, y
def _data_generator(self, batch_file_list):
videos = np.zeros((self._batch_size, *self.video_shape), dtype=np.float32)
audios = np.zeros((self._batch_size, *self.audio_shape), dtype=np.float32)
labels = []
for i, filename in enumerate(batch_file_list):
length = self.length_dict[filename]
frame_idx = np.random.choice(length, self.sel_frames)
if self.modality == "visual":
for j, idx in enumerate(frame_idx):
videos[i*self.sel_frames+j] = io.imread(
Path(self.video_root)/"{}_{}.jpg".format(filename, idx)
)
labels += [self.label_dict[filename]]*self.sel_frames
elif self.modality == "aural":
for j, idx in enumerate(frame_idx):
audios[i*self.sel_frames+j] = np.load(
Path(self.audio_root)/"{}_{}.npy".format(filename, idx)
)[..., None]
labels += [self.label_dict[filename]]*self.sel_frames
if self.video_preproc and self.modality == "visual":
videos = self.video_preproc(videos)
if self.audio_preproc and self.modality == "aural":
audios = self.audio_preproc(audios)
labels = keras.utils.to_categorical(labels, self._n_classes)
X = [videos] if self.modality == "visual" else [audios]
y = []
if self.ret_label_X:
X += [labels]
if self.ret_label_y:
y += [labels]
return X, y
@property
def batch_size(self):
return self._batch_size
@property
def video_shape(self):
return self._video_shape
@property
def audio_shape(self):
return self._audio_shape
@property
def n_classes(self):
return self._n_classes
class ClassifierDataGen(DenoiseDataGen):
def __init__(self,
training,
denoise_model=None,
**kwargs):
super(ClassifierDataGen, self).__init__(**kwargs)
self.training = training
if self.training:
assert denoise_model is not None, \
"must specify denoise model in training mode!"
self.denoise_model = denoise_model
def __getitem__(self, i):
batch_file_list = self.file_list[i*self.sel_movies:(i+1)*self.sel_movies]
X, _ = self._data_generator(batch_file_list)
#if self.training == True:
# y = self.denoise_model.predict(X)
#else:
y = X[-1]
X = X[0]
return X, y | 35.69863 | 83 | 0.560054 | [
"MIT"
] | yaochenzhu/MMDQEN | data.py | 10,424 | Python |
# -*- coding: utf-8 -*-
"""layers.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1fCQ_zLCcWNzgE99LK9B2cWrql8J3HgBO
"""
# Author : Vedant Shah
# E-mail : [email protected]
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class gcn_layer(nn.Module):
def __init__(self, ip_size, op_size):
super(gcn_layer, self).__init__()
self.ip_size = ip_size # number of features for each node in the input
self.op_size = op_size # number of features for each node in the output
self.weights = Parameter(
torch.rand(
self.ip_size, self.op_size, dtype=torch.float32, requires_grad=True
)
)
def compute(self, admat, features):
""" Forward Propagation through the layer according to the spectral rule """
        self.D = torch.diag(admat.sum(1), diagonal=0)  # Degree matrix of the input graph
        self.out = torch.empty(admat.size(0), self.op_size)
        self.a_hat = admat + torch.eye(
            admat.size(0)
        )  # Counting the contribution of each node to itself
        self.D_inv = torch.diag(torch.diag(self.D) ** (-0.5))  # D^(-1/2), applied to the degrees only
        self.a_hat = torch.mm(
            torch.mm(self.D_inv, self.a_hat), self.D_inv
        )  # Normalising according to the spectral rule
        self.out = torch.mm(
            torch.mm(self.a_hat, features), self.weights
        )  # Forward propagate through the layer
return self.out
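# Illustrative sketch (not part of the original module): running the layer on a toy
# 3-node path graph. The adjacency matrix, feature values and layer sizes below are
# assumptions for demonstration only.
def _example_gcn_layer():
    admat = torch.tensor(
        [[0.0, 1.0, 0.0],
         [1.0, 0.0, 1.0],
         [0.0, 1.0, 0.0]]
    )
    features = torch.rand(3, 4)  # 3 nodes with 4 input features each
    layer = gcn_layer(ip_size=4, op_size=2)
    return layer.compute(admat, features)  # tensor of shape (3, 2)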
| 32.6 | 84 | 0.638718 | [
"MIT"
] | veds12/aihaven | gcn/layers.py | 1,467 | Python |
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
PROJECT_NAME = "Perceptron_PyPi_package"
USER_NAME = "Jap_patel"
setuptools.setup(
name=f"{PROJECT_NAME}-{USER_NAME}",
version="0.0.2",
author=USER_NAME,
author_email="[email protected]",
description="its an implementation of perceptron" ,
long_description=long_description,
long_description_content_type="text/markdown",
url=f"https://github.com/{USER_NAME}/{PROJECT_NAME}",
project_urls={
"Bug Tracker": f"https://github.com/{USER_NAME}/{PROJECT_NAME}/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
python_requires=">=3.7",
install_requires=[
"numpy",
"tqdm"
]
) | 29.272727 | 79 | 0.650104 | [
"MIT"
] | jap-patel/Perceptron_PyPi_package | setup.py | 966 | Python |
import os
import portalocker
from deep_architect.contrib.communicators.communicator import Communicator
from deep_architect.contrib.communicators.file_utils import (consume_file,
read_file,
write_file)
class FileCommunicator(Communicator):
def __init__(self,
num_procs,
dirname='file_comm',
worker_queue_file='worker_queue',
worker_results_prefix='worker_results_'):
# make directory where communication files are created
try:
os.makedirs(dirname)
except OSError:
pass
# claim a rank for the process
lock = portalocker.Lock(os.path.join(dirname, 'init'),
mode='a+',
flags=portalocker.LOCK_EX)
lock.acquire()
fh = lock.fh
fh.seek(0)
curnum = fh.read()
        if len(curnum) == 0:
rank = 0
else:
rank = int(curnum)
if rank >= num_procs:
raise ValueError('Number of processes > the number of workers')
fh.seek(0)
fh.truncate(0)
fh.write(str(rank + 1))
lock.release()
super(FileCommunicator, self).__init__(num_procs - 1, rank)
self.worker_queue_file = os.path.join(dirname, worker_queue_file)
self.worker_results_prefix = os.path.join(dirname,
worker_results_prefix)
self.done = False
def _publish_results_to_master(self, results, evaluation_id,
searcher_eval_token):
write_file(self.worker_results_prefix + str(self.rank),
(results, evaluation_id, searcher_eval_token))
def _receive_architecture_in_worker(self):
while not self.done:
file_data = consume_file(self.worker_queue_file)
# continue looping until there is something in the queue file
if file_data is None:
continue
# if kill signal is given, return None, otherwise return contents of file
vs, evaluation_id, searcher_eval_token, kill = file_data
if kill:
write_file(self.worker_results_prefix + str(self.rank), 'done')
self.done = True
return None
return vs, evaluation_id, searcher_eval_token
return None
def _is_ready_to_publish_architecture(self):
file_data = read_file(self.worker_queue_file)
return file_data is None
def _publish_architecture_to_worker(self, vs, current_evaluation_id,
searcher_eval_token):
write_file(self.worker_queue_file,
(vs, current_evaluation_id, searcher_eval_token, False))
def _receive_results_in_master(self, src):
result = consume_file(self.worker_results_prefix + str(src + 1))
if result == 'done':
self.finished += 1
return None
return result
def _kill_worker(self):
write_file(self.worker_queue_file, (0, 0, 0, True))
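# Illustrative sketch (not part of the original module): every process builds its own
# communicator and discovers its rank from the shared init file. The process count and
# directory name are assumptions for demonstration only.
def _example_build_communicator():
    comm = FileCommunicator(num_procs=3, dirname='file_comm_example')
    if comm.rank == 0:
        print('running as master')
    else:
        print('running as worker %d' % comm.rank)
    return comm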
| 36.393258 | 85 | 0.574251 | [
"MIT"
] | aspratyush/deep_architect | deep_architect/contrib/communicators/file_communicator.py | 3,239 | Python |
"""passes the credits"""
class Settings:
login_username = ''
login_password = ''
| 10.75 | 24 | 0.662791 | [
"MIT"
] | imansh77/instagram_unfollower | crawler/conf.py | 86 | Python |
"""Tensorflow trainer class."""
import datetime
import math
import os
import warnings
from typing import Callable, Dict, Optional, Tuple
import numpy as np
import tensorflow as tf
from packaging.version import parse
from tensorflow.python.distribute.values import PerReplica
from .integrations import is_comet_available, is_wandb_available
from .modeling_tf_utils import TFPreTrainedModel
from .optimization_tf import GradientAccumulator, create_optimizer
from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, set_seed
from .training_args_tf import TFTrainingArguments
from .utils import logging
if is_wandb_available():
import wandb
if is_comet_available():
import comet_ml
logger = logging.get_logger(__name__)
class TFTrainer:
"""
TFTrainer is a simple but feature-complete training and eval loop for TensorFlow,
optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.TFPreTrainedModel`):
The model to train, evaluate or use for predictions.
args (:class:`~transformers.TFTrainingArguments`):
The arguments to tweak training.
train_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for training. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for evaluation. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
tb_writer (:obj:`tf.summary.SummaryWriter`, `optional`):
Object to write to TensorBoard.
optimizers (:obj:`Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule]`, `optional`):
            A tuple containing the optimizer and the scheduler to use. The optimizer defaults to an instance of
:class:`tf.keras.optimizers.Adam` if :obj:`args.weight_decay_rate` is 0 else an instance of
:class:`~transformers.AdamWeightDecay`. The scheduler will default to an instance of
:class:`tf.keras.optimizers.schedules.PolynomialDecay` if :obj:`args.num_warmup_steps` is 0 else
an instance of :class:`~transformers.WarmUp`.
kwargs:
Deprecated keyword arguments.
"""
def __init__(
self,
model: TFPreTrainedModel,
args: TFTrainingArguments,
train_dataset: Optional[tf.data.Dataset] = None,
eval_dataset: Optional[tf.data.Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
tb_writer: Optional[tf.summary.SummaryWriter] = None,
optimizers: Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule] = (
None,
None,
),
**kwargs,
):
assert parse(tf.__version__).release >= (2, 2, 0), (
"You need to run the TensorFlow trainer with at least the version 2.2.0, your version is %r "
% tf.__version__
)
self.model = model
self.args = args
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
self.gradient_accumulator = GradientAccumulator()
self.global_step = 0
self.epoch_logging = 0
if "prediction_loss_only" in kwargs:
warnings.warn(
"Passing `prediction_loss_only` as a keyword argument is deprecated and won't be possible in a future version. Use `args.prediction_loss_only` instead.",
FutureWarning,
)
self.args.prediction_loss_only = kwargs.pop("prediction_loss_only")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
if tb_writer is not None:
self.tb_writer = tb_writer
else:
self.tb_writer = tf.summary.create_file_writer(self.args.logging_dir)
if is_wandb_available():
self.setup_wandb()
elif os.environ.get("WANDB_DISABLED") != "true":
logger.info(
"You are instantiating a Trainer but W&B is not installed. To use wandb logging, "
"run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
)
if is_comet_available():
self.setup_comet()
elif os.environ.get("COMET_MODE") != "DISABLED":
logger.info(
"To use comet_ml logging, run `pip/conda install comet_ml` "
"see https://www.comet.ml/docs/python-sdk/huggingface/"
)
set_seed(self.args.seed)
def get_train_tfdataset(self) -> tf.data.Dataset:
"""
Returns the training :class:`~tf.data.Dataset`.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
self.total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps
self.num_train_examples = tf.data.experimental.cardinality(self.train_dataset).numpy()
if self.num_train_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
ds = (
self.train_dataset.repeat()
.shuffle(self.num_train_examples, seed=self.args.seed)
.batch(self.total_train_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds)
def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset] = None) -> tf.data.Dataset:
"""
Returns the evaluation :class:`~tf.data.Dataset`.
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
If provided, will override `self.eval_dataset`. The dataset should yield tuples of ``(features,
labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels``
is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If
``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the
loss is instead calculated by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
num_examples = tf.data.experimental.cardinality(eval_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
eval_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> tf.data.Dataset:
"""
Returns a test :class:`~tf.data.Dataset`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features`` is
a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such
as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated
by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
num_examples = tf.data.experimental.cardinality(test_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
test_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
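    # Illustrative sketch (not part of the original class): datasets built from a Python
    # generator have unknown cardinality, so the asserted cardinality required by the
    # methods above has to be attached explicitly. The generator, output types and size
    # passed in here are assumptions for demonstration only.
    @staticmethod
    def _example_dataset_with_cardinality(generator_fn, output_types, num_examples):
        ds = tf.data.Dataset.from_generator(generator_fn, output_types=output_types)
        return ds.apply(tf.data.experimental.assert_cardinality(num_examples))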
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
TFTrainer's init through :obj:`optimizers`, or subclass and override this method.
"""
if not self.optimizer and not self.lr_scheduler:
self.optimizer, self.lr_scheduler = create_optimizer(
self.args.learning_rate,
num_training_steps,
self.args.warmup_steps,
adam_beta1=self.args.adam_beta1,
adam_beta2=self.args.adam_beta2,
adam_epsilon=self.args.adam_epsilon,
weight_decay_rate=self.args.weight_decay,
power=self.args.poly_power,
)
def setup_wandb(self):
"""
Setup the optional Weights & Biases (`wandb`) integration.
One can subclass and override this method to customize the setup if needed. Find more information
`here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:
Environment:
WANDB_PROJECT:
(Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
WANDB_DISABLED:
(Optional): boolean - defaults to false, set to "true" to disable wandb entirely
"""
if hasattr(self, "_setup_wandb"):
warnings.warn(
"The `_setup_wandb` method is deprecated and won't be called in a future version, define `setup_wandb` in your subclass.",
FutureWarning,
)
return self._setup_wandb()
logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"')
combined_dict = {**self.model.config.to_dict(), **self.args.to_sanitized_dict()}
wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=combined_dict, name=self.args.run_name)
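    # Illustrative sketch (not part of the original class): the environment variables read
    # by setup_wandb can be set before the trainer is constructed. The project name used
    # here is an assumption for demonstration only.
    @staticmethod
    def _example_configure_wandb_env():
        os.environ["WANDB_PROJECT"] = "my-tf-project"
        os.environ.setdefault("WANDB_DISABLED", "false")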
def setup_comet(self):
"""
Setup the optional Comet.ml integration.
Environment:
COMET_MODE:
(Optional): str - "OFFLINE", "ONLINE", or "DISABLED"
COMET_PROJECT_NAME:
(Optional): str - Comet.ml project name for experiments
COMET_OFFLINE_DIRECTORY:
(Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE"
For a number of configurable items in the environment,
see `here <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__
"""
comet_mode = os.getenv("COMET_MODE", "ONLINE").upper()
args = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")}
experiment = None
if comet_mode == "ONLINE":
experiment = comet_ml.Experiment(**args)
logger.info("Automatic Comet.ml online logging enabled")
elif comet_mode == "OFFLINE":
args["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./")
experiment = comet_ml.OfflineExperiment(**args)
logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished")
if experiment is not None:
experiment._set_model_graph(self.model, framework="transformers")
experiment._log_parameters(self.args, prefix="args/", framework="transformers")
experiment._log_parameters(self.model.config, prefix="config/", framework="transformers")
def prediction_loop(
self,
dataset: tf.data.Dataset,
steps: int,
num_examples: int,
description: str,
prediction_loss_only: Optional[bool] = None,
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and
:func:`~transformers.TFTrainer.predict`.
Works both with or without labels.
"""
if hasattr(self, "_prediction_loop"):
warnings.warn(
"The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.",
FutureWarning,
)
return self._prediction_loop(
dataset, steps, num_examples, description, prediction_loss_only=prediction_loss_only
)
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", self.args.eval_batch_size)
label_ids: np.ndarray = None
preds: np.ndarray = None
self.eval_loss = tf.keras.metrics.Sum()
# Reset the past mems state at the beginning of the evaluation if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(dataset):
logits = self.distributed_prediction_steps(batch)
_, labels = batch
if not prediction_loss_only:
if isinstance(logits, tuple):
logits = logits[0]
if isinstance(labels, tuple):
labels = labels[0]
if self.args.n_replicas > 1:
for val in logits.values:
if preds is None:
preds = val.numpy()
else:
preds = np.append(preds, val.numpy(), axis=0)
for val in labels.values:
if label_ids is None:
label_ids = val.numpy()
else:
label_ids = np.append(label_ids, val.numpy(), axis=0)
else:
if preds is None:
preds = logits.numpy()
else:
preds = np.append(preds, logits.numpy(), axis=0)
if label_ids is None:
label_ids = labels.numpy()
else:
label_ids = np.append(label_ids, labels.numpy(), axis=0)
if step == steps:
break
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
metrics["eval_loss"] = self.eval_loss.result().numpy() / steps
for key in list(metrics.keys()):
if not key.startswith("eval_"):
metrics[f"eval_{key}"] = metrics.pop(key)
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if hasattr(self, "_log"):
warnings.warn(
"The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.",
FutureWarning,
)
return self._log(logs)
logs["epoch"] = self.epoch_logging
if self.tb_writer:
with self.tb_writer.as_default():
for k, v in logs.items():
tf.summary.scalar(k, v, step=self.global_step)
self.tb_writer.flush()
if is_wandb_available():
wandb.log(logs, step=self.global_step)
if is_comet_available():
experiment = comet_ml.config.get_global_experiment()
if experiment is not None:
experiment._log_metrics(
logs, step=self.global_step, epoch=self.epoch_logging, framework="transformers"
)
output = {**logs, **{"step": self.global_step}}
logger.info(output)
def evaluate(self, eval_dataset: Optional[tf.data.Dataset] = None) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent (pass it to the init :obj:`compute_metrics` argument).
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. The dataset should yield tuples of
``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels.
If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features,
labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with
multiple targets, the loss is instead calculated by calling ``model(features, **labels)``.
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions.
"""
eval_ds, steps, num_examples = self.get_eval_tfdataset(eval_dataset)
output = self.prediction_loop(eval_ds, steps, num_examples, description="Evaluation")
logs = {**output.metrics}
logs["epoch"] = self.epoch_logging
self.log(logs)
return output.metrics
def prediction_step(
self, features: tf.Tensor, labels: tf.Tensor, nb_instances_in_global_batch: tf.Tensor
) -> tf.Tensor:
"""
Compute the prediction on features and update the loss with labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, logits = self.run_model(features, labels, False)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
self.eval_loss.update_state(scaled_loss)
return logits
@tf.function
def distributed_prediction_steps(self, batch):
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
logits = self.args.strategy.run(self.prediction_step, inputs)
return logits
def train(self) -> None:
"""
Train method to train the model.
"""
train_ds = self.get_train_tfdataset()
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
num_update_steps_per_epoch = self.num_train_examples / self.total_train_batch_size
# In fact, ``self.args.dataloader_drop_last`` has no effect in `trainer_tf.py`, because
# the dataset is repeated before being batched.
# It has the effect only when TPU is used which requires explicit tensor shape in order to make
# the gradient accumulation implementation work.
approx = math.floor if self.args.dataloader_drop_last else math.ceil
num_update_steps_per_epoch = approx(num_update_steps_per_epoch)
# At least one update for each epoch.
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
self.steps_per_epoch = num_update_steps_per_epoch
if self.args.max_steps > 0:
t_total = self.args.max_steps
epochs = (self.args.max_steps // self.steps_per_epoch) + int(
self.args.max_steps % self.steps_per_epoch > 0
)
else:
t_total = self.steps_per_epoch * self.args.num_train_epochs
epochs = self.args.num_train_epochs
        # Since ``self.args.num_train_epochs`` can be a `float`, we always keep ``epochs`` as a `float`.
epochs = float(epochs)
with self.args.strategy.scope():
self.create_optimizer_and_scheduler(num_training_steps=t_total)
folder = os.path.join(self.args.output_dir, PREFIX_CHECKPOINT_DIR)
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, folder, max_to_keep=self.args.save_total_limit)
iterations = self.optimizer.iterations
epochs_trained = 0
steps_trained_in_current_epoch = 0
if self.model.ckpt_manager.latest_checkpoint:
logger.info(
"Checkpoint file %s found and restoring from checkpoint", self.model.ckpt_manager.latest_checkpoint
)
ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()
self.global_step = iterations.numpy()
epochs_trained = self.global_step // self.steps_per_epoch
steps_trained_in_current_epoch = self.global_step % self.steps_per_epoch
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", self.global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tf.summary.experimental.set_step(self.global_step)
with self.tb_writer.as_default():
tf.summary.text("args", self.args.to_json_string())
self.tb_writer.flush()
logger.info("***** Running training *****")
logger.info(" Num examples = %d", self.num_train_examples)
# TODO: We might want to print a more precise ``epochs`` if self.args.max_steps > 0 ?
logger.info(" Num Epochs = %d", epochs)
logger.info(" Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d", self.total_train_batch_size
)
logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
logger.info(" Steps per epoch = %d", self.steps_per_epoch)
logger.info(" Total optimization steps = %d", t_total)
self.train_loss = tf.keras.metrics.Sum()
start_time = datetime.datetime.now()
for epoch_iter in range(epochs_trained, int(epochs)):
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(train_ds):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
self.distributed_training_steps(batch)
self.global_step = iterations.numpy()
self.epoch_logging = epoch_iter + (step + 1) / self.steps_per_epoch
training_loss = self.train_loss.result() / (step + 1)
if self.args.debug:
logs = {}
logs["loss"] = training_loss.numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.global_step == 1 and self.args.debug:
with self.tb_writer.as_default():
tf.summary.trace_export(
name="training", step=self.global_step, profiler_outdir=self.args.logging_dir
)
if (
self.args.eval_steps > 0
and self.args.evaluate_during_training
and self.global_step % self.args.eval_steps == 0
):
self.evaluate()
if (self.args.logging_steps > 0 and self.global_step % self.args.logging_steps == 0) or (
self.global_step == 1 and self.args.logging_first_step
):
logs = {}
logs["loss"] = training_loss.numpy()
logs["learning_rate"] = self.lr_scheduler(self.global_step).numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0:
ckpt_save_path = self.model.ckpt_manager.save()
logger.info("Saving checkpoint for step {} at {}".format(self.global_step, ckpt_save_path))
if self.args.max_steps > 0 and self.global_step >= t_total:
break
if self.global_step % self.steps_per_epoch == 0:
break
self.train_loss.reset_states()
if self.args.max_steps > 0 and self.global_step >= self.args.max_steps:
break
end_time = datetime.datetime.now()
logger.info("Training took: {}".format(str(end_time - start_time)))
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
def training_step(self, features, labels, nb_instances_in_global_batch):
"""
Perform a training step on features and labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, _ = self.run_model(features, labels, True)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
gradients = tf.gradients(scaled_loss, self.model.trainable_variables)
gradients = [
g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)
]
if self.args.gradient_accumulation_steps > 1:
self.gradient_accumulator(gradients)
self.train_loss.update_state(scaled_loss)
if self.args.gradient_accumulation_steps == 1:
return gradients
def apply_gradients(self, features, labels, nb_instances_in_global_batch):
if self.args.gradient_accumulation_steps == 1:
gradients = self.training_step(features, labels, nb_instances_in_global_batch)
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
else:
for _ in tf.range(self.args.gradient_accumulation_steps):
reduced_features = {
k: ft[: self.args.train_batch_size // self.args.n_replicas] for k, ft in features.items()
}
reduced_labels = labels[: self.args.train_batch_size // self.args.n_replicas]
self.training_step(reduced_features, reduced_labels, nb_instances_in_global_batch)
features = {
k: tf.concat(
[ft[self.args.train_batch_size // self.args.n_replicas :], reduced_features[k]],
axis=0,
)
for k, ft in features.items()
}
labels = tf.concat(
[labels[self.args.train_batch_size // self.args.n_replicas :], reduced_labels], axis=0
)
gradients = self.gradient_accumulator.gradients
gradients = [
(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients
]
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
self.gradient_accumulator.reset()
@tf.function
def distributed_training_steps(self, batch):
with self.args.strategy.scope():
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
self.args.strategy.run(self.apply_gradients, inputs)
@staticmethod
def _compute_nb_instances(batch):
labels = batch[-1]
if isinstance(labels, PerReplica):
labels = tf.concat(labels.values, axis=0)
nb_instances = tf.reduce_sum(tf.cast(labels != -100, dtype=tf.int32))
return nb_instances
@staticmethod
def _get_step_inputs(batch, nb_instances):
features, labels = batch
if isinstance(labels, PerReplica):
# need to make a `PerReplica` objects for ``nb_instances``
nb_instances = PerReplica([nb_instances] * len(labels.values))
step_inputs = (features, labels, nb_instances)
return step_inputs
def run_model(self, features, labels, training):
"""
Computes the loss of the given features and labels pair.
Subclass and override this method if you want to inject some custom behavior.
Args:
features (:obj:`tf.Tensor`): A batch of input features.
labels (:obj:`tf.Tensor`): A batch of labels.
training (:obj:`bool`): Whether or not to run the model in training mode.
Returns:
A tuple of two :obj:`tf.Tensor`: The loss and logits.
"""
if hasattr(self, "_run_model"):
warnings.warn(
"The `_run_model` method is deprecated and won't be called in a future version, define `run_model` in your subclass.",
FutureWarning,
)
return self._run_model(features, labels, training)
if self.args.past_index >= 0 and getattr(self, "_past", None) is not None:
features["mems"] = self._past
        if isinstance(labels, dict):
outputs = self.model(features, training=training, **labels)[:2]
else:
outputs = self.model(features, labels=labels, training=training)[:2]
loss, logits = outputs[:2]
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
return loss, logits
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
Dataset to run the predictions on. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor,
the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is
a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead
calculated by calling ``model(features, **labels)``.
Returns:
`NamedTuple`:
predictions (:obj:`np.ndarray`):
The predictions on :obj:`test_dataset`.
label_ids (:obj:`np.ndarray`, `optional`):
The labels (if the dataset contained some).
metrics (:obj:`Dict[str, float]`, `optional`):
The potential dictionary of metrics (if the dataset contained labels).
"""
test_ds, steps, num_examples = self.get_test_tfdataset(test_dataset)
return self.prediction_loop(test_ds, steps, num_examples, description="Prediction")
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
"""
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model in {}".format(output_dir))
if not isinstance(self.model, TFPreTrainedModel):
raise ValueError("Trainer.model appears to not be a PreTrainedModel")
self.model.save_pretrained(output_dir)
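# Illustrative sketch (not part of the original module): a minimal way to drive TFTrainer.
# The output directory and hyper-parameters are arbitrary assumptions; `model` is any
# TFPreTrainedModel and the datasets are tf.data.Dataset objects yielding (features, labels)
# with an asserted cardinality.
def _example_tftrainer_usage(model, train_dataset, eval_dataset=None):
    training_args = TFTrainingArguments(
        output_dir="./results",
        num_train_epochs=3,
        per_device_train_batch_size=16,
        logging_steps=100,
    )
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
    )
    trainer.train()
    if eval_dataset is not None:
        return trainer.evaluate()
    return None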
| 43.88622 | 169 | 0.617013 | [
"Apache-2.0"
] | AdrienDS/transformers | src/transformers/trainer_tf.py | 34,717 | Python |
from logbunker.contexts.bunker.logs.domain.LogRepository import LogRepository
from logbunker.contexts.bunker.logs.domain.entities.Log import Log
from logbunker.contexts.bunker.logs.domain.entities.LogContent import LogContent
from logbunker.contexts.bunker.logs.domain.entities.LogCreationDate import LogCreationDate
from logbunker.contexts.bunker.logs.domain.entities.LogId import LogId
from logbunker.contexts.bunker.logs.domain.entities.LogLevel import LogLevel
from logbunker.contexts.bunker.logs.domain.entities.LogOrigin import LogOrigin
from logbunker.contexts.bunker.logs.domain.entities.LogTrace import LogTrace
from logbunker.contexts.bunker.logs.domain.entities.LogType import LogType
from logbunker.contexts.shared.domain.EventBus import EventBus
class LogCreator:
def __init__(self, log_repository: LogRepository, event_bus: EventBus):
self.__log_repository = log_repository
self.__event_bus = event_bus
async def run(
self,
log_id: LogId,
content: LogContent,
level: LogLevel,
origin: LogOrigin,
log_type: LogType,
trace: LogTrace,
creation_date: LogCreationDate,
):
log: Log = Log.create(log_id, content, level, origin, log_type, trace, creation_date)
await self.__log_repository.create_one(log)
await self.__event_bus.publish(log.pull_domain_events())
| 44.375 | 93 | 0.75493 | [
"MIT"
] | parada3desu/logbunker | logbunker/contexts/bunker/logs/application/createone/LogCreator.py | 1,420 | Python |
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
from cfnlint.helpers import RESOURCE_SPECS
class AllowedValue(CloudFormationLintRule):
"""Check if properties have a valid value"""
id = 'E3030'
shortdesc = 'Check if properties have a valid value'
    description = 'Check if properties have a valid value in case of an enumerator'
source_url = 'https://github.com/aws-cloudformation/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#allowedvalue'
tags = ['resources', 'property', 'allowed value']
def initialize(self, cfn):
"""Initialize the rule"""
for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):
self.resource_property_types.append(resource_type_spec)
for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):
self.resource_sub_property_types.append(property_type_spec)
def check_value(self, value, path, property_name, **kwargs):
"""Check Value"""
matches = []
allowed_value_specs = kwargs.get('value_specs', {}).get('AllowedValues', {})
if allowed_value_specs:
            # Always compare the allowed value as a string; strict typing is not a concern for this rule
if str(value) not in allowed_value_specs:
message = 'You must specify a valid value for {0} ({1}).\nValid values are {2}'
matches.append(RuleMatch(path, message.format(property_name, value, allowed_value_specs)))
return matches
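    # Illustrative sketch (not part of the original rule): what a direct call to check_value
    # looks like. The property name, path and allowed values below are assumptions for
    # demonstration only.
    def _example_check_value(self):
        return self.check_value(
            value="t2.nano",
            path=["Resources", "MyInstance", "Properties", "InstanceType"],
            property_name="InstanceType",
            value_specs={"AllowedValues": ["t2.micro", "t2.small"]},
        )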
def check(self, cfn, properties, value_specs, property_specs, path):
"""Check itself"""
matches = list()
for p_value, p_path in properties.items_safe(path[:]):
for prop in p_value:
if prop in value_specs:
value = value_specs.get(prop).get('Value', {})
if value:
value_type = value.get('ValueType', '')
property_type = property_specs.get('Properties').get(prop).get('Type')
matches.extend(
cfn.check_value(
p_value, prop, p_path,
check_value=self.check_value,
value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get('ValueTypes').get(value_type, {}),
cfn=cfn, property_type=property_type, property_name=prop
)
)
return matches
def match_resource_sub_properties(self, properties, property_type, path, cfn):
"""Match for sub properties"""
matches = list()
specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type, {}).get('Properties', {})
property_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type)
matches.extend(self.check(cfn, properties, specs, property_specs, path))
return matches
def match_resource_properties(self, properties, resource_type, path, cfn):
"""Check CloudFormation Properties"""
matches = list()
specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type, {}).get('Properties', {})
resource_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type)
matches.extend(self.check(cfn, properties, specs, resource_specs, path))
return matches
| 48.663043 | 132 | 0.6587 | [
"MIT-0"
] | janssenivo/cfn-python-lint | src/cfnlint/rules/resources/properties/AllowedValue.py | 4,477 | Python |
#!/usr/bin/env python3
import os
import sys
from utils import config, logger, env
from librarian.librarian import Librarian
log = logger.get_log('KodiLibrarian')
kodi = Librarian(config.hosts, update_while_playing=config.update_while_playing)
if env.event == 'download':
if env.calledBy == 'radarr':
log.info('Radarr has downloaded "{}" {}. Initiating update process.'.format(env.movieTitle, env.moviePath))
kodi.updateMovie(env.movieTitle, env.movieDirectory, env.moviePath)
if config.clean_after_update:
kodi.cleanLibrary('movies')
elif env.calledBy == 'sonarr':
log.info('Sonarr has downloaded "{}" {}. Initiating update process.'.format(env.showTitle, env.episodePath))
kodi.updateTVShow(env.episodePath, env.showDirectory)
if config.clean_after_update:
kodi.cleanLibrary('tvshows')
elif env.calledBy == 'lidarr':
log.info('Lidarr not supported yet!! Aborting.')
elif env.event == 'test':
log.debug('Called with test environment from {}'.format(env.calledBy))
sys.exit(0)
else:
log.critical('Could not find any recognizable environment variables. Aborting.')
| 34.558824 | 116 | 0.700426 | [
"MIT"
] | jsaddiction/SharedLibraryManager | KodiLibrarian.py | 1,175 | Python |
from textsimilarity import clean_text, rankers, text_models
__all__ = ['clean_text', 'rankers', 'text_models']
| 28 | 59 | 0.776786 | [
"MIT"
] | NalaniKai/TextSimilarity | textsimilarity/__init__.py | 112 | Python |
import layers
import wrappers
import replay_buffer | 16.666667 | 20 | 0.9 | [
"MIT"
] | charleneeboo/flatland | common/__init__.py | 50 | Python |
from .access_code import SubmitterAccessCode
from .cfp import CfP
from .feedback import Feedback
from .question import Answer, AnswerOption, Question, QuestionTarget, QuestionVariant
from .resource import Resource
from .review import Review, ReviewPhase
from .submission import Submission, SubmissionError, SubmissionStates
from .track import Track
from .type import SubmissionType
__all__ = [
"Answer",
"AnswerOption",
"CfP",
"Feedback",
"Question",
"QuestionTarget",
"QuestionVariant",
"Resource",
"Review",
"ReviewPhase",
"Submission",
"SubmissionError",
"SubmissionStates",
"SubmissionType",
"SubmitterAccessCode",
"Track",
]
| 23.965517 | 85 | 0.719424 | [
"Apache-2.0"
] | MaximilianKindshofer/pretalx | src/pretalx/submission/models/__init__.py | 695 | Python |
import json
import platform
from django.db.models import Q
from django.http import HttpResponse
from django.http import HttpResponseNotFound
from morango.models import InstanceIDModel
from rest_framework import viewsets
from rest_framework.decorators import api_view
from rest_framework.response import Response
import kolibri
from .. import error_constants
from kolibri.core.content.models import ChannelMetadata
from kolibri.core.content.models import ContentNode
from kolibri.core.content.serializers import PublicChannelSerializer
class InfoViewSet(viewsets.ViewSet):
"""
An equivalent endpoint in studio which allows kolibri devices to know
if this device can serve content.
Spec doc: https://docs.google.com/document/d/1XKXQe25sf9Tht6uIXvqb3T40KeY3BLkkexcV08wvR9M/edit#
"""
def list(self, request):
"""Returns metadata information about the device"""
instance_model = InstanceIDModel.get_or_create_current_instance()[0]
info = {
"application": "kolibri",
"kolibri_version": kolibri.__version__,
"instance_id": instance_model.id,
"device_name": instance_model.hostname,
"operating_system": platform.system(),
}
return Response(info)
def _get_channel_list(version, params, identifier=None):
if version == "v1":
return _get_channel_list_v1(params, identifier=identifier)
else:
raise LookupError()
def _get_channel_list_v1(params, identifier=None):
keyword = params.get("keyword", "").strip()
language_id = params.get("language", "").strip()
channels = None
if identifier:
channels = ChannelMetadata.objects.filter(pk=identifier)
else:
channels = ChannelMetadata.objects.all()
if keyword != "":
channels = channels.filter(
Q(name__icontains=keyword) | Q(description__icontains=keyword)
)
if language_id != "":
matching_tree_ids = (
ContentNode.objects.prefetch_related("files")
.filter(
Q(lang__id__icontains=language_id)
| Q(files__lang__id__icontains=language_id)
)
.values_list("tree_id", flat=True)
)
channels = channels.filter(
Q(root__lang__id__icontains=language_id)
| Q(root__tree_id__in=matching_tree_ids)
)
return channels.filter(root__available=True).distinct()
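# Illustrative sketch (not part of the original module): the params mapping consumed by
# _get_channel_list. The keyword and language code below are assumptions for demonstration only.
def _example_channel_query():
    return _get_channel_list("v1", {"keyword": "math", "language": "en"})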
@api_view(["GET"])
def get_public_channel_list(request, version):
""" Endpoint: /public/<version>/channels/?=<query params> """
try:
channel_list = _get_channel_list(version, request.query_params)
except LookupError:
return HttpResponseNotFound(
json.dumps({"id": error_constants.NOT_FOUND, "metadata": {"view": ""}}),
content_type="application/json",
)
return HttpResponse(
json.dumps(PublicChannelSerializer(channel_list, many=True).data),
content_type="application/json",
)
@api_view(["GET"])
def get_public_channel_lookup(request, version, identifier):
""" Endpoint: /public/<version>/channels/lookup/<identifier> """
try:
channel_list = _get_channel_list(
version,
request.query_params,
identifier=identifier.strip().replace("-", ""),
)
except LookupError:
return HttpResponseNotFound(
json.dumps({"id": error_constants.NOT_FOUND, "metadata": {"view": ""}}),
content_type="application/json",
)
if not channel_list.exists():
return HttpResponseNotFound(
json.dumps({"id": error_constants.NOT_FOUND, "metadata": {"view": ""}}),
content_type="application/json",
)
return HttpResponse(
json.dumps(PublicChannelSerializer(channel_list, many=True).data),
content_type="application/json",
)
| 32.5 | 99 | 0.664359 | [
"MIT"
] | MikiasEphrem/kolibri | kolibri/core/public/api.py | 3,900 | Python |
def power(x, y, serialId):
r = x + 10
p = r * y
p += serialId
p *= r
p = (p%1000)//100
return p-5
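# Worked examples taken from the 2018 Advent of Code day 11 puzzle statement; this helper is
# only an illustrative sanity check and is not called by the script below.
def check_power_examples():
    assert power(3, 5, 8) == 4
    assert power(122, 79, 57) == -5
    assert power(217, 196, 39) == 0
    assert power(101, 153, 71) == 4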
if __name__ == '__main__':
serialId = 1788
# serialId = 42
# serialId = 18
cum_sum_square = {}
for i in range(0, 301):
cum_sum_square[(0,i)] = 0
cum_sum_square[(i,0)] = 0
for i in range(1, 301):#row(y)
for j in range(1, 301):#col(x)
# print(j,i)
value = cum_sum_square[(j-1,i-1)]
for k in range(1, j):
value += power(k, i, serialId)
for k in range(1, i):
value += power(j, k, serialId)
cum_sum_square[(j,i)] = value + power(j, i, serialId)
largest_v = -1000000000
largest_cord = None
largest_s = 0
for k in range(1, 301):
for i in range(1, 301-k+1):
for j in range(1, 301-k+1):
v = cum_sum_square[(j+k-1,i+k-1)] + cum_sum_square[(j-1,i-1)] - cum_sum_square[(j+k-1,i-1)] - cum_sum_square[(j-1,i+k-1)]
if v>largest_v:
largest_v = v
largest_cord = (j,i)
largest_s = k
print(largest_cord, largest_v, largest_s) | 30.948718 | 137 | 0.487158 | [
"MIT"
] | antonydeepak/AdventOfCode2018 | 11/2.py | 1,207 | Python |
"""Tests for 2d flow around a cylinder with a conforming mesh and rans3p"""
from builtins import range
from builtins import object
from proteus.iproteus import *
from proteus import Comm
from proteus import Context
import tables
import importlib
comm = Comm.get()
Profiling.logLevel = 7
Profiling.verbose = False
import numpy as np
class Test_HotStart_rans3p(object):
@classmethod
def setup_class(cls):
cls._scriptdir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0,cls._scriptdir)
@classmethod
def teardown_class(cls):
sys.path.remove(cls._scriptdir)
pass
def setup_method(self, method):
"""Initialize the test problem. """
self.aux_names = []
def teardown_method(self, method):
pass
def test_hotstart_p1(self):
self.compare_name = "T01P1_hotstart"
self.example_setting("T=0.1 vspaceOrder=1 onlySaveFinalSolution=True",h5_filename="solution_p1")
self.example_setting("T=0.1 vspaceOrder=1 onlySaveFinalSolution=True isHotStart=True", h5_filename="solution_p1", check_result=True, isHotstart=True,hotstart_t=0.1)
def test_hotstart_p2(self):
self.compare_name = "T01P2_hotstart"
self.example_setting("T=0.1 vspaceOrder=2 onlySaveFinalSolution=True",h5_filename="solution_p2")
self.example_setting("T=0.1 vspaceOrder=2 onlySaveFinalSolution=True isHotStart=True", h5_filename="solution_p2", check_result=True, isHotstart=True,hotstart_t=0.1)
def example_setting(self, pre_setting, h5_filename, check_result=False, isHotstart=False, hotstart_t=0.0):
Context.contextOptionsString = pre_setting
from . import NS_hotstart_so as my_so
reload(my_so)
# defined in iproteus
opts.profile = False
opts.gatherArchive = True
opts.hotStart = isHotstart
opts.hotStartTime = hotstart_t
pList=[]
nList=[]
sList=[]
for (pModule,nModule) in my_so.pnList:
pList.append(
importlib.import_module("."+pModule,
"proteus.tests.HotStart_3P"))
nList.append(
importlib.import_module("."+nModule,
"proteus.tests.HotStart_3P"))
if pList[-1].name == None:
pList[-1].name = pModule
reload(pList[-1]) # Serious error
reload(nList[-1])
if my_so.sList == []:
for i in range(len(my_so.pnList)):
s = default_s
sList.append(s)
else:
sList = my_so.sList
my_so.name = h5_filename#"_hotstart_"+self.compare_name #save data with different filename
# NUMERICAL SOLUTION #
ns = proteus.NumericalSolution.NS_base(my_so,
pList,
nList,
sList,
opts)
self.aux_names.append(ns.modelList[0].name)
ns.calculateSolution(my_so.name)
if check_result:
# COMPARE VS SAVED FILES #
expected_path = 'comparison_files/' + self.compare_name + '.h5'
with tables.open_file(os.path.join(self._scriptdir, expected_path)) as expected, \
tables.open_file( my_so.name + '.h5') as actual:
assert np.allclose(expected.root.u_t2,
actual.root.u_t2,
atol=1e-10)
| 38.666667 | 172 | 0.592325 | [
"MIT"
] | burgreen/proteus | proteus/tests/HotStart_3P/test_HotStart_rans3p.py | 3,596 | Python |
import json
import hashlib
import os
import pickle
import re
import shutil
class Block:
def __init__(self, numberBlock, data, previousHash, idHash):
self._idBlock = numberBlock
self._data = data
self._previousHash = previousHash
self._idHash = idHash
self._checker = True
def getIdBlock(self):
return self._idBlock
def getData(self):
return self._data
def getPreviousHash(self):
return self._previousHash
def getIdHash(self):
return self._idHash
def getChecker(self):
return self._checker
def setData(self, data):
self._data = data
def setIdHash(self, idHash):
self._idHash = idHash
def setChecker(self, boolInfo):
self._checker = boolInfo
def getBlock(self):
return [self._idBlock, self._data, self._previousHash, self._idHash]
def getInfoGraph(self):
info = "Bloque: " + str(self._idBlock) + "\\nData: " + str(self._data) + "\\nHash Bloque: " + str(self._idHash)\
+ "\\nHash Ant.: " + str(self._previousHash)
return info
def verifyBlock(self, hashAnteriorBA):
if hashAnteriorBA == self._previousHash:
return True
# self._checker = False
return False
class Blockchain:
def __init__(self):
self.idChain = 1
self.previous = 0
self.blocks_list = []
self.firstHash = ""
self.checkerChain = True
def generate_hash(self, data):
pattern = r'[0-9a-zA-Z]+'
objectStr = pickle.dumps(data)
while True:
id_hash = hashlib.sha256(objectStr).hexdigest()
if re.match(pattern, id_hash):
return id_hash
def verifyFirstBlock(self, hashActual):
if self.firstHash == hashActual:
return True
return False
def insertBlock(self, tupla, nameJson):
id_hash = self.generate_hash(tupla)
newBlock = Block(self.idChain, tupla, self.previous, id_hash)
self.blocks_list.append(newBlock)
file = self.load_json(nameJson)
file.write(json.dumps([j.getBlock() for j in self.blocks_list]))
file.close()
# only for the first
if self.idChain == 1:
self.firstHash = id_hash
self.idChain += 1
self.previous = id_hash
def graphBlockchain(self, nombreImagen):
graph = 'digraph G{\n'
graph += 'rankdir=LR;\n'
graph += "node[shape = \"box\"]\n"
graph += self.__graficar()
graph += '}'
direccion = self.pathImageGraph()
file = open(f"{direccion}\\{nombreImagen}.dot", "w")
file.write(graph)
file.close()
os.system(f'dot -Tpng {direccion}\\{nombreImagen}.dot -o {direccion}\\{nombreImagen}.png')
def __graficar(self):
graph = ""
bandera = True
for i in range(len(self.blocks_list)):
info = self.blocks_list[i].getInfoGraph()
nodo = 'node' + str(self.blocks_list[i].getIdBlock())
color = "green"
# If is not the first, verify the previous hash
if not (i == 0):
hashAnterior = self.blocks_list[i-1].getIdHash()
brokeChain = self.blocks_list[i].verifyBlock(str(hashAnterior))
# If is the first, verify the actual hash, because the first always has previous in 0
else:
hashActual = self.blocks_list[i].getIdHash()
brokeChain = self.verifyFirstBlock(hashActual)
if not brokeChain:
self.checkerChain = False
bandera = False
if bandera is False:
color = "red"
# If is not the last to put the next pointer
if not (i == (len(self.blocks_list) - 1)):
nextId = self.blocks_list[i + 1].getIdBlock()
nextNodo = 'node' + str(nextId)
graph += nodo + f'[label="{info}", color="{color}", penwidth=3]\n'
graph += nodo + '->' + nextNodo + '\n'
# If is the Last not put the next pointer
else:
graph += nodo + f'[label="{info}", color="{color}", penwidth=3]\n'
# If is not the First to the Back pointer
if not (i == 0):
nodoAnterior = "node" + str(self.blocks_list[i-1].getIdBlock())
if color == "green":
graph += nodo + '->' + nodoAnterior + "\n"
graph += nodoAnterior + f"[color={color}]"
return graph
def updateBlock(self, oldTuple, newTuple, nameJson):
# Cambiando valores de la lista y generando nuevo hash
file = open(os.getcwd() + "\\DataJsonBC\\" + nameJson + ".json", "r")
JSblock_list = json.loads(file.read())
file.close()
newHash = self.generate_hash(newTuple)
# Recorriendo y actualizando JSON
for blockJS in JSblock_list:
if oldTuple == blockJS[1]:
blockJS[1] = newTuple
blockJS[3] = newHash
# recorriendo y actualizando Block list
for block in self.blocks_list:
if oldTuple == block.getData():
block.setData(newTuple)
block.setIdHash(newHash)
file = open(os.getcwd() + "\\DataJsonBC\\" + nameJson + ".json", "w+")
file.write(json.dumps(JSblock_list))
file.close()
# ------------------------------------------------------- FILES ----------------------------------------------------
def load_json(self, nombre):
if os.path.isdir(os.getcwd() + "\\DataJsonBC"):
file = open(os.getcwd() + "\\DataJsonBC\\" + nombre + ".json", "+w")
return file
os.makedirs(os.getcwd() + "\\DataJsonBC")
file = open(os.getcwd() + "\\DataJsonBC\\" + nombre + ".json", "+w")
return file
def pathImageGraph(self):
if not os.path.isdir(os.getcwd() + "\\ImageBlockChain"):
os.makedirs(os.getcwd() + "\\ImageBlockChain")
direccion = os.getcwd() + "\\ImageBlockChain"
return direccion
def removeFilesBlock(self, nameJson):
if os.path.isdir(os.getcwd() + "\\DataJsonBC"):
if os.path.isfile(os.getcwd() + "\\DataJsonBC\\" + nameJson + ".json"):
os.remove(os.getcwd() + "\\DataJsonBC\\" + nameJson + ".json")
# os.remove(os.getcwd() + "\\DataJsonBC\\" + nameJson + ".dot")
| 33.848958 | 120 | 0.550085 | [
"MIT"
] | jorgeisa/Respaldo_EDD_Fase2 | storage/fase2/team13/Blockchain.py | 6,499 | Python |
from d2lbook2 import notebook
from d2lbook2 import rst
import unittest
import nbconvert
_markdown_src = r'''
# Test
:label:`test`
first para
python is good
another para
This is :eqref:`sec_1`
```python2
1+2+3
```
python3 is better
- here
- haha
```{.input .python}
1+2+3
```
```{.input .python}
#@tab python2
1+2+3
```
```bash
````
aa
````
```
## Section 2
:label:`sec_2`
```eval_rst
.. only:: html
Table of Contents
-----------------
```
```toc
:numbered:
:maxdepth: 2
install
user/index
develop/index
```

:width:`400px`
$x=1$, :numref:`sec_2`
'''
class TestRst(unittest.TestCase):
# TODO(mli) add some asserts
def test_convert_notebook(self):
nb = notebook.read_markdown(_markdown_src)
body, _ = rst.convert_notebook(nb, {})
lines = body.split('\n')
for l in lines:
if l.startswith(':math:`x=1`'):
self.assertEqual(l, ':math:`x=1`, :numref:`sec_2`')
| 12.5375 | 67 | 0.601196 | [
"Apache-2.0"
] | aieye-top/d2l-book2 | d2lbook2/rst_test.py | 1,003 | Python |
from .default import DefaultAttackEval
from ..classifier import Classifier
from ..attacker import Attacker
import json
from tqdm import tqdm
class InvokeLimitException(Exception):
pass
class InvokeLimitClassifierWrapper(Classifier):
def __init__(self, clsf, invoke_limit):
self.__invoke_limit = invoke_limit
self.__clsf = clsf
self.__brk = False
self.__invoke = 0
def clear(self):
self.__invoke = 0
def test(self, limit=True):
self.__brk = limit
def get_invoke(self):
return self.__invoke
def get_pred(self, input_, data):
if self.__brk and self.__invoke >= self.__invoke_limit:
raise InvokeLimitException()
self.__invoke += len(input_)
return self.__clsf.get_pred(input_, data)
def get_prob(self, input_, data):
if self.__brk and self.__invoke >= self.__invoke_limit:
raise InvokeLimitException()
self.__invoke += len(input_)
return self.__clsf.get_prob(input_, data)
def get_grad(self, input_, labels, data):
if self.__brk and self.__invoke > self.__invoke_limit:
raise InvokeLimitException()
self.__invoke += len(input_)
return self.__clsf.get_grad(input_, labels, data)
class InvokeLimitAttackerWrapper(Attacker):
def __init__(self, attacker, clsf):
self.__attacker = attacker
self.__clsf = clsf
self.__exceed = False
def __call__(self, *args, **kwargs):
self.__clsf.test()
self.__clsf.clear()
self.__exceed = False
try:
ret = self.__attacker(*args, **kwargs)
except InvokeLimitException:
ret = None
self.__exceed = True
self.__clsf.test(limit=False)
return ret
def exceed(self):
return self.__exceed
class InvokeLimitedAttackEval(DefaultAttackEval):
"""
Evaluate attackers and classifiers with invoke limitation.
"""
def __init__(self, attacker, classifier, invoke_limit=100,
average_invoke=False, **kwargs):
"""
:param Attacker attacker: The attacker you use.
:param Classifier classifier: The classifier you want to attack.
        :param int invoke_limit: Maximum number of victim model queries allowed for each instance.
:param bool average_invoke: If true, returns "Avg. Victim Model Queries".
:param kwargs: Other parameters, see :py:class:`.DefaultAttackEval` for detail.
"""
super().__init__(attacker, classifier, **kwargs)
# wrap classifier, attacker after super().__init__
self.classifier = InvokeLimitClassifierWrapper(self.classifier, invoke_limit)
self.attacker = InvokeLimitAttackerWrapper(self.attacker, self.classifier)
# keep a private version
self.__attacker = self.attacker
self.__classifier = self.classifier
self.__average_invoke = average_invoke
def measure(self, sentA, sentB):
info = super().measure(sentA, sentB)
if self.__attacker.exceed():
info["Query Exceeded"] = True
else:
info["Query Exceeded"] = False
# only records succeed attacks
if info["Succeed"] and self.__average_invoke:
info["Queries"] = self.__classifier.get_invoke()
return info
def update(self, info):
info = super().update(info)
if "Queries" in info:
if "invoke" not in self.__result:
self.__result["invoke"] = 0
self.__result["invoke"] += info["Queries"]
if info["Query Exceeded"]:
if "out_of_invoke" not in self.__result:
self.__result["out_of_invoke"] = 0
self.__result["out_of_invoke"] += 1
return info
def clear(self):
super().clear()
self.__result = {}
def get_result(self):
ret = super().get_result()
if self.__average_invoke and "invoke" in self.__result:
ret["Avg. Victim Model Queries"] = self.__result["invoke"] / ret["Successful Instances"]
return ret
| 33.451613 | 100 | 0.623915 | [
"MIT"
] | agcopenhaver/OpenAttack | OpenAttack/attack_evals/invoke_limit_eval.py | 4,148 | Python |
"""
Common utilities for the library
"""
import shutil
import sys
import os
import logging
from aws_lambda_builders.architecture import X86_64, ARM64
LOG = logging.getLogger(__name__)
def copytree(source, destination, ignore=None, include=None):
"""
    Similar to shutil.copytree except that the destination directory does not have to
    be absent beforehand; it is created if missing and merged into if present.
:type source: str
:param source:
Path to the source folder to copy
:type destination: str
:param destination:
Path to destination folder
:type ignore: function
:param ignore:
        A function that returns a set of file names to ignore, given a list of available file names. Similar to the
        ``ignore`` argument of ``shutil.copytree``
:type include: Callable[[str], bool]
:param include:
        A function that decides whether a file should be copied or skipped. It accepts a file name as its parameter
        and returns True or False. Returning True continues the copy operation; returning False skips the copy operation
        for that file
"""
if not os.path.exists(source):
LOG.warning("Skipping copy operation since source %s does not exist", source)
return
if not os.path.exists(destination):
LOG.debug("Creating target folders at %s", destination)
os.makedirs(destination)
try:
# Let's try to copy the directory metadata from source to destination
LOG.debug("Copying directory metadata from source (%s) to destination (%s)", source, destination)
shutil.copystat(source, destination)
except OSError as ex:
# Can't copy file access times in Windows
LOG.debug("Unable to copy file access times from %s to %s", source, destination, exc_info=ex)
names = os.listdir(source)
if ignore is not None:
ignored_names = ignore(source, names)
else:
ignored_names = set()
for name in names:
# Skip ignored names
if name in ignored_names:
LOG.debug("File (%s) is in ignored set, skipping it", name)
continue
new_source = os.path.join(source, name)
new_destination = os.path.join(destination, name)
if include and not os.path.isdir(new_source) and not include(name):
LOG.debug("File (%s) doesn't satisfy the include rule, skipping it", name)
continue
if os.path.isdir(new_source):
copytree(new_source, new_destination, ignore=ignore, include=include)
else:
LOG.debug("Copying source file (%s) to destination (%s)", new_source, new_destination)
shutil.copy2(new_source, new_destination)
# NOTE: The below function is copied from Python source code and modified
# slightly to return a list of paths that match a given command
# instead of returning just the first match
# The function "which" at aws_lambda_builders/utils.py was copied from https://github.com/python/cpython/blob/3.7/Lib/shutil.py
# SPDX-License-Identifier: Python-2.0
# Copyright 2019 by the Python Software Foundation
def which(cmd, mode=os.F_OK | os.X_OK, executable_search_paths=None): # pragma: no cover
"""Given a command, mode, and executable search paths list, return the paths which
conforms to the given mode on the PATH with the prepended additional search paths,
or None if there is no such file.
`mode` defaults to os.F_OK | os.X_OK. the default search `path` defaults
to the result of os.environ.get("PATH")
Note: This function was backported from the Python 3 source code.
:type cmd: str
:param cmd:
Executable to be looked up in PATH.
:type mode: str
:param mode:
Modes of access for the executable.
:type executable_search_paths: list
:param executable_search_paths:
List of paths to look for `cmd` in preference order.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)
# If we're given a path with a directory part, look it up directly
# rather than referring to PATH directories. This includes checking
# relative to the current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if executable_search_paths:
path = executable_search_paths + path
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if os.curdir not in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path
# extensions. This will allow us to short circuit when given
# "python.exe". If it does match, only test that one, otherwise we
# have to try others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
paths = []
for dir in path:
normdir = os.path.normcase(dir)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
paths.append(name)
return paths
def get_goarch(architecture):
"""
Parameters
----------
architecture : str
name of the type of architecture
Returns
-------
str
returns a valid GO Architecture value
"""
return "arm64" if architecture == ARM64 else "amd64"
| 33.686486 | 127 | 0.652599 | [
"Apache-2.0"
] | awslabs/aws-lambda-builders | aws_lambda_builders/utils.py | 6,232 | Python |
import tensorflow as tf
import os
import json
import subprocess
from scipy.misc import imread, imresize
from scipy import misc
from train import build_forward
from utils.annolist import AnnotationLib as al
from utils.train_utils import add_rectangles, rescale_boxes
import cv2
import argparse
def get_image_dir(args):
weights_iteration = int(args.weights.split('-')[-1])
expname = '_' + args.expname if args.expname else ''
image_dir = '%s/images_%s_%d%s' % (os.path.dirname(args.weights), os.path.basename(args.test_boxes)[:-5], weights_iteration, expname)
return image_dir
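# get_results: restores the checkpoint, runs the detector over every image listed in
# args.test_boxes, saves annotated images under the checkpoint's image directory
# (see get_image_dir), and returns the (predicted, ground-truth) annotation lists.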
def get_results(args, H):
tf.reset_default_graph()
x_in = tf.placeholder(tf.float32, name='x_in', shape=[H['image_height'], H['image_width'], 3])
if H['use_rezoom']:
pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None)
grid_area = H['grid_height'] * H['grid_width']
pred_confidences = tf.reshape(tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])), [grid_area, H['rnn_len'], 2])
if H['reregress']:
pred_boxes = pred_boxes + pred_boxes_deltas
else:
pred_boxes, pred_logits, pred_confidences = build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None)
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver.restore(sess, args.weights)
pred_annolist = al.AnnoList()
true_annolist = al.parse(args.test_boxes)
data_dir = os.path.dirname(args.test_boxes)
image_dir = get_image_dir(args)
os.makedirs(image_dir)
for i in range(len(true_annolist)):
true_anno = true_annolist[i]
orig_img = imread('%s/%s' % (data_dir, true_anno.imageName))[:,:,:3]
img = imresize(orig_img, (H["image_height"], H["image_width"]), interp='cubic')
feed = {x_in: img}
(np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences], feed_dict=feed)
pred_anno = al.Annotation()
pred_anno.imageName = true_anno.imageName
new_img, rects = add_rectangles(H, [img], np_pred_confidences, np_pred_boxes,
use_stitching=True, rnn_len=H['rnn_len'], min_conf=args.min_conf, tau=args.tau, show_suppressed=args.show_suppressed)
pred_anno.rects = rects
pred_anno.imagePath = os.path.abspath(data_dir)
pred_anno = rescale_boxes((H["image_height"], H["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1])
pred_annolist.append(pred_anno)
imname = '%s/%s' % (image_dir, os.path.basename(true_anno.imageName))
misc.imsave(imname, new_img)
if i % 25 == 0:
print(i)
return pred_annolist, true_annolist
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--weights', required=True)
parser.add_argument('--expname', default='')
parser.add_argument('--test_boxes', required=True)
parser.add_argument('--gpu', default=0)
parser.add_argument('--logdir', default='output')
parser.add_argument('--iou_threshold', default=0.5, type=float)
parser.add_argument('--tau', default=0.25, type=float)
parser.add_argument('--min_conf', default=0.2, type=float)
parser.add_argument('--show_suppressed', default=True, type=bool)
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
hypes_file = '%s/hypes.json' % os.path.dirname(args.weights)
with open(hypes_file, 'r') as f:
H = json.load(f)
expname = args.expname + '_' if args.expname else ''
pred_boxes = '%s.%s%s' % (args.weights, expname, os.path.basename(args.test_boxes))
true_boxes = '%s.gt_%s%s' % (args.weights, expname, os.path.basename(args.test_boxes))
pred_annolist, true_annolist = get_results(args, H)
pred_annolist.save(pred_boxes)
true_annolist.save(true_boxes)
try:
rpc_cmd = './utils/annolist/doRPC.py --minOverlap %f %s %s' % (args.iou_threshold, true_boxes, pred_boxes)
print('$ %s' % rpc_cmd)
rpc_output = subprocess.check_output(rpc_cmd, shell=True)
print(rpc_output)
txt_file = [line for line in rpc_output.split('\n') if line.strip()][-1]
output_png = '%s/results.png' % get_image_dir(args)
plot_cmd = './utils/annolist/plotSimple.py %s --output %s' % (txt_file, output_png)
print('$ %s' % plot_cmd)
plot_output = subprocess.check_output(plot_cmd, shell=True)
print('output results at: %s' % plot_output)
except Exception as e:
print(e)
if __name__ == '__main__':
main()
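# Hedged usage sketch (the paths below are illustrative, not taken from the repo):
#
#   python evaluate.py \
#       --weights output/my_experiment/save.ckpt-150000 \
#       --test_boxes data/val_boxes.json \
#       --gpu 0 --iou_threshold 0.5 --tau 0.25 --min_conf 0.2
#
# Annotated images are written next to the checkpoint (see get_image_dir); if the
# doRPC.py / plotSimple.py helpers succeed, a results.png is produced there too.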
| 45.358491 | 161 | 0.656614 | [
"Apache-2.0",
"MIT"
] | lilohuang/TensorBox | evaluate.py | 4,808 | Python |
# -*- coding: utf-8 -*-
# # How long does a Computron take?
#
# - [build model of computron\-to\-wallclock relationship · Issue \#3459 · Agoric/agoric\-sdk](https://github.com/Agoric/agoric-sdk/issues/3459)
# ## Preface: Python Data Tools
#
# See also [shell.nix](shell.nix).
# +
import pandas as pd
import numpy as np
import sqlalchemy as sqla
import matplotlib.cm as cm
import dask
import dask.dataframe as dd
import dask.bag as db
dict(pandas=pd.__version__,
numpy=np.__version__,
sqlalchemy=sqla.__version__,
dask=dask.__version__)
# -
# ### Notebook / Scripting Authority
#
# As a nod to OCap discipline, we avoid ambient authority unless we're in a `TOP`-level scripting or notebook context.
TOP = __name__ == '__main__'
# Logging is a bit of an exception to OCap discipline, as is stderr.
# +
import logging
from sys import stderr
logging.basicConfig(level=logging.INFO, stream=stderr,
format='%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
log = logging.getLogger(__name__)
if TOP:
log.info('notebook start')
# -
# ### Dask Parallel Scheduler UI
# +
from dask.distributed import Client, LocalCluster
if TOP:
cluster = LocalCluster(n_workers=8)
client = Client(cluster)
TOP and client
# -
# ## Result Store
# +
db4_uri = 'sqlite:///slog4.db'
if TOP:
db4 = sqla.create_engine(db4_uri)
# -
# ## SLog files
#
# [rclone support for Google drive](https://rclone.org/drive/)
#
# > This contains 564GB of data from 117 participants, spread across 172 slogfiles ...
#
# ```
# [nix-shell:~/t4]$ rclone sync --progress 'Engineering:/2021-07-04 testnet phase4-stress data/validator slogfiles' ./slogfiles/
# Transferred: 78.633G / 78.633 GBytes, 100%, 101.302 MBytes/s, ETA 0s
# Checks: 5 / 5, 100%
# Transferred: 182 / 182, 100%
# Elapsed time: 13m16.0s
# ```
#
# +
import importlib
import slogdata
importlib.reload(slogdata)
from slogdata import SlogAccess, CLI, show_times
if TOP:
def _dir(path):
import pathlib
return pathlib.Path(path)
def _cli(bin):
from subprocess import run, Popen
return CLI(bin, run, Popen, debug=True)
_sa4 = SlogAccess(_dir('/home/customer/t4/slogfiles'),
_cli('/home/customer/projects/gztool/gztool'))
TOP and show_times(_sa4.get_records('pathrocknetwork/chain-15.pathrocknetwork.slog.gz', 7721, 2))
# -
_bySize = _sa4.files_by_size()
_bySize
_bySize[_bySize.parent == 'KingSuper']
TOP and _bySize[::5].set_index('name')[['st_size']].plot.barh(
title='slogfile sizes (sample)',
figsize=(10, 8));
# ### random access with `gztool`
#
# [gztool](https://github.com/circulosmeos/gztool) `a03c5b4fd5b3` Jul 13 2021.
#
#
# ```
# ~/projects/gztool/gztool -C -e */*.slog.gz
# ...
# ERROR: Compressed data error in 'atlantean/atlantean-agorictest16-chain.slog.gz'.
# ...
# Index file 'ZenQQQ/ZenQQQ-agorictest16-chain.slog.gzi' already exists and will be used.
# Processing 'ZenQQQ/ZenQQQ-agorictest16-chain.slog.gz' ...
# Processing index to 'ZenQQQ/ZenQQQ-agorictest16-chain.slog.gzi'...
#
# 172 files processed
# 1 files processed with errors!
# ```
# +
# count lines on all slogfiles in parallel
# TODO: if it's already in the DB, don't compute it again.
if TOP:
_withLines = _bySize.assign(
lines=db.from_sequence(_bySize.values).map(
lambda v: _sa4.line_count(*v[1:3])).compute())
TOP and _withLines
# -
_withLines.to_sql('file_meta', db4, index=False, if_exists='replace')
# !sqlite3 slog4.db '.header on' '.mode column' 'select * from file_meta limit 3'
_withLines = pd.read_sql_table('file_meta', db4)
# +
def file_chart(slogdf, sample=5, **plotkw):
df = slogdf[['name', 'st_size', 'lines']].copy()
df['b64'] = df.st_size / 64
df.drop('st_size', axis=1, inplace=True)
df.set_index('name')[::sample].plot.barh(**plotkw)
TOP and file_chart(_withLines, title='slogfile sizes (sample)', figsize=(10, 8))
# -
# ## slogfile basics
pd.read_sql("""
select st_size, lines
from file_meta
order by st_size desc
""", db4).describe()
# ## Runs, Blocks, and Deliveries
#
# > split each slogfile into runs (each beginning with an import-kernel event)
# +
def partition_lines(lines, step=1000000):
"""Note: line numbers are **1-based**
"""
lo = pd.DataFrame.from_records([
dict(start=lo, qty=min(lines + 1 - lo, step), lines=lines)
for lo in range(1, lines + 1, step)])
return lo
partition_lines(_withLines.lines.iloc[-1])
# +
#client.restart()
# +
# # !sqlite3 slog4.db 'drop table run'
# +
def provide_table(engine, table, todo, chunksize=None, index=True):
if sqla.inspect(engine).has_table(table):
return pd.read_sql_table(table, engine, chunksize=chunksize)
df = todo()
df.to_sql(table, engine, index=index)
return df
def runs_todo(withLines):
runs = dd.from_delayed([
dask.delayed(_sa4.provide_runs)(f.parent, f['name'], part.start, part.qty)
for fid, f in withLines.iterrows()
for _, part in partition_lines(f.lines).iterrows()
]).compute().sort_values(['file_id', 'line'])
withNames = pd.merge(runs, withLines[['file_id', 'parent', 'name', 'st_size', 'lines']],
on='file_id')
# Compute end times
byFile = withNames.groupby('file_id')
runs = pd.concat([
withNames,
byFile.apply(lambda g: pd.DataFrame(dict(time_end=g.time.shift(-1)))),
byFile.apply(lambda g: pd.DataFrame(dict(line_end=g.line.shift(-1)))),
], axis=1)
runs.line_end = np.where(runs.line_end.isnull(), runs.lines, runs.line_end)
return runs.sort_values(['st_size', 'file_id', 'line']).reset_index(drop=True)
_runs = provide_table(db4, 'run', lambda: runs_todo(_withLines))
# -
# !sqlite3 slog4.db '.schema run'
show_times(_runs, ['time', 'time_end'])[['st_size', 'line', 'line_end', 'parent', 'file_id', 'time', 'time_end']]
# ### runs per slogfile
df = _runs.groupby('file_id')[['line']].count()
df.describe()
# +
df = pd.read_sql("""
select file_id, count(*) runs, name, st_size, lines
from run r
-- join file_id s on s."index" = r.slogfile
group by file_id
order by 2
""", db4)
df.set_index('name')[['runs']][::5].plot.barh(
log=True,
title='slogfile runs (sample)',
figsize=(10, 8));
# -
# ## agorictest-16 genesis: `2021-07-01 19:00:00`
gen16 = show_times(pd.DataFrame(dict(blockHeight=64628, blockTime=[1625166000], ts=1625166000)), ['blockTime'])
gen16
# ## Block end start / finish events
# +
import importlib
import slogdata
from slogdata import SlogAccess
importlib.reload(slogdata)
_sa4 = SlogAccess(_dir('/home/customer/t4/slogfiles'),
_cli('/home/customer/projects/gztool/gztool'))
show_times(
_sa4.provide_blocks('ChainodeTech', 'agorictest-16_chain.slog.gz', 1, 1000000)
)
# -
# ## Separate runs by chain
# +
def first_block(sa, run,
head=5000,
ts=gen16.ts[0]):
log.info('1st block: %s/%s', run.parent, run['name'])
qty = min(int(run.line_end) - run.line + 1, head)
df = sa.get_blocks(f'{run.parent}/{run["name"]}', run.line, qty)[:2]
if not len(df):
return pd.DataFrame.from_records([dict(
blockHeight=-1,
blockTime=-1,
run=run.name,
chain=np.nan)], index=[run.name])
df = df.assign(run=run.name,
chain=16 if df.blockTime[0] >= ts else 15)
return df
show_times(first_block(_sa4, _runs.loc[0]))
# +
def run2chain(sa, runs):
df = runs.apply(lambda run: first_block(sa, run).iloc[0][['blockHeight', 'blockTime', 'chain']],
axis=1)
return df
_r2c = run2chain(_sa4, _runs)
_r2c
# -
_runchain = pd.concat([_runs.drop(columns=['index']), _r2c], axis=1)
_runchain.to_sql('runchain', db4)
_runchain.groupby('chain')[['line']].count()
# !sqlite3 slog4.db '.header on' '.mode column' 'select * from runchain limit 3'
_runchain = pd.read_sql('runchain', db4)
_runchain.groupby('chain')[['line']].count()
_runs['chain'] = _runchain.chain
_runs.groupby('chain')[['file_id', 'lines']].count()
# +
# # !sqlite3 slog4.db 'drop table blockval;'
# +
def blockval_todo(file_meta):
return dd.from_delayed([
dask.delayed(_sa4.provide_blocks)(f.parent, f['name'], part.start, part.qty)
for fid, f in file_meta.iterrows()
for _, part in partition_lines(f.lines).iterrows()
]).compute()
_blockval = provide_table(db4, 'blockval', lambda: blockval_todo(_withLines), index=True)
show_times(_blockval)
# -
# !sqlite3 slog4.db '.schema blockval'
pd.read_sql("""
select file_id, max(blockHeight)
from blockval
where blockTime >= 1625166000
group by file_id
order by 2 desc
""", db4)
# ### Consensus Block-to-Block Time
# +
# db4.execute("""drop table if exists block""")
# -
db4.execute("""
create table block as
select distinct
case when blockTime >= 1625166000 then 16 else 15 end chain
, blockHeight, blockTime
from blockval
order by blockTime
""")
pd.read_sql("""
select * from block limit 10
""", db4)
# ### What is the range of blocks in `agorictest-16`?
pd.read_sql("""
select lo, n, lo + n - 1, hi from (
select min(blockHeight) lo, max(blockHeight) hi, count(distinct blockHeight) n
from block
where chain = 16
)
""", db4)
# +
blk16 = pd.read_sql("""
select blockHeight, blockTime
from block
where chain = 16
""", db4, index_col='blockHeight')
show_times(blk16).describe(datetime_is_numeric=True)
# -
b16time = pd.read_sql("""
select * from block
where chain = 16
""", db4, index_col='blockHeight')
b16time['delta'] = b16time.shift(-1).blockTime - b16time.blockTime
b16time[['delta']].describe()
b16time[b16time.index < 90527].delta.max()
b16time[b16time.delta == 120]
b16time[['delta']].plot(
title='agorictest-16 consensus blockTime delta',
ylabel='sec',
figsize=(9, 6));
show_times(b16time, ['blockTime']).set_index('blockTime')[['delta']].plot(
title='agorictest-16 consensus blockTime delta',
ylabel='sec',
figsize=(9, 6));
# histogram of block-to-block time delta for agorictest-16. (_Note the log scale on the y axis._)
b16time[['delta']].hist(bins=20, log=True);
df = show_times(b16time, ['blockTime'])
df[df.blockTime <= '2021-07-02 19:00:00'][['delta']].hist(bins=20, log=True);
df[df.blockTime <= '2021-07-02 19:00:00'][['delta']].describe()
# ### How many validators logged each block in agorictest-16?
df = pd.read_sql("""
select blockHeight, count(distinct file_id) qty
from blockval
where sign = -1
and blockTime >= 1625166000
group by blockHeight
""", db4)
df.head()
df.set_index('blockHeight').plot(title='agorictest-16 validator coverage by block', figsize=(9, 6));
# !sqlite3 slog4.db '.schema run'
# +
# db4.execute('drop table if exists blockrun16')
db4.execute("""
create table blockrun16 as
with b as (
select *
from blockval
where blockTime >= 1625166000
)
select file_id
, (select r."index"
from run r
where r.file_id = b.file_id and r.line <= b.line and b.line < r.line_end) run
, b.line, b.time
, b.sign
, blockHeight, blockTime
from b
""")
df = pd.read_sql("""
select * from blockrun16
""", db4)
df.tail()
# -
x = df.groupby('blockHeight')[['run']].count()
x.plot();
x['blockHeight'].sort_values('max').reset_index(drop=True).plot();
# ## Slow Blocks
df = show_times(b16time, ['blockTime'])
df[(df.blockTime <= '2021-07-02 19:00:00') &
(df.delta >= 30)]
# Which runs include block 72712, which took 31 sec?
b33 = pd.read_sql("""
select lo.file_id, lo.run, lo.line, hi.line - lo.line + 1 range, lo.blockHeight
from blockrun16 lo
join blockrun16 hi on hi.run = lo.run and hi.blockHeight = lo.blockHeight
where lo.blockHeight in (72712)
and lo.sign = -1
and hi.sign = 1
""", db4)
b33
# ## Correlating block start with block end
_blockrun16 = df = pd.read_sql_table('blockrun16', db4)
df.tail()
lo = df[df.sign == -1]
hi = df.shift(-1)
hi = hi[hi.sign == 1]
dur = hi.time - lo.time
# show_times(df, ['time', 'time_end'])
lo['dur'] = dur
lo['s_hi'] = hi.file_id
lo['l_hi'] = hi.line
lo['t_hi'] = hi.time
dur = lo[lo.file_id == lo.s_hi]
show_times(dur, ['time', 'blockTime'])
show_times(
dur.sort_values('dur').dropna().tail(),
['time', 'blockTime', 't_hi']
)
dur[dur.dur.abs() <= 120].plot.scatter(x='blockHeight', y='dur')
dur[['blockHeight', 'dur']].describe()
# ## Cranks in a Block
# +
def long_runs_including(runs, blockrun, blockHeight):
runs_matching = blockrun[blockrun.blockHeight == blockHeight].run
runs = runs.assign(length=runs.line_end - runs.line)
runs = runs[runs.index.isin(runs_matching)]
return runs.sort_values('length', ascending=False)
_long16 = long_runs_including(_runs, _blockrun16, 64628)
_long16.head()
# -
show_times(dur[dur.run == _long16.index[0]], ['time', 'blockTime', 't_hi'])
_blockrun16[(_blockrun16.run == _long16.index[0]) & (_blockrun16.blockHeight == 64628)].iloc[:2]
# +
def blockrun_records(blockHeight, run, slogAccess, blockrun,
target=None, include=None):
ref = f'{run.parent}/{run["name"]}'
br = blockrun[(blockrun.run == run.name) & (blockrun.blockHeight == blockHeight)]
block_start = br.iloc[0] # assert sign == -1?
block_end = br.iloc[1]
length = block_end.line - block_start.line + 1
df = slogAccess.get_records(f'{run.parent}/{run["name"]}', int(block_start.line), int(length),
target=target, include=include)
return df.assign(file_id=run.file_id)
def get_vats(slogAccess, ref, start, qty):
df = slogAccess.get_records(ref, start, qty,
target='create-vat',
include=['create-vat'])
return df
def vats_in_blockrun(blockHeight, run, slogAccess, blockrun):
br = blockrun[(blockrun.run == run.name) & (blockrun.blockHeight == blockHeight)]
block_start = br.iloc[0] # assert sign == -1?
block_end = br.iloc[1]
length = block_end.line - block_start.line + 1
ref = f'{run.parent}/{run["name"]}'
df = get_vats(slogAccess, ref, int(block_start.line), int(length))
return df.assign(blockHeight=blockHeight, parent=run.parent)
# _sa4.get_records('Nodeasy.com/Nodeasy.com-agorictest15-chain.slog.gz', 1662497, 1671912 - 1662497)
vats_in_blockrun(_blockrun16.iloc[0].blockHeight, _runs.loc[_long16.index[0]],
_sa4, _blockrun16)
# -
vats_in_blockrun(64629, _runs.loc[_long16.index[0]],
_sa4, _blockrun16)
no_deliveries = pd.DataFrame.from_records([
{'time': 1625198620.6265895,
'type': 'deliver-result',
'crankNum': 1291,
'vatID': 'v11',
'deliveryNum': 124,
'kd': object(),
'line': 1673077,
'dr': object(),
'syscalls': 2,
'method': 'inbound',
'compute': 119496.0, # missing compute is possible... from replay.
'dur': 0.1912224292755127,
}]).iloc[:0]
no_deliveries.dtypes
# +
import json
import itertools
# {"time":1625059432.2093444,"type":"cosmic-swingset-end-block-start","blockHeight":58394,"blockTime":1625059394}
# {"time":1625059432.2096362,"type":"cosmic-swingset-end-block-finish","blockHeight":58394,"blockTime":1625059394}
def block_cranks(records):
deliveries = []
syscalls = 0
deliver = None
for record in records:
ty = record['type']
if ty == 'deliver':
deliver = record
syscalls = 0
elif ty == 'syscall-result':
syscalls += 1
elif ty == 'deliver-result':
if not deliver:
log.warn('no deliver? %s', record)
continue
dur = record['time'] - deliver['time']
method = deliver['kd'][2]['method'] if deliver['kd'][0] == 'message' else None
compute = record['dr'][2]['compute'] if type(record['dr'][2]) is type({}) else np.nan
detail = dict(record,
syscalls=syscalls,
kd=deliver['kd'],
method=method,
compute=compute,
dur=dur)
deliveries.append(detail)
if deliveries:
return pd.DataFrame.from_records(deliveries)
else:
return no_deliveries
def get_deliveries(slogAccess, ref, start, qty):
if qty <= 2: # just block start, block end
return no_deliveries
df = slogAccess.get_records(
ref, int(start), int(qty),
target=None, include=['deliver', 'deliver-result', 'syscall-result'])
if len(df) > 0 and 'syscallNum' in df.columns:
        # drop syscall-detail columns if present (a single pass is enough)
        df = df.drop(columns=list(set(df.columns) & {'syscallNum', 'ksr', 'vsr', 'vd'}))
return block_cranks(df.to_dict('records'))
else:
return no_deliveries
_g16 = _blockrun16[(_blockrun16.run == _long16.index[0]) & (_blockrun16.blockHeight == 64628)].iloc[:2]
_run1 = _runs.loc[_long16.index[0]]
get_deliveries(_sa4, f'{_run1.parent}/{_run1["name"]}', _g16.iloc[0].line, _g16.iloc[1].line - _g16.iloc[0].line + 1)
# -
df = dur[dur.run == _long16.index[0]].assign(length=dur.l_hi - dur.line + 1)
# df[df.length > 2].head(10)
df[df.dur > 5].head(10)
# +
# https://avi.im/blag/2021/fast-sqlite-inserts/
def run_sql(script, engine):
for stmt in script.strip().split(';\n'):
engine.execute(stmt)
run_sql('''
PRAGMA journal_mode = OFF;
PRAGMA synchronous = 0;
PRAGMA cache_size = 1000000;
PRAGMA locking_mode = NORMAL;
PRAGMA temp_store = MEMORY;
''', db4)
# -
len(dur)
dur.to_sql('blockrun16dur', db4, if_exists='replace', chunksize=25000, index=False)
# +
_br2 = _blockrun16[(_blockrun16.run == _long16.index[0]) & (_blockrun16.blockHeight == 64632)].iloc[:2]
get_deliveries(_sa4, f'{_run1.parent}/{_run1["name"]}',
_br2.iloc[0].line, _br2.iloc[1].line - _br2.iloc[0].line + 1)
# +
# chain_id, vatID, deliveryNum -> blockHeight, kd, compute
import inspect
def provide_deliveries(slogAccess, blockHeight, run, blockrun):
br = blockrun[(blockrun.run == run.name) & (blockrun.blockHeight == blockHeight)]
if len(br) < 2:
return no_deliveries.assign(file_id=-1, chain=-1, blockHeight=blockHeight, run=run.name)
block_start = br.iloc[0] # assert sign == -1?
block_end = br.iloc[1]
length = int(block_end.line - block_start.line + 1)
df = slogAccess.provide_data(run.parent, run['name'], int(block_start.line), length,
f'deliveries-{blockHeight}', no_deliveries,
lambda ref, start, qty: get_deliveries(slogAccess, ref, start, qty),
'gzip')
df = df.assign(chain=run.chain, blockHeight=blockHeight, run=run.name)
if df.dtypes['chain'] not in ['int64', 'float64'] or 'vatID' not in df.columns or 'vd' in df.columns:
raise NotImplementedError(f'cols: {df.columns} dtypes: {df.dtypes} block {blockHeight, int(block_start.line)}, run\n{run}')
return df
df = provide_deliveries(_sa4, 66371, _run1, _blockrun16)
show_times(df)
# -
# Computron rate for just this one block?
df.compute.sum() / df.dur.sum()
# test empty
provide_deliveries(_sa4, 64629, _run1, _blockrun16)
_runs.loc[455:456]
# ## Cranks in one long run starting at agorictest-16 genesis
gen16
df = pd.read_sql("""
with lo as (
select *
, time - blockTime delta
from blockrun16
where blockHeight = 64628
and blockTime = 1625166000
and sign = -1
and run is not null
), hi as (
select run, max(blockHeight) hi, max(blockTime) t_hi
from blockrun16
where run is not null
and sign = -1
group by run
), agg as (
select lo.*, hi.hi, hi.t_hi
from lo join hi on lo.run = hi.run
where abs(delta) < 7
order by hi.t_hi desc
)
select agg.*, run.parent, run.name
from agg
join run on agg.run = run."index"
limit 5
""", db4)
show_times(df, ['time', 'blockTime', 't_hi'])
show_times(_runs).loc[445]
# +
import json
def run1_deliveries(con, sa, lo, hi, run, br,
json_cols=['kd', 'dr'],
table='run1'):
if sqla.inspect(con).has_table(table):
lo = pd.read_sql(f'select max(blockHeight) + 1 lo from {table}', con).iloc[0].lo
if_exists = 'append'
else:
if_exists = 'replace'
for blockHeight in range(lo, hi):
df = provide_deliveries(sa, blockHeight, run, br)
if not len(df):
# log.info('block %d: no deliveries', blockHeight)
continue
for col in json_cols:
df[col] = df[col].apply(json.dumps)
log.info('block %d of %d: %s += %d rows', blockHeight, hi, table, len(df))
df.to_sql(table, con, if_exists=if_exists, index=False)
if_exists = 'append'
run1_deliveries(db4, _sa4, 64628, 75000, _runs.loc[445], _blockrun16)
# run1_deliveries(db4, _sa4, 75000, 90530, _runs.loc[445], _blockrun16, table='run1b')
# -
_run1 = df = pd.read_sql('select * from run1 union all select * from run1b', db4)
show_times(_run1.tail(3))
_run1.blockHeight.describe()
_run1[_run1.blockHeight >= 88296 - 2].sort_values('blockHeight').head(30).drop(columns=['kd', 'dr', 'file_id'])
df = _run1[_run1.blockHeight == 88295].sort_values('dur', ascending=False).drop(columns=['kd', 'dr', 'file_id'])
df.head(10)
df[df.dur >= 1]
# TODO: compare `getPayout` here (in 88295) vs something earlier... same computrons? same duration?
#
# e.g. if harden weakset grew, the duration could grow while keeping computrons constant
_run1[_run1.method == 'getPayout'][['compute', 'dur']].describe()
_run1[_run1.method == 'getPayout'].compute.hist()
_run1[(_run1.method == 'getPayout') & (_run1.compute == 31654)].plot.scatter(x='blockHeight', y='dur')
lg = _run1[_run1.blockHeight > 76000]
lg = lg[lg.dur < 1]
lg[(lg.method == 'getPayout') & (lg.compute == 31654)].plot.scatter(x='blockHeight', y='dur')
# Things got slower over time.
#
# Hypothesis: GC didn't happen -> the weak set got big -> weakset access time got big.
# So the computron model should not be based on this range, but rather on pre-loadgen time.
# When looking at computron / wallclock, we should look at:
#
# - all getCurrentAmount calls
# - within a narrow range of blockHeight
# - that all use the same # of computrons
#
# (as above)
#
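# A sketch of the filter described above. Column names follow `_run1`; the block
# window and the choice of the most common computron value are illustrative
# assumptions, not results carried over from the original analysis.
# +
def narrow_rate_sample(df, method='getCurrentAmount', lo=65000, hi=70000):
    """All `method` deliveries in a narrow block range that share one compute value."""
    d = df[(df.method == method) & df.compute.notnull() & df.dur.notnull()]
    d = d[(d.blockHeight >= lo) & (d.blockHeight < hi)]
    c = d.compute.mode().iloc[0]  # the single most common computron count
    d = d[d.compute == c]
    return d.assign(rate=d.compute / d.dur)
narrow_rate_sample(_run1)[['blockHeight', 'compute', 'dur', 'rate']].describe()
# -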
b16time[b16time.delta == 224]
_run1[['compute', 'dur']].describe()
# +
def drate(df):
rate = df.compute / (df.syscalls + 1) / df.dur
# rate = df.compute / df.dur
return df.assign(rate=rate)
df = drate(_run1).groupby('method')[['rate']].aggregate(['count', 'mean', 'std', 'max'])
df = df.sort_values(('rate', 'mean'), ascending=False)
df
# -
common = _run1.groupby('method')[['line']].count()
common = common[common.line > 20]
common
drate(_run1[_run1.method.isin(common.index)])[['method', 'rate']].boxplot(by='method', rot=90, figsize=(20, 12))
common.sort_values('line', ascending=False).head()
_run1.blockHeight.describe()
_run1.sort_values('dur', ascending=False)
# This is an always-busy sim, but **TODO** we'd like to look at the arrival pattern that we have.
# +
def sim(df, c_eg, dur_eg, target):
df = df[df.chain == 16]
df['running'] = df.compute.cumsum() # try exp
threshold = target * (c_eg / dur_eg)
log.info('threshold: %s', threshold)
df['sim_blk'] = (df.running / threshold).round()
# df['adj'] = df.sim_blk - df.blockHeight
return df.reset_index(drop=True)
df = _run1.drop(columns=['type', 'kd', 'dr', 'file_id', 'line', 'run'])
# df = df[df.method != 'executeContract']
# df = df[df.method == 'getCurrentAmount'] # getPayout
# df.blockHeight = df.blockHeight - df.blockHeight.iloc[0]
df = sim(df, 48390.0, 0.074363, 5)
df = df[df.sim_blk.notnull()]
df.sim_blk = df.sim_blk.astype('int64')
show_times(df)
# -
pd.read_sql('''
select count(distinct run)
from blockrun16
''', db4)
len(_runs)
# +
def nth_block(sa, blockHeight, run, blockrun,
ts=gen16.ts[0]):
log.info('%d th block: %s/%s', blockHeight, run.parent, run['name'])
br = blockrun[(blockrun.blockHeight == blockHeight) & (blockrun.run == run.name)]
df = provide_deliveries(sa, blockHeight, run, br)
if not len(df):
return df
df = df.assign(run=run.name, chain=run.chain)
return df
m1b1 = pd.concat(
df
for _, run in _runs.iterrows()
for df in [nth_block(_sa4, 80001, run, _blockrun16)]
if len(df)
)
m1b1
# -
m1b1[(m1b1.method == 'getCurrentAmount') & (m1b1.deliveryNum == 44721)][['compute', 'dur', 'run']]
df = m1b1[(m1b1.method == 'getCurrentAmount') & (m1b1.deliveryNum == 44721)][['compute', 'dur', 'run']]
df.describe()
# ## Validator speed: 2-4x spread for `getCurrentAmount`
df[['dur']].hist()
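# Quantify the spread claimed above: ratio of the slowest validator's mean delivery
# duration to the fastest (assumes `df` still holds the per-run getCurrentAmount
# sample from the cell above).
dur_by_run = df.groupby('run').dur.mean()
dur_by_run.max() / dur_by_run.min()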
# +
# df.groupby('method')[['compute']].describe().loc['executeContract']
# -
df.compute.hist(log=True);
df.dur.hist(log=True);
df[df.dur < .1].dur.hist()
# #### Total delivery duration per block
x = pd.concat([
df.groupby('blockHeight')[['dur']].sum(),
df.groupby('sim_blk')[['dur']].sum().rename(columns=dict(dur='dur_sim')),
], axis=1)
x.hist(); # log=True);
x.describe()
x.dur.quantile(.9)
xx = df.groupby('sim_blk')[['dur']].sum().rename(columns=dict(dur='dur_sim'))
xx[xx.dur_sim > 25]
df[df.blockHeight == 88295].sort_values('dur', ascending=False)
df[df.sim_blk == 32607].sort_values('dur', ascending=False)
_run1[_run1.compute == 381240].dur.describe()
_run1[_run1.compute == 381240].plot.scatter(x='blockHeight', y='dur')
# This wasn't a big deal during most of the chain (.25sec 75th percentile).
#
# We could model this within 2x or 3x by ignoring the spike.
# **TODO**: what happened during that spike? is it consensus-observable? kernel-observable?
df = _run1[_run1.compute == 381240]
df[(df.blockHeight >= 88100) & (df.blockHeight < 88400)].plot.scatter(x='blockHeight', y='dur')
df[df.sim_blk == 32607].compute.sum()
df[df.sim_blk == 32607].dur.sum()
df[df.sim_blk == 32607].syscalls.sum()
df.groupby('blockHeight')[['syscalls']].sum().describe()
# #### Total compute per block
x = pd.concat([
df.groupby('blockHeight')[['compute']].sum(),
df.groupby('sim_blk')[['compute']].sum().rename(columns=dict(compute='cmp_sim')),
], axis=1)
x.hist(log=True);
x.describe()
cluster.scale(8)
client.restart()
f'{12:04}'
# +
def pick_chain(ht,
gen=1625166000, hi=16, lo=15):
return np.where(ht > gen, hi, lo)
def run_deliveries(slogs, sa, run):
chain_id = f'agorictest-{run.chain}'
blocks = pd.concat(
pd.read_csv(blockFile)
for blockFile in (slogs / run.parent).glob('*-blocks.csv')
)
blocks = blocks[(blocks.line >= run.line) &
(blocks.line < run.line_end)]
blocks = blocks.assign(run=run.name)
heights = blocks.blockHeight.unique()
log.info('run %s %-3d blocks %.16s %s', run.name, len(heights),
pd.to_datetime(run.time, unit='s'), run['name'])
tot = 0
for blockHeight in heights:
detail = provide_deliveries(sa, blockHeight, run, blocks)
if not len(detail):
continue
tot += len(detail)
yield detail
if not tot:
yield no_deliveries.assign(file_id=-1, chain=-1, blockHeight=-1, run=run.name)
def by_vat(dest, run, detail):
chain_id = f'agorictest-{run.chain}'
run_detail = f'{run.name:04}-{run.parent}-{run.file_id}-{run.line}'
for vatID, g in detail.groupby('vatID'):
try:
(dest / chain_id / vatID).mkdir(parents=True)
except:
pass
vat_dir = dest / chain_id / vatID
f = vat_dir / f'delivery-detail-{run_detail}.csv.gz'
log.info('saving to %s:\n%s', f, g.set_index(['vatID', 'deliveryNum'])[['compute', 'dur']].tail(3))
g.to_csv(f, index=False)
f = vat_dir / f'delivery-summary-{run_detail}.csv.gz'
g[['vatID', 'deliveryNum', 'kd', 'syscalls', 'compute']].to_csv(f, index=False)
return detail.assign(run=run.name).groupby(['run', 'vatID'])[['deliveryNum']].count()
#by_vat(_dir('slogfiles/'), _dir('vat-details/'), _sa4, _runs)
for df in run_deliveries(_dir('slogfiles/'), _sa4, _runs.loc[58]):
print(df)
print(by_vat(_dir('vat-details/'), _runs.loc[58], df))
break
# +
def run_deliveries_todo(sa, slogs, dest, runs):
def do_run(run):
df = pd.concat(
detail
for detail in run_deliveries(slogs, sa, run)
)
return by_vat(dest, run, df)
todo = (
dask.delayed(do_run)(run)
for _, run in runs.iterrows()
)
return todo
per_run = dd.from_delayed(run_deliveries_todo(_sa4, _dir('slogfiles/'), _dir('vat-details/'), _runs))
per_run.compute()
# -
pd.to_datetime(1625213913.1672082, unit='s')
# +
import inspect
from slogdata import show_times
db4.execute('drop table if exists crankrun') #@@
def deliveries_todo(sa, blockrun, runs):
todo = (
dask.delayed(provide_deliveries)(sa, blockHeight, run,
blockrun[(blockrun.run == run.name) &
(blockrun.blockHeight == blockHeight)])
for run_ix, run in runs.iterrows()
for heights in [blockrun[blockrun.run == run_ix].blockHeight.unique()]
for _ in [log.info('run %s %-3d blocks %.16s %s', run_ix, len(heights),
pd.to_datetime(run.time, unit='s'), run['name'])]
for blockHeight in heights
)
log.info('todo: %s', type(todo))
df = dd.from_delayed(todo,
meta=no_deliveries.assign(file_id=1, chain=1, blockHeight=1, run=1))
return df.compute()
# _dr16 = provide_table(
# db4, 'crankrun',
# # 65517
# lambda: deliveries_todo(_sa4, _blockrun16[_blockrun16.blockHeight <= 65000], _runs.loc[200:275]))
_dr16 = deliveries_todo(_sa4, _blockrun16, # [_blockrun16.blockHeight <= 65000]
_runs[_runs.chain == 16])
_dr16
# -
# ## deliveries from batch
_delrun = pd.read_sql('select * from delrun', db4)
_delrun.groupby('chain')[['line']].count()
# ## Are compute meter values consistent?
# +
def compute_meter_consistent(df):
compute_count = df.groupby(['vatID', 'deliveryNum'])[['compute']].nunique()
dups = compute_count[compute_count['compute'] > 1]
return pd.merge(dups.reset_index(),
df[['run', 'vatID', 'deliveryNum', 'compute']],
how='left', suffixes=['_dup', ''],
left_on=['vatID', 'deliveryNum'],
right_on=['vatID', 'deliveryNum'])
# x = compute_meter_consistent(_alld16).compute()
x = compute_meter_consistent(_delrun[_delrun.chain == 16]).sort_values(['vatID', 'deliveryNum']) # .compute()
x
# -
compute_meter_consistent(_delrun[_delrun.chain == 15]).sort_values(['vatID', 'deliveryNum']) # .compute()
# ## Computrons per block
blockdel = _delrun[_delrun.method != 'executeContract']
key = ['chain', 'blockHeight', 'vatID', 'deliveryNum', 'compute']
blockdel = blockdel.sort_values(key).drop_duplicates()
df = blockdel.groupby(['chain', 'blockHeight'])[['deliveryNum']].count().sort_index()
df.plot()
_bkcomp = df = blockdel.groupby(['chain', 'blockHeight'])[['compute']].sum()
df
df.plot()
# +
def type2sign(df):
df['sign'] = np.where(df.type == 'cosmic-swingset-end-block-start', -1, 1)
return df
def byChain(df, gen=gen16.ts[0], hi=16, lo=15):
return df.assign(chain=np.where(df.blockTime >= gen, hi, lo))
def slog_blocks(slogfiles,
pattern='**/*-blocks.csv'):
df = pd.concat(type2sign(pd.read_csv(p)[['type', 'blockHeight', 'blockTime']])
for p in slogfiles.glob(pattern))
df = byChain(df)
key = ['chain', 'blockHeight', 'blockTime']
df = df[key].sort_values(key).drop_duplicates()
return df.reset_index(drop=True)
_blk = slog_blocks(_dir('slogfiles/'))
_blk.tail()
# -
_byChain = _blk.groupby('chain')
df = pd.merge(
_byChain[['blockHeight']].nunique(),
_byChain[['blockHeight']].aggregate(['min', 'max'])['blockHeight'],
left_index=True, right_index=True,
)
df['span'] = df['max'] - df['min'] + 1
df
# +
def blockdur(df):
df = df.set_index(['chain', 'blockHeight'])
df['dur'] = df.shift(-1).blockTime - df.blockTime
return df
_bkdur = blockdur(_blk)
_bkdur
# -
# compute by block with duration
_bkcmpdur = _bkcomp.join(_bkdur, lsuffix='_d', rsuffix='_b')
_bkcmpdur['rate'] = (_bkcmpdur.compute / _bkcmpdur.dur).astype(float)
_bkcmpdur
_bkcmpdur[_bkcmpdur.dur > _bkcmpdur.dur.quantile(0.99)]
df = _bkcmpdur.loc[16]
df[df.dur < 8][['rate']].hist(log=True)
_bkcmpdur[_bkcmpdur.dur < 8][['rate']].describe()
# ## simulation
_delrun.groupby('run')[['line']].count()
_delrun[['crankNum', 'run']].groupby(['crankNum'])[['run']].aggregate(['count']).plot()
# +
def sim(df, percentile):
df = df[df.chain == 16]
df = df[df.method != 'executeContract']
    key = ['blockHeight', 'crankNum', 'vatID', 'deliveryNum', 'compute']
    # per-delivery duration summary (kept for reference; not what sim() returns)
    dur_summary = df.groupby(key)[['dur']].aggregate(['count', 'mean', 'median', 'sum'])
df = df[['blockHeight', 'crankNum', 'vatID', 'deliveryNum', 'compute']].sort_values(
['blockHeight', 'crankNum', 'vatID', 'deliveryNum']).drop_duplicates()
threshold = df.compute.quantile(percentile)
df['running'] = df.compute.cumsum()
df['sim_block'] = (df.running / threshold).round()
return df.reset_index(drop=True)
df = sim(_run1, .99)
df
# -
df[['blockHeight']].plot()
df.set_index('blockHeight')[['sim_block']].plot()
# ## Compute rate by vat
cm.rainbow(1)  # peek at one colormap entry (matplotlib.cm was imported above as cm)
pd.Categorical(_delrun.method.dropna(), ordered=True)
# +
import matplotlib as plt
def cmap_of(df, color,
cmap=plt.cm.get_cmap('hot')):
df = df.loc[:, [color]].fillna('???')
byColor = df.groupby(color).count() #.set_index(color)
byColor['unit'] = range(len(byColor))
byColor.unit = byColor.unit / len(byColor)
byColor['color'] = byColor.unit.apply(cmap)
return byColor.loc[df[color]].color
cmap_of(_delrun, 'method')
# +
def vat_rate(df, vatID):
df = df[['vatID', 'deliveryNum', 'compute', 'dur']].dropna()
df['rate'] = df.compute / df.dur
df = df[df.vatID == vatID]
# df = df.groupby('deliveryNum')[['compute', 'dur', 'rate']].mean()
#df.sort_values('dur', ascending=False)
#df
df = df.set_index('deliveryNum').sort_index()
return df
def show_rate(df, vatID, figsize=(8, 9)):
df = vat_rate(df, vatID)
ax = df.plot(subplots=True, figsize=figsize)
def fit_line(df, x, y, color=None, figsize=(9, 6)):
df = df[~df[x].isnull() & ~df[y].isnull()]
cs = np.polyfit(df[x], df[y], 1)
f = np.poly1d(cs)
if color:
color = cmap_of(df, color)
ax1 = df[[x, y]].plot.scatter(x=x, y=y, color=color, figsize=figsize)
df['fit'] = f(df[x])
df.plot(x=x, y='fit', color='Red', legend=False, ax=ax1);
# show_rate(start1, 'v10');
# vat_rate(start1, 'v10').plot.scatter(x='compute', y='dur')
# fastSlog = start1[start1.slogfile == 'PDPnodeTestnet-agorictest16-chain.slog.gz']
# fit_line(vat_rate(fastSlog, 'v10'), 'compute', 'dur')
# len(fastSlog[fastSlog.vatID == 'v10'])
# fastSlog[fastSlog.vatID == 'v10'].drop(['kd', 'dr'], axis=1) #.sort_values('compute', ascending=False)
#fastSlog[fastSlog.vatID == 'v10'].set_index('deliveryNum').sort_index()[['compute', 'dur']].plot(subplots=True)
fit_line(_delrun[_delrun.chain == 16], 'compute', 'dur', color='method')
# -
_r = _delrun[['compute', 'dur', 'method']].assign(rate=_delrun.compute / _delrun.dur)
_r.groupby('method')[['rate']].describe().sort_values(('rate', 'mean'))
df.sort_values(('compute', 'mean'))
df = fastSlog[fastSlog.vatID == 'v10']
df['rate'] = df.compute / df.dur
df[['deliveryNum', 'dur', 'compute', 'rate']].set_index('deliveryNum').plot(subplots=True)
df.rate.describe()
# ### exclude dynamic vat creation
fastSlog.groupby('method')[['compute']].mean().plot.barh(log=True, figsize=(12, 10))
noContract = df =fastSlog[fastSlog.method != 'executeContract'].copy()
df['rate'] = df.compute / df.dur
df[['dur', 'compute', 'rate']].plot(subplots=True)
fit_line(noContract, 'compute', 'dur')
fit_line(fastSlog, 'compute', 'dur')
# ## Add syscalls to the model
df = noContract
# np.polyfit only accepts a 1-D x; fit dur ~ compute + syscalls with least squares instead.
X = np.column_stack([df['compute'], df['syscalls'], np.ones(len(df))])
cs, *_ = np.linalg.lstsq(X, df['dur'], rcond=None)
df = _dr16.assign(chain_id=16)
df = df[['chain_id', 'vatID', 'deliveryNum', 'blockHeight', 'kd', 'compute']].drop_duplicates()
df = df.set_index(['chain_id', 'vatID', 'deliveryNum']).sort_index()
df[df.index.duplicated()]
df
df.loc[16].loc['v1'].loc[0]
_dr16.query('(deliveryNum == 0) & (vatID == "v1")').groupby('compute')[['line']].count()
pd.merge(_dr16,
df[df.index.duplicated()].reset_index()[['vatID', 'deliveryNum']],
left_on=['vatID', 'deliveryNum'], right_on=['vatID', 'deliveryNum']
)[['vatID', 'deliveryNum', 'blockHeight', 'kd', 'compute']]
# _dr16.assign(chain_id=16).set_index(['chain_id', 'vatID', 'deliveryNum'])
dall = pd.concat(
pd.read_csv(f)
for f in _dir('slogfiles/').glob('**/*-deliveries-*.csv.gz')
)
dall
# +
def load_deliveries(files, con, table):
if_exists = 'replace'
for file in files:
df = pd.read_csv(file)
df.to_sql(table, con, if_exists=if_exists)
if_exists = 'append'
log.info('loaded %d records from %s', len(df), file)
load_deliveries(
_dir('slogfiles/').glob('**/*-deliveries-*.csv.gz'),
db4,
'delrun3')
# -
# ### Did we ever do more than 1000 cranks in a block?
#
# if not, current policy never fired
df = _dr16[['blockHeight', 'crankNum']].drop_duplicates()
df.groupby('blockHeight')[['crankNum']].count().sort_values('crankNum', ascending=False)
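# Direct check of the question above (uses `df` from the previous cell): did any
# block ever exceed 1000 cranks?
(df.groupby('blockHeight')[['crankNum']].count() > 1000).any()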
# ## @@ Older approaches
# ## Delivery statistics
#
# > For each delivery in the corpus, we want to get statistics on the range of wallclock times taken by these validators.
# +
import gzip
import itertools
def iter_cranks(path):
"""split each slogfile into runs (each beginning with an import-kernel event),
process each run by finding sequential matching deliver+deliver-result pairs,
turn each pair into a (crankNum, computrons, wallclock) triple
"""
log.info('iter_cranks: %s', path)
with gzip.open(path) as f:
kernel = None
deliver = None
block = None
syscalls = None
for (ix, line) in enumerate(f):
try:
data = json.loads(line)
except json.JSONDecodeError:
log.warning('%s:%d: bad JSON: %s', path.name, ix, repr(line))
continue
ty = data['type']
# print(ix, data['type'], kernel, deliver)
if ty == 'import-kernel-finish':
kernel = data
deliver = None
syscalls = None
yield dict(kernel,
slogfile=path.name, line=ix)
elif ty == 'create-vat':
yield dict(slogfile=path.name,
line=ix,
time=data['time'],
type=ty,
vatID=data['vatID'],
description=data['description'],
managerType=data['managerType'],
time_kernel=kernel['time'])
# {"time":1625059432.2093444,"type":"cosmic-swingset-end-block-start","blockHeight":58394,"blockTime":1625059394}
# {"time":1625059432.2096362,"type":"cosmic-swingset-end-block-finish","blockHeight":58394,"blockTime":1625059394}
elif ty == 'cosmic-swingset-end-block-start':
block = data
elif ty == 'cosmic-swingset-end-block-finish':
time = data['time']
time_start = block['time']
dur = time - time_start
if kernel:
time_kernel = kernel['time']
else:
log.warning('%s:%d: missing kernel context', path.name, ix)
time_kernel = np.nan
yield dict(slogfile=path.name,
line=ix,
time=time,
type=ty,
time_start=time_start,
dur=dur,
blockHeight=data['blockHeight'],
blockTime=data['blockTime'],
time_kernel=time_kernel)
block = None
elif deliver is None:
if ty == 'deliver':
deliver = data
syscalls = 0
elif data['type'] == 'deliver-result':
time = data['time']
time_start = deliver['time']
dur = time - time_start
method = deliver['kd'][2]['method'] if deliver['kd'][0] == 'message' else None
compute = data['dr'][2]['compute'] if type(data['dr'][2]) is type({}) else None
if block:
blockHeight = block['blockHeight']
blockTime=block['blockTime']
else:
# odd... how do we get here without block info???
log.warning('%s:%d: missing block context', path.name, ix)
blockHeight = blockTime = np.nan
if kernel:
time_kernel = kernel['time']
else:
log.warning('%s:%d: missing kernel context', path.name, ix)
time_kernel = np.nan
yield dict(slogfile=path.name,
line=ix,
time=time,
type=ty,
crankNum=data['crankNum'],
deliveryNum=data['deliveryNum'],
vatID=data['vatID'],
kd=deliver['kd'],
method=method,
syscalls=syscalls,
dr=data['dr'],
compute=compute,
time_start=time_start,
dur=dur,
blockHeight=blockHeight,
blockTime=blockTime,
time_kernel=time_kernel)
deliver = None
elif ty == 'syscall-result':
syscalls += 1
elif ty in ['clist', 'syscall']:
continue
else:
log.warning("%s:%d: expected deliver-result; got: %s", path.name, ix, ty)
deliver = None
def sample(files=50, cranks=2000, slogdir=slogdir):
return pd.DataFrame.from_records(
r
for slogfile in itertools.islice(slogdir.glob('**/*.slog.gz'), files)
for r in itertools.islice(iter_cranks(slogfile), cranks))
# files_top = sample(200, 100)
c500 = sample()
# -
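# Per-delivery wallclock spread across validators in the `c500` sample; treating
# (vatID, deliveryNum) as the delivery identity here is an assumption.
d = c500[c500.type == 'deliver-result']
d.groupby(['vatID', 'deliveryNum']).dur.agg(['count', 'min', 'median', 'max']).head()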
show_times(
files_top[files_top.crankNum == 1][[
'slogfile', 'line', 'time', 'vatID', 'deliveryNum', 'syscalls', 'compute', 'time_kernel', 'blockHeight']
].sort_values('blockHeight').set_index(['slogfile', 'line']),
['time'])
# +
def show_times(df, cols):
out = df.copy()
for col in cols:
out[col] = pd.to_datetime(out[col], unit='s')
return out
def slogfile_summary(df):
g = df.groupby(['slogfile', 'type'])
out = g[['line']].count()
out['time_min'] = g[['time']].min().time
out['time_max'] = g[['time']].max().time
out['blockHeight_min'] = g[['blockHeight']].min().blockHeight
# out['blockHeight_max'] = g[['blockHeight']].max().blockHeight
out['crankNum_min'] = g[['crankNum']].min().crankNum
return show_times(out, ['time_min', 'time_max'])
slogfile_summary(files_top) # [files_top.type == 'deliver-result']).sort_values('crankNum_min', ascending=False).head(15)
# +
def stuff(df, slogfile):
return df[(df.slogfile==slogfile) &
(df.type == 'deliver-result')][['crankNum', 'vatID', 'deliveryNum', 'kd', 'line', 'blockHeight' ]]
coolex = stuff(c500, 'coolex-agorictest16-chain.slog.gz').set_index('crankNum')
mym = stuff(c500, 'mymoniker-agorictest16-chain.slog.gz').set_index('crankNum')
xwalk = pd.merge(coolex, mym, left_index=True, right_index=True)
xwalk[xwalk.kd_x != xwalk.kd_y]
# -
xwalk[xwalk.deliveryNum_y == 2801].kd_y.iloc[0]
# warner says: suppose we have 2 deliverInboundAcks.
#
# When swingset tells the mb (mailbox) device, the device consults state _in RAM_ for the dup ack num...
# that state is not durable, so it differs between run-from-start and restart.
# ## global crankNum -> vatID, deliveryNum
cranks = c500[c500['type'] == 'deliver-result']
cranks = cranks[['chain_id', 'crankNum', 'vatID', 'deliveryNum']].set_index(['chain_id', 'crankNum']).drop_duplicates().sort_index()
cranks # .sort_values('deliveryNum')
c500 = c500[~c500.line.isnull()]
show_times(c500[c500.blockHeight == 64628], ['time', 'time_start', 'blockTime'])
cranks.pivot(columns='vatID', values='deliveryNum')
cranks.plot(subplots=True)
c500[['kd']].dropna()
c500[['compute']].dropna()
# +
## reduced data set
# chain-wide deliveries
# chain_id, crankNum -> blockHeight, vatID, deliveryNum, kd, compute
# chain_id, vatID, deliveryNum -> blockHeight, kd, compute
# except vatTP?
# per-validator data
# chain_id, crankNum, run (slogfile, kernel-start) -> dur
# +
# global crankNum -> vatID, deliveryNum
c500[['crankNum', 'vatID', 'deliveryNum']].set_index('crankNum')
# ignore un-full blocks?
# histogram of block durations; interval between...
# {"time":1625059432.2093444,"type":"cosmic-swingset-end-block-start","blockHeight":58394,"blockTime":1625059394}
# {"time":1625059432.2096362,"type":"cosmic-swingset-end-block-finish","blockHeight":58394,"blockTime":1625059394}
# "blockTime":1625059381 <- consensus block time is median of block times (?)
# vatID, deliveryNum -> args / syscalls
# watch out for GC esp.
# c.run(runPolicy)
# simple model: kernel says how many computrons
# refinement: computrons, syscalls
# fitness: block distribution... 10s blocks...
# blocks that aren't too big (latency, validator variance risk)
# cpu that isn't idle (throughput)
# an ideal: median block time 10s
# 80 20 %ile
# importing a contract is an outlier
# +
# median validator - existing distribution of deliveries / compute -> blocks
# supplement: study wallclock stuff
# -
show_times(c500[c500['type'] == 'deliver-result'].set_index(['crankNum', 'vatID', 'deliveryNum', 'slogfile'])
.drop(['type', 'kd', 'dr', 'time_dr', 'description', 'managerType'], axis=1).sort_index(),
['time', 'time_kernel', 'blockTime'])
# ### Missing `compute` meter info?
start1 = c500
start1[(start1['type'] == 'deliver-result') & start1.compute.isnull()]
compute_ref = start1[(start1.slogfile == 'coolex-agorictest16-chain.slog.gz') &
(start1['type'] == 'deliver-result')].set_index('crankNum')[['compute']]
compute_ref
compute_delta = start1[['slogfile', 'crankNum', 'compute']]
compute_delta = pd.merge(compute_delta, compute_ref,
left_on='crankNum', right_index=True, suffixes=['', '_ref'])
compute_delta['delta'] = (compute_delta.compute - compute_delta.compute_ref).abs()
compute_delta.sort_values('delta', ascending=False)
# +
df = start1
categories = df.vatID.apply(lambda v: int(v[1:]))
colors = cm.rainbow(np.linspace(0, 1, categories.max() + 1))
df.plot.scatter(x='compute', y='dur', c=colors[categories],
title='Deliveries (colored by vatID)',
figsize=(12, 9), ylabel="dur (sec)");
# -
start1[~start1.compute.isnull()].groupby('vatID')[['crankNum']].count().sort_values('crankNum', ascending=False)
# +
def vat_rate(df, vatID):
df = df[['vatID', 'deliveryNum', 'compute', 'dur']].dropna()
df['rate'] = df.compute / df.dur
df = df[df.vatID == vatID]
# df = df.groupby('deliveryNum')[['compute', 'dur', 'rate']].mean()
#df.sort_values('dur', ascending=False)
#df
df = df.set_index('deliveryNum').sort_index()
return df
def show_rate(df, vatID, figsize=(8, 9)):
df = vat_rate(df, vatID)
ax = df.plot(subplots=True, figsize=figsize)
def fit_line(df, x, y, figsize=(9, 6)):
cs = np.polyfit(df[x], df[y], 1)
f = np.poly1d(cs)
ax1 = df[[x, y]].plot.scatter(x=x, y=y, figsize=figsize)
df['fit'] = f(df[x])
df.plot(x=x, y='fit', color='Red', legend=False, ax=ax1);
# show_rate(start1, 'v10');
# vat_rate(start1, 'v10').plot.scatter(x='compute', y='dur')
fastSlog = start1[start1.slogfile == 'PDPnodeTestnet-agorictest16-chain.slog.gz']
fit_line(vat_rate(fastSlog, 'v10'), 'compute', 'dur')
# len(fastSlog[fastSlog.vatID == 'v10'])
# fastSlog[fastSlog.vatID == 'v10'].drop(['kd', 'dr'], axis=1) #.sort_values('compute', ascending=False)
#fastSlog[fastSlog.vatID == 'v10'].set_index('deliveryNum').sort_index()[['compute', 'dur']].plot(subplots=True)
# -
vat_rate(start1, 'v16');
df = start1.pivot(columns='vatID', values=['compute', 'dur'],
index=['vatID', 'deliveryNum', 'crankNum', 'slogfile', 'line'])
df.reset_index().set_index('deliveryNum').drop(['crankNum', 'line'], axis=1) #.plot(figsize=(12, 8));
df.reset_index().set_index('deliveryNum')[['v23']].sort_index().dropna() #.plot()
df.describe()
df[['v14']].dropna()
df.crankNum.hist();
df.deliveryNum.hist();
df.groupby('method')[['compute', 'rate']].describe()
df.groupby('method')[['rate', 'compute', 'dur']].mean().sort_values('rate').head(90).plot(
subplots=True, rot=90, figsize=(8, 6), title='Method Compute Cost, Rate: bottom 90');
df.groupby('method')[['rate', 'compute', 'dur']].mean().sort_values('rate').tail(8).plot(
subplots=True, rot=90, figsize=(8, 6), title='Method Compute Cost, Rate: top 8');
durByMethod.dur.sum()
# +
durByMethod = df.groupby('method')[['dur']].sum().sort_values('dur', ascending=False)
durByMethod.plot.pie(y='dur', figsize=(12, 9), autopct='%1.1f%%')
# -
df.groupby('vatID')[['rate']].describe().head(20)
df.groupby('slogfile')[['rate']].describe().head(20)
df.plot.scatter(x='deliveryNum', y='rate')
speed = df.groupby('slogfile')[['rate']].describe()[['rate'][0]][['count', 'mean', 'std']]
speed = speed.sort_values('mean', ascending=False)
speed['relative'] = speed['mean'] / speed['mean'][0]
speed
# +
def boxplot_sorted(df, by, column, **config):
df2 = pd.DataFrame({col:vals[column] for col, vals in df.groupby(by)})
meds = df2.median().sort_values()
return df2[meds.index].boxplot(**config)
ax = boxplot_sorted(df, by=["slogfile"], column="rate", rot=90, figsize=(12, 9))
ax.set_title('Validator Speed: Sample of 20 from Phase 4');
ax.set_ylabel('computrons / sec')
# -
ax = df.sort_values('crankNum').plot.scatter(x='crankNum', y='compute');
ax.set_yscale('log')
df[(df.dur < df.dur.mean() + df.dur.std()) &
(df.compute < df.compute.mean() + df.compute.std())][['compute', 'dur']].hist();
# +
df = crank_info(c500)
df = df[df.crankNum.isin(compute_ref.index)]
rate = np.polyfit(df.compute, df.dur, 1)
f = np.poly1d(rate)
df['rate'] = f(df.compute)
# df[['compute', 'dur', 'rate']].head()
print(f)
# -
ax1 = df[['compute', 'dur']].plot.scatter(x='compute', y='dur', figsize=(9, 6))
df.plot(x='compute', y='rate', color='Red', legend=False, ax=ax1);
ax1.set_title(f"{len(df)} cranks from w3m: Duration vs. Compute Meter");
ax1.set_xlabel("compute units")
ax1.set_ylabel("duration (sec)")
r = df.compute / df.dur
r.max() / r.min()
df.sort_values('rate', ascending=False).drop(['time', 'type', 'detail', 'detail_dr'], axis=1)
# ## Colophon: jupytext
#
# This is a jupyter notebook paired with a python script using [jupytext](https://jupytext.readthedocs.io/en/latest/).
#
# We use the [python38Packages.jupytext](https://search.nixos.org/packages?channel=21.05&from=0&size=50&buckets=%7B%22package_attr_set%22%3A%5B%22python38Packages%22%5D%2C%22package_license_set%22%3A%5B%5D%2C%22package_maintainers_set%22%3A%5B%5D%2C%22package_platforms%22%3A%5B%5D%7D&sort=relevance&query=jupytext) nix package; in particular, `/nix/store/a9911qj06dy0ah7fshl39x3w4cjs7bxk-python3.8-jupytext-1.11.2`.
#
| 30.668827 | 417 | 0.625632 | [
"CC0-1.0"
] | Agoric/testnet-notes | nb4/slogfiles.py | 52,047 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the CPIO extracted file-like object."""
from __future__ import unicode_literals
import unittest
from dfvfs.file_io import cpio_file_io
from dfvfs.path import cpio_path_spec
from dfvfs.path import os_path_spec
from dfvfs.resolver import context
from tests.file_io import test_lib
class CPIOBinaryFileTest(test_lib.SylogTestCase):
"""The unit test for a CPIO extracted file-like object."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
super(CPIOBinaryFileTest, self).setUp()
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.bin.cpio'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._cpio_path_spec = cpio_path_spec.CPIOPathSpec(
location='/syslog', parent=path_spec)
def testOpenClosePathSpec(self):
"""Test the open and close functionality using a path specification."""
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close()
def testSeek(self):
"""Test the seek functionality."""
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestSeekFileObject(file_object)
file_object.close()
def testRead(self):
"""Test the read functionality."""
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestReadFileObject(file_object)
file_object.close()
class CPIOPortableASCIIFileTest(test_lib.SylogTestCase):
"""The unit test for a CPIO extracted file-like object."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
super(CPIOPortableASCIIFileTest, self).setUp()
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.odc.cpio'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._cpio_path_spec = cpio_path_spec.CPIOPathSpec(
location='/syslog', parent=path_spec)
def testOpenClosePathSpec(self):
"""Test the open and close functionality using a path specification."""
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close()
def testSeek(self):
"""Test the seek functionality."""
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestSeekFileObject(file_object)
file_object.close()
def testRead(self):
"""Test the read functionality."""
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestReadFileObject(file_object)
file_object.close()
class CPIONewASCIIFileTest(test_lib.SylogTestCase):
"""The unit test for a CPIO extracted file-like object."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
super(CPIONewASCIIFileTest, self).setUp()
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.newc.cpio'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._cpio_path_spec = cpio_path_spec.CPIOPathSpec(
location='/syslog', parent=path_spec)
def testOpenClosePathSpec(self):
"""Test the open and close functionality using a path specification."""
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close()
def testSeek(self):
"""Test the seek functionality."""
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestSeekFileObject(file_object)
file_object.close()
def testRead(self):
"""Test the read functionality."""
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestReadFileObject(file_object)
file_object.close()
class CPIONewASCIIFileWithChecksumTest(test_lib.SylogTestCase):
"""The unit test for a CPIO extracted file-like object."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
super(CPIONewASCIIFileWithChecksumTest, self).setUp()
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.crc.cpio'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._cpio_path_spec = cpio_path_spec.CPIOPathSpec(
location='/syslog', parent=path_spec)
def testOpenClosePathSpec(self):
"""Test the open and close functionality using a path specification."""
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close()
def testSeek(self):
"""Test the seek functionality."""
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestSeekFileObject(file_object)
file_object.close()
def testRead(self):
"""Test the read functionality."""
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestReadFileObject(file_object)
file_object.close()
if __name__ == '__main__':
unittest.main()
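
# For example, a single format-specific test case can be run from the repository root with
# the standard unittest CLI (module path per this repository's layout):
#
#   python -m unittest tests.file_io.cpio_file_io.CPIOBinaryFileTest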
| 31.080214 | 75 | 0.75086 | ["Apache-2.0"] | Acidburn0zzz/dfvfs | tests/file_io/cpio_file_io.py | 5,812 | Python |
import time
import json
from anchore_engine.subsys import logger
def get_docker_registry_userpw(registry_record):
user = pw = None
try:
if 'registry_type' in registry_record and registry_record['registry_type'] == 'awsecr':
try:
ecr_creds = json.loads(registry_record['registry_meta'])
except Exception as err:
raise Exception("cannot access/parse registry metadata for awsecr registry type - exception: {}".format(str(err)))
docker_auth_token = ecr_creds['authorizationToken']
user, pw = docker_auth_token.split(":", 1)
else:
user = registry_record['registry_user']
pw = registry_record['registry_pass']
except Exception as err:
logger.error("cannot fetch registry creds from registry record - exception: " + str(err))
raise err
return user, pw
def get_creds_by_registry(registry, repository, registry_creds=None):
user = pw = registry_verify = None
if registry_creds:
try:
            registry_creds.sort(key=lambda x: len(x['registry']), reverse=True)  # most specific (longest) registry record wins
for registry_record in registry_creds:
if registry_record_matches(registry_record['registry'], registry, repository):
if registry_record['record_state_key'] not in ['active']:
try:
last_try = int(registry_record['record_state_val'])
except:
last_try = 0
if (int(time.time()) - last_try) < 60:
logger.debug("SKIPPING REGISTRY ATTEMPT: " + str(registry_record['record_state_key']))
raise Exception("registry not available - " + str(registry_record['record_state_key']))
user, pw = get_docker_registry_userpw(registry_record)
registry_verify = registry_record['registry_verify']
break
except Exception as err:
raise err
return user, pw, registry_verify
def registry_record_matches(registry_record_str, registry, repository):
"""
:param registry_record_str: the string with optional wildcard to match against a the registry/repository combo
:param registry: the registry to match against
:param repository: the repository to match against
:return: bool true if a match, false if not
"""
    return (
        (registry_record_str[-1] == '*'
         and '{}/{}'.format(registry, repository).startswith(registry_record_str[:-1]))
        or ('/' in registry_record_str
            and registry_record_str == '{}/{}'.format(registry, repository))
        or (registry_record_str == registry)
    )
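
# Illustrative applications of the rules above (registry/repository values are made up):
#
#   registry_record_matches('docker.io/library/*', 'docker.io', 'library/nginx')      -> True  (wildcard prefix)
#   registry_record_matches('docker.io/library/nginx', 'docker.io', 'library/nginx')  -> True  (exact repository record)
#   registry_record_matches('docker.io', 'docker.io', 'some/other-repo')              -> True  (registry-wide record)
#   registry_record_matches('quay.io', 'docker.io', 'library/nginx')                  -> False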
| 42.421875 | 254 | 0.628729 | ["Apache-2.0"] | Mattlk13/anchore-engine | anchore_engine/auth/common.py | 2,715 | Python |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from collections import defaultdict
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
import numpy as np
from ax.core.observation import ObservationData, ObservationFeatures
from ax.core.optimization_config import OptimizationConfig
from ax.core.outcome_constraint import ScalarizedOutcomeConstraint
from ax.core.search_space import SearchSpace
from ax.modelbridge.transforms.base import Transform
from ax.modelbridge.transforms.utils import get_data, match_ci_width_truncated
from ax.models.types import TConfig
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import checked_cast_list
from sklearn.preprocessing import PowerTransformer
if TYPE_CHECKING:
# import as module to make sphinx-autodoc-typehints happy
from ax import modelbridge as modelbridge_module # noqa F401 # pragma: no cover
logger = get_logger(__name__)
class PowerTransformY(Transform):
"""Transform the values to look as normally distributed as possible.
This fits a power transform to the data with the goal of making the transformed
values look as normally distributed as possible. We use Yeo-Johnson
(https://www.stat.umn.edu/arc/yjpower.pdf), which can handle both positive and
negative values.
While the transform seems to be quite robust, it probably makes sense to apply a
bit of winsorization and also standardize the inputs before applying the power
transform. The power transform will automatically standardize the data so the
data will remain standardized.
The transform can't be inverted for all values, so we apply clipping to move
values to the image of the transform. This behavior can be controlled via the
`clip_mean` setting.
"""
def __init__(
self,
search_space: SearchSpace,
observation_features: List[ObservationFeatures],
observation_data: List[ObservationData],
modelbridge: Optional[modelbridge_module.base.ModelBridge] = None,
config: Optional[TConfig] = None,
) -> None:
if config is None:
raise ValueError("PowerTransform requires a config.")
# pyre-fixme[6]: Same issue as for LogY
metric_names = list(config.get("metrics", []))
if len(metric_names) == 0:
raise ValueError("Must specify at least one metric in the config.")
self.clip_mean = config.get("clip_mean", True)
self.metric_names = metric_names
Ys = get_data(observation_data=observation_data, metric_names=metric_names)
self.power_transforms = _compute_power_transforms(Ys=Ys)
self.inv_bounds = _compute_inverse_bounds(self.power_transforms, tol=1e-10)
def transform_observation_data(
self,
observation_data: List[ObservationData],
observation_features: List[ObservationFeatures],
) -> List[ObservationData]:
"""Winsorize observation data in place."""
for obsd in observation_data:
for i, m in enumerate(obsd.metric_names):
if m in self.metric_names:
transform = self.power_transforms[m].transform
obsd.means[i], obsd.covariance[i, i] = match_ci_width_truncated(
mean=obsd.means[i],
variance=obsd.covariance[i, i],
transform=lambda y: transform(np.array(y, ndmin=2)),
lower_bound=-np.inf,
upper_bound=np.inf,
)
return observation_data
def untransform_observation_data(
self,
observation_data: List[ObservationData],
observation_features: List[ObservationFeatures],
) -> List[ObservationData]:
"""Winsorize observation data in place."""
for obsd in observation_data:
for i, m in enumerate(obsd.metric_names):
if m in self.metric_names:
l, u = self.inv_bounds[m]
transform = self.power_transforms[m].inverse_transform
if not self.clip_mean and (obsd.means[i] < l or obsd.means[i] > u):
raise ValueError(
"Can't untransform mean outside the bounds without clipping"
)
obsd.means[i], obsd.covariance[i, i] = match_ci_width_truncated(
mean=obsd.means[i],
variance=obsd.covariance[i, i],
transform=lambda y: transform(np.array(y, ndmin=2)),
lower_bound=l,
upper_bound=u,
clip_mean=True,
)
return observation_data
def transform_optimization_config(
self,
optimization_config: OptimizationConfig,
modelbridge: Optional[modelbridge_module.base.ModelBridge],
fixed_features: ObservationFeatures,
) -> OptimizationConfig:
for c in optimization_config.all_constraints:
if isinstance(c, ScalarizedOutcomeConstraint):
c_metric_names = [metric.name for metric in c.metrics]
intersection = set(c_metric_names) & set(self.metric_names)
if intersection:
raise NotImplementedError(
f"PowerTransformY cannot be used for metric(s) {intersection} "
"that are part of a ScalarizedOutcomeConstraint."
)
elif c.metric.name in self.metric_names:
if c.relative:
raise ValueError(
f"PowerTransformY cannot be applied to metric {c.metric.name} "
"since it is subject to a relative constraint."
)
else:
transform = self.power_transforms[c.metric.name].transform
c.bound = transform(np.array(c.bound, ndmin=2)).item()
return optimization_config
def _compute_power_transforms(
Ys: Dict[str, List[float]]
) -> Dict[str, PowerTransformer]:
"""Compute power transforms."""
power_transforms = {}
for k, ys in Ys.items():
y = np.array(ys)[:, None] # Need to unsqueeze the last dimension
pt = PowerTransformer(method="yeo-johnson").fit(y)
power_transforms[k] = pt
return power_transforms
def _compute_inverse_bounds(
power_transforms: Dict[str, PowerTransformer], tol: float = 1e-10
) -> Dict[str, Tuple[float, float]]:
"""Computes the image of the transform so we can clip when we untransform.
The inverse of the Yeo-Johnson transform is given by:
if X >= 0 and lambda == 0:
X = exp(X_trans) - 1
elif X >= 0 and lambda != 0:
X = (X_trans * lambda + 1) ** (1 / lambda) - 1
elif X < 0 and lambda != 2:
X = 1 - (-(2 - lambda) * X_trans + 1) ** (1 / (2 - lambda))
elif X < 0 and lambda == 2:
X = 1 - exp(-X_trans)
We can break this down into three cases:
lambda < 0: X < -1 / lambda
0 <= lambda <= 2: X is unbounded
lambda > 2: X > 1 / (2 - lambda)
Sklearn standardizes the transformed values to have mean zero and standard
deviation 1, so we also need to account for this when we compute the bounds.
"""
inv_bounds = defaultdict()
for k, pt in power_transforms.items():
bounds = [-np.inf, np.inf]
mu, sigma = pt._scaler.mean_.item(), pt._scaler.scale_.item() # pyre-ignore
lambda_ = pt.lambdas_.item() # pyre-ignore
if lambda_ < -1 * tol:
bounds[1] = (-1.0 / lambda_ - mu) / sigma
elif lambda_ > 2.0 + tol:
bounds[0] = (1.0 / (2.0 - lambda_) - mu) / sigma
inv_bounds[k] = tuple(checked_cast_list(float, bounds))
return inv_bounds
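

# A minimal usage sketch (not part of the Ax API; the metric name and data below are
# synthetic) showing how the two helpers above combine: fit a Yeo-Johnson transform per
# metric, then read off the range of transformed values that can safely be inverted back.
if __name__ == "__main__":  # pragma: no cover
    rng = np.random.default_rng(0)
    ys = {"objective": rng.lognormal(size=50).tolist()}  # skewed, positive-valued metric
    pts = _compute_power_transforms(Ys=ys)
    print(_compute_inverse_bounds(pts))  # bounds depend on the fitted lambda for "objective"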
| 43 | 88 | 0.630767 | ["MIT"] | danielcohenlive/Ax-1 | ax/modelbridge/transforms/power_transform_y.py | 8,041 | Python |
#
# Copyright 2019 Jonas Berg
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
.. moduleauthor:: Jonas Berg
dummy_serial: A dummy/mock implementation of a serial port for testing purposes.
"""
__author__ = "Jonas Berg"
__license__ = "Apache License, Version 2.0"
import sys
import time
DEFAULT_TIMEOUT = 0.01
"""The default timeot value in seconds. Used if not set by the constructor."""
DEFAULT_BAUDRATE = 19200
"""The default baud rate. Used if not set by the constructor."""
VERBOSE = False
"""Set this to :const:`True` for printing the communication, and also details on the port initialization.
Might be monkey-patched in the calling test module.
"""
RESPONSES = {}
"""A dictionary of respones from the dummy serial port.
The key is the message (string) sent to the dummy serial port, and the item is the response (string)
from the dummy serial port.
Intended to be monkey-patched in the calling test module.
"""
RESPONSES["EXAMPLEREQUEST"] = "EXAMPLERESPONSE"
DEFAULT_RESPONSE = "NotFoundInResponseDictionary"
"""Response when no matching message (key) is found in the look-up dictionary.
Should not be an empty string, as that is interpreted as "no data available on port".
Might be monkey-patched in the calling test module.
"""
NO_DATA_PRESENT = ""
class Serial:
"""Dummy (mock) serial port for testing purposes.
Mimics the behavior of a serial port as defined by the `pySerial <https://github.com/pyserial/pyserial>`_ module.
Args:
* port:
* timeout:
Note:
        As the portname argument is not used properly, only one port on :mod:`dummy_serial` can be used simultaneously.
"""
def __init__(self, *args, **kwargs):
self._waiting_data = NO_DATA_PRESENT
self._isOpen = True
self.port = kwargs["port"] # Serial port name.
self._initial_port_name = self.port # Initial name given to the serial port
        try:
            self.timeout = kwargs["timeout"]
        except KeyError:
            self.timeout = DEFAULT_TIMEOUT
        try:
            self.baudrate = kwargs["baudrate"]
        except KeyError:
            self.baudrate = DEFAULT_BAUDRATE
if VERBOSE:
_print_out("\nDummy_serial: Initializing")
_print_out("dummy_serial initialization args: " + repr(args))
_print_out("dummy_serial initialization kwargs: " + repr(kwargs) + "\n")
def __repr__(self):
"""String representation of the dummy_serial object"""
return "{0}.{1}<id=0x{2:x}, open={3}>(port={4!r}, timeout={5!r}, waiting_data={6!r})".format(
self.__module__,
self.__class__.__name__,
id(self),
self._isOpen,
self.port,
self.timeout,
self._waiting_data,
)
@property
def is_open(self):
return self._isOpen
def reset_input_buffer(self):
pass
def reset_output_buffer(self):
pass
def open(self):
"""Open a (previously initialized) port on dummy_serial."""
if VERBOSE:
_print_out("\nDummy_serial: Opening port\n")
if self._isOpen:
raise IOError("Dummy_serial: The port is already open")
self._isOpen = True
self.port = self._initial_port_name
def close(self):
"""Close a port on dummy_serial."""
if VERBOSE:
_print_out("\nDummy_serial: Closing port\n")
if not self._isOpen:
raise IOError("Dummy_serial: The port is already closed")
self._isOpen = False
self.port = None
def write(self, inputdata):
"""Write to a port on dummy_serial.
Args:
inputdata (string/bytes): data for sending to the port on dummy_serial. Will affect the response
for subsequent read operations.
Note that for Python2, the inputdata should be a **string**. For Python3 it should be of type **bytes**.
"""
if VERBOSE:
_print_out(
"\nDummy_serial: Writing to port. Given:" + repr(inputdata) + "\n"
)
if sys.version_info[0] > 2:
if not type(inputdata) == bytes:
raise TypeError(
"The input must be type bytes. Given:" + repr(inputdata)
)
inputstring = str(inputdata, encoding="latin1")
else:
inputstring = inputdata
if not self._isOpen:
raise IOError(
"Dummy_serial: Trying to write, but the port is not open. Given:"
+ repr(inputdata)
)
# Look up which data that should be waiting for subsequent read commands
try:
response = RESPONSES[inputstring]
        except KeyError:
response = DEFAULT_RESPONSE
self._waiting_data = response
def read(self, numberOfBytes):
"""Read from a port on dummy_serial.
The response is dependent on what was written last to the port on dummy_serial,
and what is defined in the :data:`RESPONSES` dictionary.
Args:
numberOfBytes (int): For compability with the real function.
Returns a **string** for Python2 and **bytes** for Python3.
If the response is shorter than numberOfBytes, it will sleep for timeout.
If the response is longer than numberOfBytes, it will return only numberOfBytes bytes.
"""
if VERBOSE:
_print_out(
"\nDummy_serial: Reading from port (max length {!r} bytes)".format(
numberOfBytes
)
)
if numberOfBytes < 0:
raise IOError(
"Dummy_serial: The numberOfBytes to read must not be negative. Given: {!r}".format(
numberOfBytes
)
)
if not self._isOpen:
raise IOError("Dummy_serial: Trying to read, but the port is not open.")
# Do the actual reading from the waiting data, and simulate the influence of numberOfBytes
if self._waiting_data == DEFAULT_RESPONSE:
returnstring = self._waiting_data
elif numberOfBytes == len(self._waiting_data):
returnstring = self._waiting_data
self._waiting_data = NO_DATA_PRESENT
elif numberOfBytes < len(self._waiting_data):
if VERBOSE:
_print_out(
"Dummy_serial: The numberOfBytes to read is smaller than the available data. "
+ "Some bytes will be kept for later. Available data: {!r} (length = {}), numberOfBytes: {}".format(
self._waiting_data, len(self._waiting_data), numberOfBytes
)
)
returnstring = self._waiting_data[:numberOfBytes]
self._waiting_data = self._waiting_data[numberOfBytes:]
else: # Wait for timeout, as we have asked for more data than available
if VERBOSE:
_print_out(
"Dummy_serial: The numberOfBytes to read is larger than the available data. "
+ "Will sleep until timeout. Available data: {!r} (length = {}), numberOfBytes: {}".format(
self._waiting_data, len(self._waiting_data), numberOfBytes
)
)
time.sleep(self.timeout)
returnstring = self._waiting_data
self._waiting_data = NO_DATA_PRESENT
# TODO Adapt the behavior to better mimic the Windows behavior
if VERBOSE:
_print_out(
"Dummy_serial read return data: {!r} (has length {})\n".format(
returnstring, len(returnstring)
)
)
if sys.version_info[0] > 2: # Convert types to make it python3 compatible
return bytes(returnstring, encoding="latin1")
else:
return returnstring
def _print_out(inputstring):
"""Print the inputstring. To make it compatible with Python2 and Python3."""
sys.stdout.write(inputstring + "\n")
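

# A short usage sketch (test-style; the port name and request/response below are
# illustrative only): monkey-patch RESPONSES, then drive the dummy port like a pySerial port.
if __name__ == "__main__":
    RESPONSES["PING"] = "PONG"
    dummy_port = Serial(port="/dev/ttyUSB0", timeout=0.1, baudrate=19200)
    dummy_port.write(b"PING" if sys.version_info[0] > 2 else "PING")
    print(dummy_port.read(4))  # b'PONG' on Python 3
    dummy_port.close()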
| 32.866412 | 120 | 0.61166 | ["Apache-2.0"] | edgar-bonet/minimalmodbus | dummy_serial.py | 8,611 | Python |
#!/usr/bin/env python
import os
from setuptools import setup
from setuptools import find_packages
import sys
from financialdatapy import __version__ as VERSION
# 'setup.py publish' shortcut.
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist bdist_wheel')
os.system('twine upload dist/*')
sys.exit()
description = 'Extract financial data of a company.'
with open('README.md', 'r') as f:
long_description = f.read()
install_requires = [
'pandas>=1.4.0',
'requests>=2.27.1',
'xmltodict>=0.12.0',
'python-dotenv>=0.19.2',
'beautifulsoup4>=4.10.0',
'lxml>=4.7.1',
'user_agent>=0.1.10',
]
project_urls = {
'Source': 'https://github.com/choi-jiwoo/financialdatapy',
}
setup(
name='financialdatapy',
version=VERSION,
author='Choi Jiwoo',
author_email='[email protected]',
description=description,
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
python_requires='>=3.10',
keywords=['python', 'stock', 'finance'],
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Education',
'Programming Language :: Python :: 3.10',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
project_urls=project_urls,
)
| 25.928571 | 62 | 0.660468 | ["MIT"] | choi-jiwoo/financialdatapy | setup.py | 1,452 | Python |
# IMPORT MANAGEMENT
try:
import gevent.monkey
except ModuleNotFoundError:
import os
os.system('pip install -r requirements.txt')
import gevent.monkey
gevent.monkey.patch_all() # patch everything
import colorama
colorama.init(autoreset=True)
import discord.commands
import asyncio
import discord
import dotenv
import os
# IMPORTS
from discord.ext import commands
from cogs.helpers import config, management
from discord_together import DiscordTogether
# SETTINGS
COLOR = config.load()['color-primary']
TESTING_MODE = management.testing_mode()
PREFIX = '//'
# SETUP
dotenv.load_dotenv() # initialize virtual environment
token = os.getenv('DISCORD_TOKEN')
client = commands.Bot(command_prefix=PREFIX, intents=discord.Intents.all())
async def status_task():
while True:
await client.change_presence(activity=discord.Game(f'v0.5・open source'))
@client.event
async def on_ready():
management.set_start_time()
if management.testing_mode():
await client.change_presence(status=discord.Status.idle)
print(colorama.Fore.GREEN + 'ONLINE as', client.user)
client.togetherControl = await DiscordTogether(token)
client.loop.create_task(status_task())
# load cogs
# credit: https://youtu.be/vQw8cFfZPx0
for filename in os.listdir(os.getcwd() + '/src/cogs/'):
if filename.endswith('.py'):
client.load_extension(f'cogs.{filename[:-3]}')
try:
client.run(token) # run bot with the token set in the .env file
except:
    print(colorama.Fore.RED + 'Unable to run the client. Please check your bot token.')
| 25.754098 | 87 | 0.741566 | ["MIT"] | nsde/novalix | src/bot.py | 1,573 | Python |
from dataclasses import dataclass, field
from typing import List, Optional
@dataclass
class Regex:
att: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"pattern": r"\p{IsSpacingModifierLetters}+",
}
)
@dataclass
class Doc:
class Meta:
name = "doc"
elem: List[Regex] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
}
)
| 17.785714 | 56 | 0.53012 | ["MIT"] | tefra/xsdata-w3c-tests | output/models/ms_data/regex/re_l6_xsd/re_l6.py | 498 | Python |
from __future__ import unicode_literals, division, absolute_import
import logging
import re
from datetime import datetime
from sqlalchemy import Column, Unicode, Integer
from flexget import plugin
from flexget.event import event
from flexget.utils import requests
from flexget.utils.soup import get_soup
from flexget.utils.titles.series import name_to_re
from flexget.db_schema import versioned_base
log = logging.getLogger('pogcal_acquired')
Base = versioned_base('pogcal_acquired', 0)
session = requests.Session(max_retries=3)
class PogcalShow(Base):
__tablename__ = 'pogcal_shows'
id = Column(Integer, primary_key=True, autoincrement=False, nullable=False)
name = Column(Unicode)
class PogcalAcquired(object):
schema = {
'type': 'object',
'properties': {
'username': {'type': 'string'},
'password': {'type': 'string'}
},
'required': ['username', 'password'],
'additionalProperties': False
}
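
    # An illustrative FlexGet task configuration for this plugin (placeholder credentials):
    #
    #   tasks:
    #     my-series-task:
    #       pogcal_acquired:
    #         username: someuser
    #         password: somepass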
@plugin.priority(-255)
def on_task_output(self, task, config):
if not task.accepted and not task.options.test:
return
try:
result = session.post('http://www.pogdesign.co.uk/cat/',
data={'username': config['username'],
'password': config['password'],
'sub_login': 'Account Login'})
except requests.RequestException as e:
log.error('Error logging in to pog calendar: %s' % e)
return
if 'logout' not in result.text:
log.error('Username/password for pogdesign calendar appear to be incorrect.')
return
elif task.options.test:
log.verbose('Successfully logged in to pogdesign calendar.')
for entry in task.accepted:
if not entry.get('series_name') or not entry.get('series_id_type') == 'ep':
continue
show_id = self.find_show_id(entry['series_name'], task.session)
if not show_id:
log.debug('Could not find pogdesign calendar id for `%s`' % entry['series_name'])
continue
if task.options.test:
                log.verbose('Would mark %s %s in pogdesign calendar.' % (entry['series_name'], entry['series_id']))
continue
else:
                log.verbose('Marking %s %s in pogdesign calendar.' % (entry['series_name'], entry['series_id']))
shid = '%s-%s-%s/%s-%s' % (show_id, entry['series_season'], entry['series_episode'],
datetime.now().month, datetime.now().year)
try:
session.post('http://www.pogdesign.co.uk/cat/watchhandle',
data={'watched': 'adding', 'shid': shid})
except requests.RequestException as e:
log.error('Error marking %s %s in pogdesign calendar: %s' %
(entry['series_name'], entry['series_id'], e))
def find_show_id(self, show_name, db_sess):
# Check if we have this show id cached
show_name = show_name.lower()
db_show = db_sess.query(PogcalShow).filter(PogcalShow.name == show_name).first()
if db_show:
return db_show.id
try:
page = session.get('http://www.pogdesign.co.uk/cat/showselect.php')
except requests.RequestException as e:
            log.error('Error looking up show list from pogdesign calendar: %s' % e)
return
# Try to find the show id from pogdesign show list
show_re = name_to_re(None, show_name)
soup = get_soup(page.content)
search = re.compile(show_re, flags=re.I)
show = soup.find(text=search)
if show:
id = int(show.previous['value'])
db_sess.add(PogcalShow(id=id, name=show_name))
return id
else:
log.verbose('Could not find pogdesign calendar id for show `%s`' % show_re)
@event('plugin.register')
def register_plugin():
plugin.register(PogcalAcquired, 'pogcal_acquired', api_ver=2)
| 40.72549 | 116 | 0.596052 | ["MIT"] | RSully/flexget-flexget | flexget/plugins/services/pogcal_acquired.py | 4,154 | Python |
import copy
import datetime
import decimal
import inspect
import json
import logging
import traceback
import uuid
import warnings
from collections import Counter, defaultdict, namedtuple
from collections.abc import Hashable
from functools import wraps
from typing import List
from dateutil.parser import parse
from great_expectations import __version__ as ge_version
from great_expectations.core.evaluation_parameters import build_evaluation_parameters
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.core.expectation_suite import (
ExpectationSuite,
expectationSuiteSchema,
)
from great_expectations.core.expectation_validation_result import (
ExpectationSuiteValidationResult,
ExpectationValidationResult,
)
from great_expectations.core.id_dict import BatchKwargs
from great_expectations.core.run_identifier import RunIdentifier
from great_expectations.data_asset.util import (
parse_result_format,
recursively_convert_to_json_serializable,
)
from great_expectations.exceptions import GreatExpectationsError
from great_expectations.marshmallow__shade import ValidationError
logger = logging.getLogger(__name__)
logging.captureWarnings(True)
class DataAsset:
# This should in general only be changed when a subclass *adds expectations* or *changes expectation semantics*
# That way, multiple backends can implement the same data_asset_type
_data_asset_type = "DataAsset"
def __init__(self, *args, **kwargs):
"""
Initialize the DataAsset.
:param profiler (profiler class) = None: The profiler that should be run on the data_asset to
build a baseline expectation suite.
Note: DataAsset is designed to support multiple inheritance (e.g. PandasDataset inherits from both a
Pandas DataFrame and Dataset which inherits from DataAsset), so it accepts generic *args and **kwargs arguments
so that they can also be passed to other parent classes. In python 2, there isn't a clean way to include all of
*args, **kwargs, and a named kwarg...so we use the inelegant solution of popping from kwargs, leaving the
support for the profiler parameter not obvious from the signature.
"""
interactive_evaluation = kwargs.pop("interactive_evaluation", True)
profiler = kwargs.pop("profiler", None)
expectation_suite = kwargs.pop("expectation_suite", None)
expectation_suite_name = kwargs.pop("expectation_suite_name", None)
data_context = kwargs.pop("data_context", None)
batch_kwargs = kwargs.pop(
"batch_kwargs", BatchKwargs(ge_batch_id=str(uuid.uuid1()))
)
batch_parameters = kwargs.pop("batch_parameters", {})
batch_markers = kwargs.pop("batch_markers", {})
if "autoinspect_func" in kwargs:
warnings.warn(
"Autoinspect_func is no longer supported; use a profiler instead (migration is easy!).",
category=DeprecationWarning,
)
super().__init__(*args, **kwargs)
self._config = {"interactive_evaluation": interactive_evaluation}
self._initialize_expectations(
expectation_suite=expectation_suite,
expectation_suite_name=expectation_suite_name,
)
self._data_context = data_context
self._batch_kwargs = BatchKwargs(batch_kwargs)
self._batch_markers = batch_markers
self._batch_parameters = batch_parameters
# This special state variable tracks whether a validation run is going on, which will disable
# saving expectation config objects
self._active_validation = False
if profiler is not None:
profiler.profile(self)
if data_context and hasattr(data_context, "_expectation_explorer_manager"):
self.set_default_expectation_argument("include_config", True)
def list_available_expectation_types(self):
keys = dir(self)
return [
expectation for expectation in keys if expectation.startswith("expect_")
]
def autoinspect(self, profiler):
"""Deprecated: use profile instead.
Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own.
Args:
profiler: The profiler to use
Returns:
tuple(expectation_suite, validation_results)
"""
warnings.warn(
"The term autoinspect is deprecated and will be removed in a future release. Please use 'profile'\
instead."
)
expectation_suite, validation_results = profiler.profile(self)
return expectation_suite, validation_results
def profile(self, profiler, profiler_configuration=None):
"""Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own.
Args:
profiler: The profiler to use
profiler_configuration: Optional profiler configuration dict
Returns:
tuple(expectation_suite, validation_results)
"""
expectation_suite, validation_results = profiler.profile(
self, profiler_configuration
)
return expectation_suite, validation_results
# TODO: add warning if no expectation_explorer_manager and how to turn on
def edit_expectation_suite(self):
return self._data_context._expectation_explorer_manager.edit_expectation_suite(
self
)
@classmethod
def expectation(cls, method_arg_names):
"""Manages configuration and running of expectation objects.
Expectation builds and saves a new expectation configuration to the DataAsset object. It is the core decorator \
used by great expectations to manage expectation configurations.
Args:
method_arg_names (List) : An ordered list of the arguments used by the method implementing the expectation \
(typically the result of inspection). Positional arguments are explicitly mapped to \
keyword arguments when the expectation is run.
Notes:
Intermediate decorators that call the core @expectation decorator will most likely need to pass their \
decorated methods' signature up to the expectation decorator. For example, the MetaPandasDataset \
column_map_expectation decorator relies on the DataAsset expectation decorator, but will pass through the \
signature from the implementing method.
@expectation intercepts and takes action based on the following parameters:
* include_config (boolean or None) : \
If True, then include the generated expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
* catch_exceptions (boolean or None) : \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
* result_format (str or None) : \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
* meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
"""
def outer_wrapper(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
# Get the name of the method
method_name = func.__name__
# Combine all arguments into a single new "all_args" dictionary to name positional parameters
all_args = dict(zip(method_arg_names, args))
all_args.update(kwargs)
# Unpack display parameters; remove them from all_args if appropriate
if "include_config" in kwargs:
include_config = kwargs["include_config"]
del all_args["include_config"]
else:
include_config = self.default_expectation_args["include_config"]
if "catch_exceptions" in kwargs:
catch_exceptions = kwargs["catch_exceptions"]
del all_args["catch_exceptions"]
else:
catch_exceptions = self.default_expectation_args["catch_exceptions"]
if "result_format" in kwargs:
result_format = kwargs["result_format"]
else:
result_format = self.default_expectation_args["result_format"]
# Extract the meta object for use as a top-level expectation_config holder
if "meta" in kwargs:
meta = kwargs["meta"]
del all_args["meta"]
else:
meta = None
# Get the signature of the inner wrapper:
argspec = inspect.getfullargspec(func)[0][1:]
if "result_format" in argspec:
all_args["result_format"] = result_format
else:
if "result_format" in all_args:
del all_args["result_format"]
all_args = recursively_convert_to_json_serializable(all_args)
# Patch in PARAMETER args, and remove locally-supplied arguments
# This will become the stored config
expectation_args = copy.deepcopy(all_args)
if self._expectation_suite.evaluation_parameters:
(
evaluation_args,
substituted_parameters,
) = build_evaluation_parameters(
expectation_args,
self._expectation_suite.evaluation_parameters,
self._config.get("interactive_evaluation", True),
self._data_context,
)
else:
(
evaluation_args,
substituted_parameters,
) = build_evaluation_parameters(
expectation_args,
None,
self._config.get("interactive_evaluation", True),
self._data_context,
)
# Construct the expectation_config object
expectation_config = ExpectationConfiguration(
expectation_type=method_name, kwargs=expectation_args, meta=meta
)
raised_exception = False
exception_traceback = None
exception_message = None
# Finally, execute the expectation method itself
if (
self._config.get("interactive_evaluation", True)
or self._active_validation
):
try:
return_obj = func(self, **evaluation_args)
if isinstance(return_obj, dict):
return_obj = ExpectationValidationResult(**return_obj)
except Exception as err:
if catch_exceptions:
raised_exception = True
exception_traceback = traceback.format_exc()
exception_message = "{}: {}".format(
type(err).__name__, str(err)
)
return_obj = ExpectationValidationResult(success=False)
else:
raise err
else:
return_obj = ExpectationValidationResult(
expectation_config=copy.deepcopy(expectation_config)
)
# If validate has set active_validation to true, then we do not save the config to avoid
# saving updating expectation configs to the same suite during validation runs
if self._active_validation is True:
stored_config = expectation_config
else:
# Append the expectation to the config.
stored_config = self._expectation_suite.add_expectation(
expectation_config
)
if include_config:
return_obj.expectation_config = copy.deepcopy(stored_config)
# If there was no interactive evaluation, success will not have been computed.
if return_obj.success is not None:
# Add a "success" object to the config
stored_config.success_on_last_run = return_obj.success
if catch_exceptions:
return_obj.exception_info = {
"raised_exception": raised_exception,
"exception_message": exception_message,
"exception_traceback": exception_traceback,
}
if len(substituted_parameters) > 0:
if meta is None:
meta = dict()
meta["substituted_parameters"] = substituted_parameters
# Add meta to return object
if meta is not None:
return_obj.meta = meta
return_obj = recursively_convert_to_json_serializable(return_obj)
if self._data_context is not None:
return_obj = self._data_context.update_return_obj(self, return_obj)
return return_obj
return wrapper
return outer_wrapper
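
    # Illustrative only (the subclass and expectation below are hypothetical): a concrete
    # data asset wraps its implementing methods with this decorator so that each call is
    # recorded in the expectation suite and returns an ExpectationValidationResult.
    #
    #     class MyDataAsset(DataAsset):
    #         @DataAsset.expectation(["column"])
    #         def expect_column_to_exist(self, column):
    #             return {"success": column in ["id", "name", "value"]}  # toy check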
def _initialize_expectations(
self, expectation_suite=None, expectation_suite_name=None
):
"""Instantiates `_expectation_suite` as empty by default or with a specified expectation `config`.
In addition, this always sets the `default_expectation_args` to:
`include_config`: False,
`catch_exceptions`: False,
`output_format`: 'BASIC'
By default, initializes data_asset_type to the name of the implementing class, but subclasses
that have interoperable semantics (e.g. Dataset) may override that parameter to clarify their
interoperability.
Args:
expectation_suite (json): \
A json-serializable expectation config. \
If None, creates default `_expectation_suite` with an empty list of expectations and \
key value `data_asset_name` as `data_asset_name`.
expectation_suite_name (string): \
The name to assign to the `expectation_suite.expectation_suite_name`
Returns:
None
"""
if expectation_suite is not None:
if isinstance(expectation_suite, dict):
expectation_suite = expectationSuiteSchema.load(expectation_suite)
else:
expectation_suite = copy.deepcopy(expectation_suite)
self._expectation_suite = expectation_suite
if expectation_suite_name is not None:
if (
self._expectation_suite.expectation_suite_name
!= expectation_suite_name
):
logger.warning(
"Overriding existing expectation_suite_name {n1} with new name {n2}".format(
n1=self._expectation_suite.expectation_suite_name,
n2=expectation_suite_name,
)
)
self._expectation_suite.expectation_suite_name = expectation_suite_name
else:
if expectation_suite_name is None:
expectation_suite_name = "default"
self._expectation_suite = ExpectationSuite(
expectation_suite_name=expectation_suite_name
)
self._expectation_suite.data_asset_type = self._data_asset_type
self.default_expectation_args = {
"include_config": True,
"catch_exceptions": False,
"result_format": "BASIC",
}
def append_expectation(self, expectation_config):
"""This method is a thin wrapper for ExpectationSuite.append_expectation"""
warnings.warn(
"append_expectation is deprecated, and will be removed in a future release. "
+ "Please use ExpectationSuite.add_expectation instead.",
DeprecationWarning,
)
self._expectation_suite.append_expectation(expectation_config)
def find_expectation_indexes(
self,
expectation_configuration: ExpectationConfiguration,
match_type: str = "domain",
) -> List[int]:
"""This method is a thin wrapper for ExpectationSuite.find_expectation_indexes"""
warnings.warn(
"find_expectation_indexes is deprecated, and will be removed in a future release. "
+ "Please use ExpectationSuite.find_expectation_indexes instead.",
DeprecationWarning,
)
return self._expectation_suite.find_expectation_indexes(
expectation_configuration=expectation_configuration, match_type=match_type
)
def find_expectations(
self,
expectation_configuration: ExpectationConfiguration,
match_type: str = "domain",
) -> List[ExpectationConfiguration]:
"""This method is a thin wrapper for ExpectationSuite.find_expectations()"""
warnings.warn(
"find_expectations is deprecated, and will be removed in a future release. "
+ "Please use ExpectationSuite.find_expectation_indexes instead.",
DeprecationWarning,
)
return self._expectation_suite.find_expectations(
expectation_configuration=expectation_configuration, match_type=match_type
)
def remove_expectation(
self,
expectation_configuration: ExpectationConfiguration,
match_type: str = "domain",
remove_multiple_matches: bool = False,
) -> List[ExpectationConfiguration]:
"""This method is a thin wrapper for ExpectationSuite.remove()"""
warnings.warn(
"DataAsset.remove_expectations is deprecated, and will be removed in a future release. "
+ "Please use ExpectationSuite.remove_expectation instead.",
DeprecationWarning,
)
return self._expectation_suite.remove_expectation(
expectation_configuration=expectation_configuration,
match_type=match_type,
remove_multiple_matches=remove_multiple_matches,
)
def set_config_value(self, key, value):
self._config[key] = value
def get_config_value(self, key):
return self._config[key]
@property
def batch_kwargs(self):
return self._batch_kwargs
@property
def batch_id(self):
return self.batch_kwargs.to_id()
@property
def batch_markers(self):
return self._batch_markers
@property
def batch_parameters(self):
return self._batch_parameters
def discard_failing_expectations(self):
res = self.validate(only_return_failures=True).results
if any(res):
for item in res:
self.remove_expectation(
expectation_configuration=item.expectation_config,
match_type="runtime",
)
warnings.warn("Removed %s expectations that were 'False'" % len(res))
def get_default_expectation_arguments(self):
"""Fetch default expectation arguments for this data_asset
Returns:
A dictionary containing all the current default expectation arguments for a data_asset
Ex::
{
"include_config" : True,
"catch_exceptions" : False,
"result_format" : 'BASIC'
}
See also:
set_default_expectation_arguments
"""
return self.default_expectation_args
def set_default_expectation_argument(self, argument, value):
"""Set a default expectation argument for this data_asset
Args:
argument (string): The argument to be replaced
value : The New argument to use for replacement
Returns:
None
See also:
get_default_expectation_arguments
"""
# !!! Maybe add a validation check here?
self.default_expectation_args[argument] = value
def get_expectations_config(
self,
discard_failed_expectations=True,
discard_result_format_kwargs=True,
discard_include_config_kwargs=True,
discard_catch_exceptions_kwargs=True,
suppress_warnings=False,
):
warnings.warn(
"get_expectations_config is deprecated, and will be removed in a future release. "
+ "Please use get_expectation_suite instead.",
DeprecationWarning,
)
return self.get_expectation_suite(
discard_failed_expectations,
discard_result_format_kwargs,
discard_include_config_kwargs,
discard_catch_exceptions_kwargs,
suppress_warnings,
)
def get_expectation_suite(
self,
discard_failed_expectations=True,
discard_result_format_kwargs=True,
discard_include_config_kwargs=True,
discard_catch_exceptions_kwargs=True,
suppress_warnings=False,
suppress_logging=False,
):
"""Returns _expectation_config as a JSON object, and perform some cleaning along the way.
Args:
discard_failed_expectations (boolean): \
Only include expectations with success_on_last_run=True in the exported config. Defaults to `True`.
discard_result_format_kwargs (boolean): \
In returned expectation objects, suppress the `result_format` parameter. Defaults to `True`.
discard_include_config_kwargs (boolean): \
In returned expectation objects, suppress the `include_config` parameter. Defaults to `True`.
discard_catch_exceptions_kwargs (boolean): \
In returned expectation objects, suppress the `catch_exceptions` parameter. Defaults to `True`.
suppress_warnings (boolean): \
If true, do not include warnings in logging information about the operation.
suppress_logging (boolean): \
If true, do not create a log entry (useful when using get_expectation_suite programmatically)
Returns:
An expectation suite.
Note:
get_expectation_suite does not affect the underlying expectation suite at all. The returned suite is a \
copy of _expectation_suite, not the original object.
"""
expectation_suite = copy.deepcopy(self._expectation_suite)
expectations = expectation_suite.expectations
discards = defaultdict(int)
if discard_failed_expectations:
new_expectations = []
for expectation in expectations:
# Note: This is conservative logic.
                # Instead of retaining expectations IFF success==True, it discards expectations IFF success==False.
# In cases where expectation.success is missing or None, expectations are *retained*.
# Such a case could occur if expectations were loaded from a config file and never run.
if expectation.success_on_last_run is False:
discards["failed_expectations"] += 1
else:
new_expectations.append(expectation)
expectations = new_expectations
message = "\t%d expectation(s) included in expectation_suite." % len(
expectations
)
if discards["failed_expectations"] > 0 and not suppress_warnings:
message += (
" Omitting %d expectation(s) that failed when last run; set "
"discard_failed_expectations=False to include them."
% discards["failed_expectations"]
)
for expectation in expectations:
# FIXME: Factor this out into a new function. The logic is duplicated in remove_expectation,
# which calls _copy_and_clean_up_expectation
expectation.success_on_last_run = None
if discard_result_format_kwargs:
if "result_format" in expectation.kwargs:
del expectation.kwargs["result_format"]
discards["result_format"] += 1
if discard_include_config_kwargs:
if "include_config" in expectation.kwargs:
del expectation.kwargs["include_config"]
discards["include_config"] += 1
if discard_catch_exceptions_kwargs:
if "catch_exceptions" in expectation.kwargs:
del expectation.kwargs["catch_exceptions"]
discards["catch_exceptions"] += 1
settings_message = ""
if discards["result_format"] > 0 and not suppress_warnings:
settings_message += " result_format"
if discards["include_config"] > 0 and not suppress_warnings:
settings_message += " include_config"
if discards["catch_exceptions"] > 0 and not suppress_warnings:
settings_message += " catch_exceptions"
if (
len(settings_message) > 1
): # Only add this if we added one of the settings above.
settings_message += " settings filtered."
expectation_suite.expectations = expectations
if not suppress_logging:
logger.info(message + settings_message)
return expectation_suite
def save_expectation_suite(
self,
filepath=None,
discard_failed_expectations=True,
discard_result_format_kwargs=True,
discard_include_config_kwargs=True,
discard_catch_exceptions_kwargs=True,
suppress_warnings=False,
):
"""Writes ``_expectation_config`` to a JSON file.
Writes the DataAsset's expectation config to the specified JSON ``filepath``. Failing expectations \
can be excluded from the JSON expectations config with ``discard_failed_expectations``. The kwarg key-value \
pairs :ref:`result_format`, :ref:`include_config`, and :ref:`catch_exceptions` are optionally excluded from \
the JSON expectations config.
Args:
filepath (string): \
The location and name to write the JSON config file to.
discard_failed_expectations (boolean): \
If True, excludes expectations that do not return ``success = True``. \
If False, all expectations are written to the JSON config file.
discard_result_format_kwargs (boolean): \
If True, the :ref:`result_format` attribute for each expectation is not written to the JSON config \
file.
discard_include_config_kwargs (boolean): \
If True, the :ref:`include_config` attribute for each expectation is not written to the JSON config \
file.
discard_catch_exceptions_kwargs (boolean): \
If True, the :ref:`catch_exceptions` attribute for each expectation is not written to the JSON \
config file.
suppress_warnings (boolean): \
                If True, all warnings raised by Great Expectations, as a result of dropped expectations, are \
suppressed.
"""
expectation_suite = self.get_expectation_suite(
discard_failed_expectations,
discard_result_format_kwargs,
discard_include_config_kwargs,
discard_catch_exceptions_kwargs,
suppress_warnings,
)
if filepath is None and self._data_context is not None:
self._data_context.save_expectation_suite(expectation_suite)
elif filepath is not None:
with open(filepath, "w") as outfile:
json.dump(
expectationSuiteSchema.dump(expectation_suite),
outfile,
indent=2,
sort_keys=True,
)
else:
raise ValueError(
"Unable to save config: filepath or data_context must be available."
)
def validate(
self,
expectation_suite=None,
run_id=None,
data_context=None,
evaluation_parameters=None,
catch_exceptions=True,
result_format=None,
only_return_failures=False,
run_name=None,
run_time=None,
):
"""Generates a JSON-formatted report describing the outcome of all expectations.
Use the default expectation_suite=None to validate the expectations config associated with the DataAsset.
Args:
expectation_suite (json or None): \
If None, uses the expectations config generated with the DataAsset during the current session. \
If a JSON file, validates those expectations.
run_name (str): \
Used to identify this validation result as part of a collection of validations. \
See DataContext for more information.
data_context (DataContext): \
A datacontext object to use as part of validation for binding evaluation parameters and \
registering validation results.
evaluation_parameters (dict or None): \
If None, uses the evaluation_paramters from the expectation_suite provided or as part of the \
data_asset. If a dict, uses the evaluation parameters in the dictionary.
catch_exceptions (boolean): \
If True, exceptions raised by tests will not end validation and will be described in the returned \
report.
result_format (string or None): \
If None, uses the default value ('BASIC' or as specified). \
If string, the returned expectation output follows the specified format ('BOOLEAN_ONLY','BASIC', \
etc.).
only_return_failures (boolean): \
If True, expectation results are only returned when ``success = False`` \
Returns:
A JSON-formatted dictionary containing a list of the validation results. \
An example of the returned format::
{
"results": [
{
"unexpected_list": [unexpected_value_1, unexpected_value_2],
"expectation_type": "expect_*",
"kwargs": {
"column": "Column_Name",
"output_format": "SUMMARY"
},
"success": true,
"raised_exception: false.
"exception_traceback": null
},
{
... (Second expectation results)
},
... (More expectations results)
],
"success": true,
"statistics": {
"evaluated_expectations": n,
"successful_expectations": m,
"unsuccessful_expectations": n - m,
"success_percent": m / n
}
}
Notes:
            A warning is issued if the configuration object was built with a different version of great_expectations \
            than the current environment, or if no version was found in the configuration object.
Raises:
AttributeError - if 'catch_exceptions'=None and an expectation throws an AttributeError
"""
try:
validation_time = datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%dT%H%M%S.%fZ"
)
assert not (run_id and run_name) and not (
run_id and run_time
), "Please provide either a run_id or run_name and/or run_time."
if isinstance(run_id, str) and not run_name:
warnings.warn(
"String run_ids will be deprecated in the future. Please provide a run_id of type "
"RunIdentifier(run_name=None, run_time=None), or a dictionary containing run_name "
"and run_time (both optional). Instead of providing a run_id, you may also provide"
"run_name and run_time separately.",
DeprecationWarning,
)
try:
run_time = parse(run_id)
except (ValueError, TypeError):
pass
run_id = RunIdentifier(run_name=run_id, run_time=run_time)
elif isinstance(run_id, dict):
run_id = RunIdentifier(**run_id)
elif not isinstance(run_id, RunIdentifier):
run_id = RunIdentifier(run_name=run_name, run_time=run_time)
self._active_validation = True
# If a different validation data context was provided, override
validate__data_context = self._data_context
if data_context is None and self._data_context is not None:
data_context = self._data_context
elif data_context is not None:
# temporarily set self._data_context so it is used inside the expectation decorator
self._data_context = data_context
results = []
if expectation_suite is None:
expectation_suite = self.get_expectation_suite(
discard_failed_expectations=False,
discard_result_format_kwargs=False,
discard_include_config_kwargs=False,
discard_catch_exceptions_kwargs=False,
)
elif isinstance(expectation_suite, str):
try:
with open(expectation_suite) as infile:
expectation_suite = expectationSuiteSchema.loads(infile.read())
except ValidationError:
raise
except OSError:
raise GreatExpectationsError(
"Unable to load expectation suite: IO error while reading %s"
% expectation_suite
)
elif not isinstance(expectation_suite, ExpectationSuite):
logger.error(
"Unable to validate using the provided value for expectation suite; does it need to be "
"loaded from a dictionary?"
)
if getattr(data_context, "_usage_statistics_handler", None):
handler = data_context._usage_statistics_handler
handler.send_usage_message(
event="data_asset.validate",
event_payload=handler._batch_anonymizer.anonymize_batch_info(
self
),
success=False,
)
return ExpectationValidationResult(success=False)
# Evaluation parameter priority is
# 1. from provided parameters
# 2. from expectation configuration
# 3. from data context
# So, we load them in reverse order
if data_context is not None:
runtime_evaluation_parameters = (
data_context.evaluation_parameter_store.get_bind_params(run_id)
)
else:
runtime_evaluation_parameters = {}
if expectation_suite.evaluation_parameters:
runtime_evaluation_parameters.update(
expectation_suite.evaluation_parameters
)
if evaluation_parameters is not None:
runtime_evaluation_parameters.update(evaluation_parameters)
# Convert evaluation parameters to be json-serializable
runtime_evaluation_parameters = recursively_convert_to_json_serializable(
runtime_evaluation_parameters
)
# Warn if our version is different from the version in the configuration
# TODO: Deprecate "great_expectations.__version__"
suite_ge_version = expectation_suite.meta.get(
"great_expectations_version"
) or expectation_suite.meta.get("great_expectations.__version__")
if suite_ge_version:
if suite_ge_version != ge_version:
warnings.warn(
"WARNING: This configuration object was built using version %s of great_expectations, but "
"is currently being validated by version %s."
% (
suite_ge_version,
ge_version,
)
)
else:
warnings.warn(
"WARNING: No great_expectations version found in configuration object."
)
###
# This is an early example of what will become part of the ValidationOperator
# This operator would be dataset-semantic aware
# Adding now to simply ensure we can be slightly better at ordering our expectation evaluation
###
# Group expectations by column
columns = {}
for expectation in expectation_suite.expectations:
if "column" in expectation.kwargs and isinstance(
expectation.kwargs["column"], Hashable
):
column = expectation.kwargs["column"]
else:
column = "_nocolumn"
if column not in columns:
columns[column] = []
columns[column].append(expectation)
expectations_to_evaluate = []
for col in columns:
expectations_to_evaluate.extend(columns[col])
for expectation in expectations_to_evaluate:
try:
# copy the config so we can modify it below if needed
expectation = copy.deepcopy(expectation)
expectation_method = getattr(self, expectation.expectation_type)
if result_format is not None:
expectation.kwargs.update({"result_format": result_format})
# A missing parameter will raise an EvaluationParameterError
(
evaluation_args,
substituted_parameters,
) = build_evaluation_parameters(
expectation.kwargs,
runtime_evaluation_parameters,
self._config.get("interactive_evaluation", True),
self._data_context,
)
result = expectation_method(
catch_exceptions=catch_exceptions,
include_config=True,
**evaluation_args
)
except Exception as err:
if catch_exceptions:
raised_exception = True
exception_traceback = traceback.format_exc()
result = ExpectationValidationResult(
success=False,
exception_info={
"raised_exception": raised_exception,
"exception_traceback": exception_traceback,
"exception_message": str(err),
},
)
else:
raise err
# if include_config:
result.expectation_config = expectation
# Add an empty exception_info object if no exception was caught
if catch_exceptions and result.exception_info is None:
result.exception_info = {
"raised_exception": False,
"exception_traceback": None,
"exception_message": None,
}
results.append(result)
statistics = _calc_validation_statistics(results)
if only_return_failures:
abbrev_results = []
for exp in results:
if not exp.success:
abbrev_results.append(exp)
results = abbrev_results
expectation_suite_name = expectation_suite.expectation_suite_name
result = ExpectationSuiteValidationResult(
results=results,
success=statistics.success,
statistics={
"evaluated_expectations": statistics.evaluated_expectations,
"successful_expectations": statistics.successful_expectations,
"unsuccessful_expectations": statistics.unsuccessful_expectations,
"success_percent": statistics.success_percent,
},
evaluation_parameters=runtime_evaluation_parameters,
meta={
"great_expectations_version": ge_version,
"expectation_suite_name": expectation_suite_name,
"run_id": run_id,
"batch_kwargs": self.batch_kwargs,
"batch_markers": self.batch_markers,
"batch_parameters": self.batch_parameters,
"validation_time": validation_time,
},
)
self._data_context = validate__data_context
except Exception:
if getattr(data_context, "_usage_statistics_handler", None):
handler = data_context._usage_statistics_handler
handler.send_usage_message(
event="data_asset.validate",
event_payload=handler._batch_anonymizer.anonymize_batch_info(self),
success=False,
)
raise
finally:
self._active_validation = False
if getattr(data_context, "_usage_statistics_handler", None):
handler = data_context._usage_statistics_handler
handler.send_usage_message(
event="data_asset.validate",
event_payload=handler._batch_anonymizer.anonymize_batch_info(self),
success=True,
)
return result
def get_evaluation_parameter(self, parameter_name, default_value=None):
"""Get an evaluation parameter value that has been stored in meta.
Args:
parameter_name (string): The name of the parameter to store.
default_value (any): The default value to be returned if the parameter is not found.
Returns:
The current value of the evaluation parameter.
"""
if parameter_name in self._expectation_suite.evaluation_parameters:
return self._expectation_suite.evaluation_parameters[parameter_name]
else:
return default_value
def set_evaluation_parameter(self, parameter_name, parameter_value):
"""Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate
parameterized expectations.
Args:
parameter_name (string): The name of the kwarg to be replaced at evaluation time
parameter_value (any): The value to be used
"""
self._expectation_suite.evaluation_parameters.update(
{parameter_name: parameter_value}
)
def add_citation(
self,
comment,
batch_kwargs=None,
batch_markers=None,
batch_parameters=None,
citation_date=None,
):
if batch_kwargs is None:
batch_kwargs = self.batch_kwargs
if batch_markers is None:
batch_markers = self.batch_markers
if batch_parameters is None:
batch_parameters = self.batch_parameters
self._expectation_suite.add_citation(
comment,
batch_kwargs=batch_kwargs,
batch_markers=batch_markers,
batch_parameters=batch_parameters,
citation_date=citation_date,
)
@property
def expectation_suite_name(self):
"""Gets the current expectation_suite name of this data_asset as stored in the expectations configuration."""
return self._expectation_suite.expectation_suite_name
@expectation_suite_name.setter
def expectation_suite_name(self, expectation_suite_name):
"""Sets the expectation_suite name of this data_asset as stored in the expectations configuration."""
self._expectation_suite.expectation_suite_name = expectation_suite_name
###
#
# Output generation
#
###
def _format_map_output(
self,
result_format,
success,
element_count,
nonnull_count,
unexpected_count,
unexpected_list,
unexpected_index_list,
):
"""Helper function to construct expectation result objects for map_expectations (such as column_map_expectation
and file_lines_map_expectation).
Expectations support four result_formats: BOOLEAN_ONLY, BASIC, SUMMARY, and COMPLETE.
In each case, the object returned has a different set of populated fields.
See :ref:`result_format` for more information.
This function handles the logic for mapping those fields for column_map_expectations.
"""
        # NB: the unexpected_count parameter is explicit because some implementing classes may limit the length of unexpected_list
# Retain support for string-only output formats:
result_format = parse_result_format(result_format)
# Incrementally add to result and return when all values for the specified level are present
return_obj = {"success": success}
if result_format["result_format"] == "BOOLEAN_ONLY":
return return_obj
missing_count = element_count - nonnull_count
if element_count > 0:
missing_percent = missing_count / element_count * 100
if nonnull_count > 0:
unexpected_percent_total = unexpected_count / element_count * 100
unexpected_percent_nonmissing = unexpected_count / nonnull_count * 100
else:
unexpected_percent_total = None
unexpected_percent_nonmissing = None
else:
missing_percent = None
unexpected_percent_total = None
unexpected_percent_nonmissing = None
return_obj["result"] = {
"element_count": element_count,
"missing_count": missing_count,
"missing_percent": missing_percent,
"unexpected_count": unexpected_count,
"unexpected_percent": unexpected_percent_nonmissing,
"unexpected_percent_total": unexpected_percent_total,
"unexpected_percent_nonmissing": unexpected_percent_nonmissing,
"partial_unexpected_list": unexpected_list[
: result_format["partial_unexpected_count"]
],
}
if result_format["result_format"] == "BASIC":
return return_obj
# Try to return the most common values, if possible.
        if result_format.get("partial_unexpected_count") > 0:
try:
partial_unexpected_counts = [
{"value": key, "count": value}
for key, value in sorted(
Counter(unexpected_list).most_common(
result_format["partial_unexpected_count"]
),
key=lambda x: (-x[1], str(x[0])),
)
]
except TypeError:
partial_unexpected_counts = []
if "details" not in return_obj["result"]:
return_obj["result"]["details"] = {}
return_obj["result"]["details"][
"partial_unexpected_counts_error"
] = "partial_unexpected_counts requested, but requires a hashable type"
finally:
return_obj["result"].update(
{
"partial_unexpected_index_list": unexpected_index_list[
: result_format["partial_unexpected_count"]
]
if unexpected_index_list is not None
else None,
"partial_unexpected_counts": partial_unexpected_counts,
}
)
if result_format["result_format"] == "SUMMARY":
return return_obj
return_obj["result"].update(
{
"unexpected_list": unexpected_list,
"unexpected_index_list": unexpected_index_list,
}
)
if result_format["result_format"] == "COMPLETE":
return return_obj
raise ValueError(
"Unknown result_format {}.".format(result_format["result_format"])
)
def _calc_map_expectation_success(self, success_count, nonnull_count, mostly):
"""Calculate success and percent_success for column_map_expectations
Args:
success_count (int): \
The number of successful values in the column
nonnull_count (int): \
The number of nonnull values in the column
mostly (float or None): \
A value between 0 and 1 (or None), indicating the fraction of successes required to pass the \
expectation as a whole. If mostly=None, then all values must succeed in order for the expectation as \
a whole to succeed.
Returns:
success (boolean), percent_success (float)
"""
if isinstance(success_count, decimal.Decimal):
raise ValueError(
"success_count must not be a decimal; check your db configuration"
)
if isinstance(nonnull_count, decimal.Decimal):
raise ValueError(
"nonnull_count must not be a decimal; check your db configuration"
)
if nonnull_count > 0:
percent_success = success_count / nonnull_count
if mostly is not None:
success = bool(percent_success >= mostly)
else:
success = bool(nonnull_count - success_count == 0)
else:
success = True
percent_success = None
return success, percent_success
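    # Worked example (illustrative): with success_count=95, nonnull_count=100 and mostly=0.9,
    # percent_success is 0.95 >= 0.9 so the expectation passes; with mostly=None it would
    # fail, because 5 non-null values did not succeed.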
###
#
# Iterative testing for custom expectations
#
###
def test_expectation_function(self, function, *args, **kwargs):
"""Test a generic expectation function
Args:
function (func): The function to be tested. (Must be a valid expectation function.)
            *args : Positional arguments to be passed to the function
            **kwargs : Keyword arguments to be passed to the function
Returns:
A JSON-serializable expectation result object.
Notes:
This function is a thin layer to allow quick testing of new expectation functions, without having to \
define custom classes, etc. To use developed expectations from the command-line tool, you will still need \
to define custom classes, etc.
Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information.
"""
argspec = inspect.getfullargspec(function)[0][1:]
new_function = self.expectation(argspec)(function)
return new_function(self, *args, **kwargs)
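    # Hedged usage sketch (the expectation function below is hypothetical, not part of this
    # module), assuming `my_asset` is an instance of a DataAsset subclass:
    #
    #   def expect_asset_to_have_rows(self, min_rows=1):
    #       return {"success": len(self) >= min_rows}
    #
    #   my_asset.test_expectation_function(expect_asset_to_have_rows, min_rows=10)
    #
    # The self.expectation(argspec) decorator wraps the function so its return value is
    # handled like a normal expectation validation result.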
ValidationStatistics = namedtuple(
"ValidationStatistics",
[
"evaluated_expectations",
"successful_expectations",
"unsuccessful_expectations",
"success_percent",
"success",
],
)
def _calc_validation_statistics(validation_results):
"""
Calculate summary statistics for the validation results and
    return ``ValidationStatistics``.
"""
# calc stats
successful_expectations = sum(exp.success for exp in validation_results)
evaluated_expectations = len(validation_results)
unsuccessful_expectations = evaluated_expectations - successful_expectations
success = successful_expectations == evaluated_expectations
try:
success_percent = successful_expectations / evaluated_expectations * 100
except ZeroDivisionError:
# success_percent = float("nan")
success_percent = None
return ValidationStatistics(
successful_expectations=successful_expectations,
evaluated_expectations=evaluated_expectations,
unsuccessful_expectations=unsuccessful_expectations,
success=success,
success_percent=success_percent,
)
| 41.271407 | 134 | 0.599392 | [
"Apache-2.0"
] | BSofo/great_expectations | great_expectations/data_asset/data_asset.py | 53,983 | Python |
"""update liscence colum to hash
Revision ID: 0a769c5cda0a
Revises: 1de63d54c3b7
Create Date: 2018-06-21 17:57:36.549097
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0a769c5cda0a'
down_revision = '1de63d54c3b7'
branch_labels = None
depends_on = None
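# This revision widens the birth, liscence_* and serial columns from VARCHAR(80) to
# VARCHAR(128) so they can hold hashed values, as the revision message indicates;
# downgrade() restores the original lengths.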
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('user', 'birth',
existing_type=sa.VARCHAR(length=80),
type_=sa.String(length=128),
existing_nullable=True)
op.alter_column('user', 'liscence_2',
existing_type=sa.VARCHAR(length=80),
type_=sa.String(length=128),
existing_nullable=True)
op.alter_column('user', 'liscence_3',
existing_type=sa.VARCHAR(length=80),
type_=sa.String(length=128),
existing_nullable=True)
op.alter_column('user', 'liscence_4',
existing_type=sa.VARCHAR(length=80),
type_=sa.String(length=128),
existing_nullable=True)
op.alter_column('user', 'serial',
existing_type=sa.VARCHAR(length=80),
type_=sa.String(length=128),
existing_nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('user', 'serial',
existing_type=sa.String(length=128),
type_=sa.VARCHAR(length=80),
existing_nullable=True)
op.alter_column('user', 'liscence_4',
existing_type=sa.String(length=128),
type_=sa.VARCHAR(length=80),
existing_nullable=True)
op.alter_column('user', 'liscence_3',
existing_type=sa.String(length=128),
type_=sa.VARCHAR(length=80),
existing_nullable=True)
op.alter_column('user', 'liscence_2',
existing_type=sa.String(length=128),
type_=sa.VARCHAR(length=80),
existing_nullable=True)
op.alter_column('user', 'birth',
existing_type=sa.String(length=128),
type_=sa.VARCHAR(length=80),
existing_nullable=True)
# ### end Alembic commands ###
| 34.373134 | 65 | 0.597916 | [
"MIT"
] | todhm/wicarproject | wicarproject/migrations/versions/0a769c5cda0a_update_liscence_colum_to_hash.py | 2,303 | Python |
import os
from plugins import BaseAssessment
from yapsy.IPlugin import IPlugin
from asmtypes import ArastDataOutputError
class ReaprAssessment(BaseAssessment, IPlugin):
OUTPUT = 'contigs'
def run(self):
"""
Build the command and run.
Return list of file(s)
"""
contigs = self.data.contigfiles
reads = self.data.readsets
if len(contigs) > 1:
raise Exception('Reapr: multiple contig files!')
#### Generate Bamfiles
if len(reads) > 1:
self.out_module.write('WARNING: Reapr will use only one read library')
read_pair = reads[0].files
bamfile = os.path.join(self.outpath, 'out.bam')
cmd_args = [self.executable, 'smaltmap', contigs[0],
read_pair[0], read_pair[1], bamfile]
self.arast_popen(cmd_args)
if not os.path.exists(bamfile):
raise ArastDataOutputError('REAPR: Unable to create alignment')
#### Run REAPR Pipeline
rpr_outpath = os.path.join(self.outpath, 'output')
cmd_args = [self.executable, 'pipeline', contigs[0], bamfile, rpr_outpath]
self.arast_popen(cmd_args)
# Move files into root dir
for f in os.listdir(rpr_outpath):
old = os.path.join(rpr_outpath, f)
new = os.path.join(self.outpath, f)
os.rename(old, new)
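        # REAPR's pipeline writes 04.break.broken_assembly.fa when it breaks contigs at
        # suspected misassembly points; if that file exists it is returned below as the
        # assessed contig set, otherwise the method falls through and returns None.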
broken = os.path.join(self.outpath, '04.break.broken_assembly.fa')
if os.path.exists(broken):
return {'contigs': [broken]}
| 33.255319 | 82 | 0.602047 | [
"MIT"
] | levinas/assembly | lib/assembly/plugins/reapr.py | 1,563 | Python |
#!/usr/bin/env python3
# Northcliff Airconditioner Controller Version 3.48 Gen
import RPi.GPIO as GPIO
import time
from datetime import datetime
#import requests
#from threading import Thread
import paho.mqtt.client as mqtt
import struct
import json
import serial
import binascii
import sys
import spidev
import math
import os
class NorthcliffAirconController(object):
def __init__(self, calibrate_damper_on_startup):
# Set up GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
self.control_enable = 17
self.damper_control = 25
self.damper_stop = 24
self.damper_zone = 23
GPIO.setup(self.control_enable, GPIO.OUT)
GPIO.setup(self.damper_control, GPIO.OUT)
GPIO.setup(self.damper_stop, GPIO.OUT)
GPIO.setup(self.damper_zone, GPIO.OUT)
GPIO.output(self.control_enable, False)
self.damper_control_state = False
GPIO.output(self.damper_control, False)
self.damper_stop_state = False
GPIO.output(self.damper_stop, False)
self.damper_zone_state = False
GPIO.output(self.damper_zone, False)
# Aircon Startup Mode
self.remote_operation_on = False # This flag keeps track of whether the aircon is under remote or autonomous operation
self.enable_serial_comms_loop = False # This flag is set to True during remote operation to enable the serial comms loop when the aircon is under remote operations
self.heating = False # Mirrors aircon heating state indicator
self.compressor = False # Mirrors aircon compressor state indicator
self.malfunction = False # Mirrors aircon malfunction state indicator and is used to indicate a malfunction in the aircon/controller comms
self.heat_mode = False # Mirrors aircon heat mode indicator
self.cool_mode = False # Mirrors aircon cool mode indicator
self.fan_mode = False # Mirrors aircon fan mode indicator
self.fan_hi = False # Mirrors aircon fan hi indicator
self.fan_med = False # Mirrors aircon fan med indicator
self.fan_lo = False # Mirrors aircon fan lo indicator
self.filter = False # Mirrors aircon filter indicator
# Set up damper states
self.requested_damper_percent = 100
self.adjusting_damper = False
# Set default damper positions
self.damper_day_position = 416
self.damper_night_position = 1648
self.calibrate_damper_on_startup = calibrate_damper_on_startup
# Set up heartbeat
self.heartbeat_count = 0
self.no_heartbeat_ack = False
# Set up Serial Comms Data
self.packet1_header_a = '00'
self.packet1_header_b = '8f'
self.packet1_header = self.packet1_header_a + self.packet1_header_b
self.packet2_header_a = '80'
self.packet2_header_b = '8c'
self.packet2_header = self.packet2_header_a + self.packet2_header_b
self.packet3_initial_header = self.packet1_header
self.mode = {'Auto On': 'b0', 'Auto Off': '90', 'Dry On': 'b1', 'Dry Off': '91', 'Cool On': 'b2', 'Cool Off': '92', 'Fan On': 'b3', 'Fan Off': '93', 'Heat On': 'b4', 'Heat Off': '94'}
self.set_temp = {'18 degrees': '48', '19 degrees': '4a', '20 degrees': '4c', '21 degrees': '4e', '22 degrees': '50', '23 degrees': '52', '24 degrees': '54', '25 degrees': '56', '26 degrees': '58',
'27 degrees': '5a', '28 degrees': '5c', '29 degrees': '5e', '30 degrees': '60'}
self.fan_speed = {'Lo On': 'f0', 'Lo Off': 'e0', 'Med On': 'f1', 'Med Off': 'e1', 'Hi On': 'f2', 'Hi Off': 'e2'}
self.clean_filter = {'Reset': 'f1', 'No Reset': 'f0'}
self.alerts = {'Not in Warmup': ['f8', 'fa'], 'Warmup': ['f9', 'fb'], 'Clean Filter': ['fa', 'fb'], 'Filter OK': ['f8', 'f9']}
self.compressor_state = {'Off': 'e0', 'On': 'e2'}
# Set up dictionaries for Serial Comms Packets to Off, Fan Mode, Fan Lo
self.packet_1_dictionary = {"1Header1": self.packet1_header, "2Mode1": self.mode['Fan Off'], "3Filler1a": "00", "4SetTemp1": self.set_temp['20 degrees'], "5Fan1": self.fan_speed['Hi Off'],
"6Filler1b": "fffff03fffffffffff"}
self.packet_2_dictionary = {"1Header2": self.packet2_header, "2Mode2": self.mode['Fan Off'], "3Filler2a": "00", "4SetTemp2": self.set_temp['20 degrees'], "5Fan2": self.fan_speed['Hi Off'],
"6ActualTemp2": "90", "7Filler2b": "00", "8Unknown2": "e0", "9Alerts2": self.alerts['Warmup'], "10Filler2c": "ffff", "11Compressor2": self.compressor_state['On'],
"12Filler2c": "ffff", "13Checksum2": "00"}
self.packet_3_dictionary = {"1Header3": self.packet3_initial_header, "2Mode3": self.mode['Fan Off'], "3Filler3a": "00", "4SetTemp3": self.set_temp['20 degrees'],
"5Fan3": self.fan_speed['Hi Off'], "6Filler3b": "fffff03fffffffffff"}
# Set up serial port for aircon controller comms
self.aircon_comms = serial.Serial("/dev/ttyAMA0", 1200, parity=serial.PARITY_EVEN, timeout=0.5) # After swapping serial and bluetooth ports so we can use parity
# Set up SPI Port for the damper position sensor
self.spi = spidev.SpiDev()
speed = 50000
self.spi.open(0,0)
self.spi.max_speed_hz = speed
# Initialise damper position sensor
resp = self.spi.xfer2([0x0e, 0x00, 0x00]) # X-Channel Self Test
time.sleep(0.3)
resp = self.spi.xfer2([0x00, 0x00]) # Exit Self Test
time.sleep(0.1)
resp = self.spi.xfer2([0x0f, 0x00, 0x00]) # Y-Channel Self Test
time.sleep(0.3)
resp = self.spi.xfer2([0x00, 0x00]) # Exit Self Test
time.sleep(0.1)
def print_status(self, print_message):
today = datetime.now()
print("")
print(print_message + today.strftime('%A %d %B %Y @ %H:%M:%S'))
def startup(self):
self.print_status("Northcliff Aircon Controller starting up on ")
# Set up mqtt client
self.client = mqtt.Client('aircon') #Create new instance of mqtt Class
self.client.on_connect = self.on_connect
self.client.on_message = self.on_message
self.client.connect("<your mqtt Broker name>", 1883, 60) #Connect to mqtt broker
self.client.loop_start() #Start mqtt monitor thread
if self.calibrate_damper_on_startup == True:
self.calibrate_damper(damper_movement_time = 180)
# Detect Damper Position and update Home Manager with aircon status
self.detect_damper_position(calibrate = False)
self.update_status()
def on_connect(self, client, userdata, flags, rc): # Print mqtt status on connecting to broker
time.sleep(1)
self.print_status("Connected to mqtt server with result code "+str(rc)+" on ")
print("")
self.client.subscribe("AirconControl")
def on_message(self, client, userdata, msg): # mqtt message method calls
decoded_payload = str(msg.payload.decode("utf-8"))
message = msg.topic+" "+ decoded_payload # Capture message with binary states converted to a string
#print(message)
if str(msg.topic) == 'AirconControl':
parsed_json = json.loads(decoded_payload)
if parsed_json['service'] == 'Off':
self.process_thermo_off_command()
elif parsed_json['service'] == 'Ventilate':
self.process_ventilate_mode()
elif parsed_json['service'] == 'Thermostat Heat':
self.process_thermo_heat_command()
elif parsed_json['service'] == 'Thermostat Cool':
self.process_thermo_cool_command()
elif parsed_json['service'] == 'Thermostat Auto':
self.process_thermo_auto_command()
elif parsed_json['service'] == 'Heat Mode':
self.process_heat_command()
elif parsed_json['service'] == 'Cool Mode':
self.process_cool_command()
elif parsed_json['service'] == 'Fan Mode':
self.process_fan_command()
elif parsed_json['service'] == 'Fan Hi':
self.process_fan_hi_command()
elif parsed_json['service'] == 'Fan Med':
self.process_fan_med_command()
elif parsed_json['service'] == 'Fan Lo':
self.process_fan_lo_command()
elif parsed_json['service'] == 'Damper Percent':
self.requested_damper_percent = parsed_json['value']
self.print_status("Damper Command Received on ")
print("Requested Damper Percent is", self.requested_damper_percent, "Current Damper Percent is", self.reported_damper_percent)
elif parsed_json['service'] == 'Update Status': # If HomeManager wants a status update
self.print_status("Status Update Requested on ")
self.update_status()
elif parsed_json['service'] == 'Heartbeat Ack': # If HomeManager sends a heartbeat ack
self.heartbeat_ack()
else:
print("Received unknown message", str(parsed_json))
def update_status(self): # Send aircon status to Home Manager
status = json.dumps({'service': 'Status Update', 'Remote Operation': self.remote_operation_on, 'Heat': self.heat_mode, 'Cool': self.cool_mode,
'Fan': self.fan_mode, 'Fan Hi': self.fan_hi, 'Fan Med': self.fan_med, 'Fan Lo': self.fan_lo, 'Heating': self.heating,
'Compressor': self.compressor, 'Malfunction': self.malfunction, 'Damper': self.reported_damper_percent, 'Filter': self.filter})
self.client.publish('AirconStatus', status)
### Methods for mqtt messages received from Home Manager ###
def process_thermo_off_command(self):
self.print_status("Thermo Off Command received on ")
self.packet_1_dictionary["2Mode1"] = self.mode['Fan Off'] # Set Fan to Off Mode
self.packet_3_dictionary["2Mode3"] = self.mode['Fan Off'] # Set Fan to Off Mode
self.packet_1_dictionary["5Fan1"] = self.fan_speed['Hi Off'] # Set Fan to High
self.packet_3_dictionary["5Fan3"] = self.fan_speed['Hi Off'] # Set Fan to High
self.cool_mode = False
self.fan_mode = False
self.heat_mode = False
self.fan_med = False
self.fan_hi = False
self.fan_lo = False
self.update_status()
time.sleep(3) # Wait for packets to be sent before disconnecting
self.enable_serial_comms_loop = False # Sets the flag to exit serial comms loop and prepare for disconnect
# The disconnect is done in the main loop so it happens between packet 3 and packet 1
def process_thermo_heat_command(self):
self.print_status("Thermo Heat Command received on ")
if self.remote_operation_on == False: # Turn On
self.remote_operation_on = True
self.enable_serial_comms_loop = True
GPIO.output(self.control_enable, True) # Take Control of Remote
self.damper_control_state = True
GPIO.output(self.damper_control, True) # Take Control of Damper
time.sleep (1.0)
self.packet_1_dictionary["2Mode1"] = self.mode['Fan On'] # Set to Fan Mode
self.packet_1_dictionary["4SetTemp1"] = self.set_temp['30 degrees'] # Set 30 degrees for Heating
self.packet_3_dictionary["2Mode3"] = self.mode['Fan On'] # Set to Fan Mode
self.packet_3_dictionary["4SetTemp3"] = self.set_temp['30 degrees'] # Set 30 degrees for Heating
self.packet_1_dictionary["5Fan1"] = self.fan_speed['Lo On'] # Fan Lo
self.packet_3_dictionary["5Fan3"] = self.fan_speed['Lo On'] # Fan Lo
self.cool_mode = False
self.fan_mode = True
self.heat_mode = False
self.fan_med = False
self.fan_hi = False
self.fan_lo = True
self.update_status()
def process_thermo_cool_command(self):
self.print_status("Thermo Cool Command received on ")
if self.remote_operation_on == False: # Turn On
self.remote_operation_on = True
self.enable_serial_comms_loop = True
GPIO.output(self.control_enable, True) # Take Control of Remote
self.damper_control_state = True
GPIO.output(self.damper_control, True) # Take Control of Damper
time.sleep (1.0)
self.packet_1_dictionary["2Mode1"] = self.mode['Fan On'] # Set to Fan Mode
self.packet_1_dictionary["4SetTemp1"] = self.set_temp['18 degrees'] # Set 18 Degrees for Cooling
self.packet_3_dictionary["2Mode3"] = self.mode['Fan On'] # Set to Fan Mode
self.packet_3_dictionary["4SetTemp3"] = self.set_temp['18 degrees'] # Set 18 Degrees for Cooling
self.packet_1_dictionary["5Fan1"] = self.fan_speed['Lo On'] # Fan Lo
self.packet_3_dictionary["5Fan3"] = self.fan_speed['Lo On'] # Fan Lo
self.cool_mode = False
self.fan_mode = True
self.heat_mode = False
self.fan_med = False
self.fan_hi = False
self.fan_lo = True
self.update_status()
def process_ventilate_mode(self):
self.print_status("Ventilate Command received on ")
if self.remote_operation_on == False: # Turn On
self.remote_operation_on = True
self.enable_serial_comms_loop = True
GPIO.output(self.control_enable, True) # Take Control of Remote
self.damper_control_state = True
GPIO.output(self.damper_control, True) # Take Control of Damper
time.sleep (1.0)
self.packet_1_dictionary["2Mode1"] = self.mode['Fan On'] # Set to Fan Mode
self.packet_1_dictionary["4SetTemp1"] = self.set_temp['21 degrees'] # Set 21 Degrees
self.packet_3_dictionary["2Mode3"] = self.mode['Fan On'] # Set to Fan Mode
self.packet_3_dictionary["4SetTemp3"] = self.set_temp['21 degrees'] # Set 21 Degrees
self.packet_1_dictionary["5Fan1"] = self.fan_speed['Hi On'] # Fan Hi
self.packet_3_dictionary["5Fan3"] = self.fan_speed['Hi On'] # Fan Hi
self.cool_mode = False
self.fan_mode = True
self.heat_mode = False
self.fan_med = False
self.fan_hi = True
self.fan_lo = False
self.update_status()
def process_thermo_auto_command(self): # Holding place if Auto method is to be added in the future
pass
def process_heat_command(self):
self.print_status("Heat Mode Command received on ")
self.packet_1_dictionary["2Mode1"] = self.mode['Heat On'] # Set to Heat Mode
self.packet_1_dictionary["4SetTemp1"] = self.set_temp['30 degrees'] # Set 30 degrees for Heating
self.packet_3_dictionary["2Mode3"] = self.mode['Heat On'] # Set to Heat Mode
self.packet_3_dictionary["4SetTemp3"] = self.set_temp['30 degrees'] # Set 30 degrees for Heating
self.packet_1_dictionary["5Fan1"] = self.fan_speed['Hi On'] # Fan Hi
self.packet_3_dictionary["5Fan3"] = self.fan_speed['Hi On'] # Fan Hi
self.cool_mode = False
self.fan_mode = False
self.heat_mode = True
self.fan_med = False
self.fan_hi = True
self.fan_lo = False
self.update_status()
def process_cool_command(self):
self.print_status("Cool Mode Command received on ")
self.packet_1_dictionary["2Mode1"] = self.mode['Cool On'] # Set to Cool Mode
self.packet_1_dictionary["4SetTemp1"] = self.set_temp['18 degrees'] # Set 18 Degrees for Cooling
self.packet_3_dictionary["2Mode3"] = self.mode['Cool On'] # Set to Cool Mode
self.packet_3_dictionary["4SetTemp3"] = self.set_temp['18 degrees'] # Set 18 Degrees for Cooling
self.packet_1_dictionary["5Fan1"] = self.fan_speed['Hi On'] # Fan Hi
self.packet_3_dictionary["5Fan3"] = self.fan_speed['Hi On'] # Fan Hi
self.cool_mode = True
self.fan_mode = False
self.heat_mode = False
self.fan_med = False
self.fan_hi = True
self.fan_lo = False
self.update_status()
def process_fan_command(self):
self.print_status("Fan Mode Command received on ")
self.packet_1_dictionary["2Mode1"] = self.mode['Fan On'] # Set to Fan Mode
self.packet_3_dictionary["2Mode3"] = self.mode['Fan On'] # Set to Fan Mode
self.packet_1_dictionary["5Fan1"] = self.fan_speed['Lo On'] # Fan Lo
self.packet_3_dictionary["5Fan3"] = self.fan_speed['Lo On'] # Fan Lo
self.cool_mode = False
self.fan_mode = True
self.heat_mode = False
self.fan_med = False
self.fan_hi = False
self.fan_lo = True
self.update_status()
def process_fan_hi_command(self):
self.print_status("Fan Hi Command received on ")
self.packet_1_dictionary["5Fan1"] = self.fan_speed['Hi On'] # Fan Hi
self.packet_3_dictionary["5Fan3"] = self.fan_speed['Hi On'] # Fan Hi
self.fan_med = False
self.fan_hi = True
self.fan_lo = False
self.update_status()
def process_fan_med_command(self):
self.print_status("Fan Med Command received on ")
self.packet_1_dictionary["5Fan1"] = self.fan_speed['Med On'] # Fan Med
self.packet_3_dictionary["5Fan3"] = self.fan_speed['Med On'] # Fan Med
self.fan_med = True
self.fan_hi = False
self.fan_lo = False
self.update_status()
def process_fan_lo_command(self):
self.print_status("Fan Lo Command received on ")
self.packet_1_dictionary["5Fan1"] = self.fan_speed['Lo On'] # Fan Lo
self.packet_3_dictionary["5Fan3"] = self.fan_speed['Lo On'] # Fan Lo
self.fan_med = False
self.fan_hi = False
self.fan_lo = True
self.update_status()
def heartbeat_ack(self):
#self.print_status('Heartbeat received from Home Manager on ')
self.heartbeat_count = 0
self.no_heartbeat_ack = False
### End of Methods for mqtt messages received from Home Manager ###
### Methods called in main loop ###
def process_home_manager_heartbeat(self): # Send heartbeat signal to Home Manager every 120 loops. Turn aircon off and reboot if there's no response within 80 more loops
self.heartbeat_count += 1
if self.heartbeat_count == 120:
#self.print_status('Sending Heartbeat to Home Manager on ')
self.send_heartbeat_to_home_manager()
if self.heartbeat_count > 200:
self.print_status('Home Manager Heartbeat Lost. Setting Aircon to Thermo Off Mode on ')
self.client.publish('AirconStatus', '{"service": "Restart"}')
self.no_heartbeat_ack = True
self.process_thermo_off_command()
time.sleep(10)
os.system('sudo reboot')
def send_heartbeat_to_home_manager(self):
self.client.publish('AirconStatus', '{"service": "Heartbeat"}')
def build_packets(self, packet_1, packet_3): # Build packets 1 and 3 for sending to the aircon
packets = [packet_1, packet_3]
for x in range(2):
sorted_packet = ([value for (key, value) in sorted(packets[x].items())]) # Sort the bytes contained in each packet dictionary into the correct order by using the first digit in the byte key
packet_no_checksum = ''.join(sorted_packet) # Join the packet dictionary bytes into one string
checksum = self.calculate_checksum(packet_no_checksum) # Calculate the checksum
packet_with_checksum = packet_no_checksum + checksum # Add the checksum to the end of the packet
packet_send = bytes.fromhex(''.join(packet_with_checksum)) # Convert the joined packet to binary
if x == 0: # Packet 1
self.packet_1_with_checksum = packet_with_checksum
self.packet_1_send = packet_send
else: # Packet 3
self.packet_3_with_checksum = packet_with_checksum
self.packet_3_send = packet_send
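    # Worked example (derived from the startup defaults above, for illustration only):
    # packet 1 serialises to "008f" (header) + "93" (Fan Off) + "00" + "4c" (20 degrees) +
    # "e2" (Fan Hi Off) + "fffff03fffffffffff"; those byte values sum to 2680 and
    # 2680 % 256 = 120, so the appended checksum byte is "78".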
def send_serial_aircon_data(self, packet): # Send packet to aircon comms port
self.aircon_comms.write(packet)
def receive_serial_aircon_data(self): # Receive Packet 2 from aircon comms port
# Look for Packet 2 Header (x808c)
header_loop_count = 0
found_packet_2_header = False
while header_loop_count < 16: # Test an entire packet for header
test_for_header_1 = self.aircon_comms.read(1) # Read one byte to look for the first half of the header
if test_for_header_1 == b'\x80':
                test_for_header_2 = self.aircon_comms.read(1) # Read one byte to look for the second half of the header, after successfully finding the first half of the header
if test_for_header_2 == b'\x8c':
found_packet_2_header = True # Flag that the correct Packet 2 header has been found
exit_loop_count = header_loop_count # Record the loop count in which the correct header was found (for debugging purposes)
header_loop_count = 16 # Exit the loop if the complete correct header has been found
else:
header_loop_count += 1 # Look for another instance of the first half of the header if the correct second half wasn't found immediately after the first half
else:
header_loop_count += 1 # Keep looking for the first half of the header
if found_packet_2_header == True: # Read the remaining bytes in the packet after the correct Packet 2 Header is found
self.raw_response_1 = self.aircon_comms.read(6) # Capture the next 6 bytes
self.raw_response_2 = self.aircon_comms.read(8) # capture the next 8 bytes
self.raw_response = b"".join([test_for_header_1, test_for_header_2, self.raw_response_1, self.raw_response_2]) # Construct the entire Packet 2 in binary form
self.packet_2 = str(binascii.hexlify(self.raw_response), "utf-8") # Convert Packet to to a string
self.decode_packet(self.packet_2) # Extract each component of Packet 2 and place in a dictionary that decodes the aircon function of each packet byte
else: # Flag that no correct Packet 2 header has been found
print("No valid Packet 2 Header received")
self.packet_2_error = True
self.malfunction = True
def decode_packet(self, packet_2): # Extract each component of Packet 2 and place in a dictionary that decodes the aircon function of each packet byte. Validate checksum and comparison with Packet 1 data
self.packet_2_error = False # Flag that Packet 2 is OK
self.previous_malfunction = self.malfunction # Capture the previous malfunction state
self.malfunction = False # Clear the malfunction flag
self.packet_2_dictionary["1Header2"] = packet_2[0:4]
self.packet_2_dictionary["2Mode2"] = packet_2[4:6]
self.packet_2_dictionary["3Filler2a"] = packet_2[6:8]
self.packet_2_dictionary["4SetTemp2"] = packet_2[8:10]
self.packet_2_dictionary["5Fan2"] = packet_2[10:12]
self.previous_actual_temperature = self.packet_2_dictionary["6ActualTemp2"]
self.packet_2_dictionary["6ActualTemp2"] = packet_2[12:14]
self.packet_2_dictionary["7Filler2b"] = packet_2[14:16]
self.packet_2_dictionary["8Unknown2"] = packet_2[16:18]
self.packet_2_dictionary["9Alerts2"] = packet_2[18:20]
self.packet_2_dictionary["10Filler2c"] = packet_2[20:24]
self.packet_2_dictionary["11Compressor2"] = packet_2[24:26]
self.packet_2_dictionary["12Filler2c"] = packet_2[26:30]
self.packet_2_dictionary["13Checksum2"] = packet_2[30:32]
        packet_no_checksum = packet_2[0:30] # Capture the packet without the checksum so that the checksum can be calculated
checksum = self.calculate_checksum(packet_no_checksum)
if self.packet_2_dictionary["11Compressor2"] == self.compressor_state['On']:
if self.compressor == False:
self.compressor = True
#self.print_status("Aircon Compressor Started on ")
self.update_status()
if self.packet_2_dictionary["11Compressor2"] == self.compressor_state['Off']:
if self.compressor == True:
self.compressor = False
#self.print_status("Aircon Compressor Stopped on ")
self.update_status()
if self.packet_2_dictionary["9Alerts2"] == self.alerts['Warmup'][0] or self.packet_2_dictionary["9Alerts2"] == self.alerts['Warmup'][1]:
if self.heating == False:
self.heating = True
#self.print_status("Aircon Warmup Started on ")
self.update_status()
if self.packet_2_dictionary["9Alerts2"] == self.alerts['Not in Warmup'][0] or self.packet_2_dictionary["9Alerts2"] == self.alerts['Not in Warmup'][1]:
if self.heating == True:
self.heating = False
#self.print_status("Aircon Warmup Stopped on ")
self.update_status()
if self.packet_2_dictionary["9Alerts2"] == self.alerts['Clean Filter'][0] or self.packet_2_dictionary["9Alerts2"] == self.alerts['Clean Filter'][1]:
if self.filter == False:
self.filter = True
self.print_status("Filter Clean Alert Active on ")
self.update_status()
if self.packet_2_dictionary["9Alerts2"] == self.alerts['Filter OK'][0] or self.packet_2_dictionary["9Alerts2"] == self.alerts['Filter OK'][1]:
if self.filter == True:
self.filter = False
self.print_status("Filter Clean Alert Reset on ")
self.update_status()
if self.packet_2_dictionary["8Unknown2"] != "e0":
self.print_status("Unknown Byte 8 of Packet 2 ")
print("Expected e0 but received ", self.packet_2_dictionary["8Unknown2"])
if checksum != self.packet_2_dictionary["13Checksum2"]:
print ("Packet 2 Checksum Error. Expected ", checksum, " Received ", self.packet_2_dictionary["13Checksum2"])
self.packet_2_error = True
self.malfunction = True
if packet_2[4:12] != self.packet_1_with_checksum[4:12]:
print("Mismatch between Packets 1 and 2. Expected ", self.packet_1_with_checksum[4:12], " but received ", packet_2[4:12])
if self.malfunction != self.previous_malfunction:
self.update_status()
def calculate_checksum(self, packet_no_checksum): # Calculate and return Packet 2's checksum
b = [packet_no_checksum[i:i+2] for i in range(0, len(packet_no_checksum), 2)] # Build a list of each non-checksum Packet 2 byte in hex string form
c = [int(i, 16) for i in b] # Convert the hex string form list into a list of integers
d = sum(c) % 256 # Sum the integer list in modulo 256
return hex(d)[2:].zfill(2) # Return the checksum in 2 digit hex form
def calculate_next_sequence_number(self, current_number): # Calculate to next Packet 3 sequence number
current_first_byte = int(current_number[0:2], 16) # Convert the first byte in hex string form to an integer
current_third_nibble = int(current_number[2:3], 16) # Convert the third nibble in hex string form to an integer
if current_third_nibble == 11: # The third nibble cycles between Hex 8 and Hex b
next_third_nibble = 8 # Reset to 8 if it's completed its full cycle
if current_first_byte == 50: # The first byte cycles between Hex 00 and Hex 32, incrementing by one when the third nibble completes its full cycle
next_first_byte = 0 # Reset to 0 if it's completed its full cycle
else:
next_first_byte = current_first_byte + 1
else:
next_first_byte = current_first_byte
next_third_nibble = current_third_nibble + 1
next_string = hex(next_first_byte)[2:].zfill(2) + hex(next_third_nibble)[2:] + "f" # Combine the first byte and third nibble in string form, adding hex f at the end to make it two complete bytes
return next_string
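    # Illustrative walk-through: starting from "008f" the sequence advances
    # "008f" -> "009f" -> "00af" -> "00bf" -> "018f" and so on, up to "32bf",
    # after which it wraps back to "008f" (third nibble cycles 8..b, first byte 0x00..0x32).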
def detect_damper_position(self, calibrate):
resp2 = self.spi.xfer2([0x11, 0x00, 0x00])
resp2a = int(resp2.pop(1)/2) # Remove LSB since we only need 10% resolution
resp2b = int(resp2.pop(1)) # Capture but ignore these three bits since we only need 10% resolution
self.damper_position = int(resp2a) * 2 * 8 # Move bits to their correct position and use Y-Axis number as the position
if calibrate == False:
            self.current_damper_percent = int((self.damper_night_position - self.damper_position)/((self.damper_night_position - self.damper_day_position)/100)) # Sets Day Position at 100% and Night Position at 0% - Assuming that the Night Position has a higher reading from the damper position sensor than the Day Position
# Convert the reported damper percentage to the nearest 10% of the current percentage
if self.current_damper_percent >=95:
self.reported_damper_percent = 100
elif self.current_damper_percent < 95 and self.current_damper_percent >= 85:
self.reported_damper_percent = 90
elif self.current_damper_percent < 85 and self.current_damper_percent >= 75:
self.reported_damper_percent = 80
elif self.current_damper_percent < 75 and self.current_damper_percent >= 65:
self.reported_damper_percent = 70
elif self.current_damper_percent < 65 and self.current_damper_percent >= 55:
self.reported_damper_percent = 60
elif self.current_damper_percent < 55 and self.current_damper_percent >= 45:
self.reported_damper_percent = 50
elif self.current_damper_percent < 45 and self.current_damper_percent >= 35:
self.reported_damper_percent = 40
elif self.current_damper_percent < 35 and self.current_damper_percent >= 25:
self.reported_damper_percent = 30
elif self.current_damper_percent < 25 and self.current_damper_percent >= 15:
self.reported_damper_percent = 20
elif self.current_damper_percent < 15 and self.current_damper_percent >= 5:
self.reported_damper_percent = 10
else:
self.reported_damper_percent = 0
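    # Example with the default calibration (day 416, night 1648): a raw sensor reading of
    # 1032 gives a current_damper_percent of about 50, which is reported as 50
    # (damper roughly halfway between the day and night zones).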
def adjust_damper_position(self):
if self.requested_damper_percent != self.reported_damper_percent:
self.adjusting_damper = True
if self.requested_damper_percent > self.reported_damper_percent:
self.damper_day_zone() # Set damper switch to day zone if the damper's to be moved towards the day zone
            else: # requested percent is below reported percent
                self.damper_night_zone() # Set damper switch to night zone if the damper's to be moved towards the night zone
else:
if self.adjusting_damper == True: # Flag that the damper is no longer being adjusted if it was previously being adjusted
self.adjusting_damper = False
self.update_status()
if self.requested_damper_percent == 100: # Lock damper in Day Zone if the damper is to be wholly in Day Zone
self.damper_day_zone()
            elif self.requested_damper_percent == 0: # Lock damper in Night Zone if the damper is to be wholly in Night Zone
self.damper_night_zone()
else:
self.hold_damper() # Hold damper in position if the damper is to be between zones
def damper_day_zone(self): # Move damper towards the Day Zone
self.damper_stop_state = False
GPIO.output(self.damper_stop, False)
self.damper_zone_state = False
GPIO.output(self.damper_zone, False)
def damper_night_zone(self): # Move damper towards the Night Zone
self.damper_stop_state = False
GPIO.output(self.damper_stop, False)
self.damper_zone_state = True
GPIO.output(self.damper_zone, True)
def hold_damper(self): # Stop damper motion
self.damper_stop_state = True
GPIO.output(self.damper_stop, True)
def calibrate_damper(self, damper_movement_time):
print('Calibrating Damper')
print('Taking Control of Damper')
self.damper_control_state = True
GPIO.output(self.damper_control, True) # Take Control of Damper
time.sleep(1)
print('Moving Damper to Night Zone')
self.damper_night_zone()
time.sleep(damper_movement_time)
print('Moved Damper to Night Zone')
self.detect_damper_position(calibrate = True)
print('Night Zone Damper Position', self.damper_position)
print('Changing Night Zone Damper Position from', self.damper_night_position, 'to', self.damper_position)
self.damper_night_position = self.damper_position
print('Moving Damper to Day Zone')
self.damper_day_zone()
time.sleep(damper_movement_time)
print('Moved Damper to Day Zone')
self.detect_damper_position(calibrate = True)
print('Day Zone Damper Position', self.damper_position)
print('Changing Day Zone Damper Position from', self.damper_day_position, 'to', self.damper_position)
self.damper_day_position = self.damper_position
print('Relinquishing Control of Damper')
self.damper_control_state = False # Flag that the damper is no longer being controlled
GPIO.output(self.damper_control, False) # Relinquish Control of Damper
time.sleep(1)
def shutdown_cleanup(self):
self.print_status("Northcliff Aircon Controller shutting down on ")
self.process_thermo_off_command() #Turn Aircon off
GPIO.cleanup()
self.client.loop_stop() #Stop monitoring mqtt thread
self.spi.close()
sys.exit(0)
### End of methods called in the main loop ###
### Debugging methods ###
def capture_and_print_serial(self): # Only used for serial comms debugging
self.controller_msg = self.aircon_comms.read(8)
print(str(self.controller_msg))
def capture_and_file_serial_data(self, capture_file_name): # Only used for serial comms debugging
a = 0
with open(capture_file_name, "wb+") as f:
while a <= 1000:
self.controller_msg = self.aircon_comms.read(8)
f.write(self.controller_msg)
print(str(a) + str(self.controller_msg))
a = a + 1
### End end of debugging methods ###
### Main Loop ###
def run(self):
try:
self.startup()
while True:
self.process_home_manager_heartbeat() # Send heartbeat to Home Manager every 120 loops.
if self.enable_serial_comms_loop == True:
self.aircon_comms.flushInput() # remove sent packets from aircon comms buffer
self.build_packets(self.packet_1_dictionary, self.packet_3_dictionary) # Build Packets 1 and 3
self.send_serial_aircon_data(self.packet_1_send) # Send Packet 1 to aircon comms port
time.sleep(0.160) # Wait until Packet 1 has been sent before clearing aircon comms buffer
self.aircon_comms.flushInput() # remove sent packets from aircon comms buffer
time.sleep(0.15) # Gap between Packets 1 and 2
self.receive_serial_aircon_data() # Receive Packet 2 and decode it
if self.packet_2_error == False: #Only send packet 3 if packet 2 was OK
time.sleep(0.16) # Gap between Packets 2 and 3
self.send_serial_aircon_data(self.packet_3_send) # Send Packet 3
self.packet_3_dictionary["1Header3"] = self.calculate_next_sequence_number(self.packet_3_dictionary["1Header3"]) # Set up the sequence number for the next transmission of Packet 3
else:
print("Packet 3 not sent because of Packet 2 error")
time.sleep(0.45) # Wait until Packet 3 has been sent, plus 0.05 sec gap (or equivalent time if it isn't sent)
self.detect_damper_position(calibrate = False) # Determine the damper's current position
self.adjust_damper_position() # Adjusts damper position if the current damper position is different from the requested damper position
else:
if self.remote_operation_on == True: # This ensures that the disconnect is only done once
self.remote_operation_on = False # Flag that the aircon is not being controlled
GPIO.output(self.control_enable, False) # Relinquish Control of the aircon
self.damper_control_state = False # Flag that the damper is no longer being controlled
GPIO.output(self.damper_control, False) # Relinquish Control of Damper
self.damper_day_zone() # Turn Damper Zone and Stop relays Off
self.heartbeat_count = 0 # Reset the heartbeat count to start from zero when Home Manager comms is restored
if self.no_heartbeat_ack == True:
self.malfunction = True
else:
self.malfunction = False #Clear Malfunction Flag (Packets might be corrupted on disconnect) unless there's a loss of heartbeat
self.update_status()
else:
time.sleep (1)
except KeyboardInterrupt:
self.shutdown_cleanup()
### End of Main Loop ###
if __name__ =='__main__':
controller = NorthcliffAirconController(calibrate_damper_on_startup = False)
controller.run()
| 58.285714 | 322 | 0.644634 | [
"MIT"
] | roscoe81/Aircon-Controller | Northcliff_Aircon_Controller.py | 38,352 | Python |
from django.utils.version import get_version
VERSION = (3, 1, 6, "final", 0)
__version__ = get_version(VERSION)
def setup(set_prefix=True):
"""
Configure the settings (this happens as a side effect of accessing the
first setting), configure logging and populate the app registry.
Set the thread-local urlresolvers script prefix if `set_prefix` is True.
"""
from django.apps import apps
from django.conf import settings
from django.urls import set_script_prefix
from django.utils.log import configure_logging
configure_logging(settings.LOGGING_CONFIG, settings.LOGGING)
if set_prefix:
set_script_prefix(
"/" if settings.FORCE_SCRIPT_NAME is None else settings.FORCE_SCRIPT_NAME
)
apps.populate(settings.INSTALLED_APPS)
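# Illustrative sketch of the usual standalone-script pattern (the settings module name
# "myproject.settings" is a placeholder):
#
#   import os, django
#   os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myproject.settings")
#   django.setup()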
| 31.96 | 85 | 0.732165 | [
"MIT"
] | nverbois/TFE21-232 | [email protected]/Lib/site-packages/django/__init__.py | 799 | Python |
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as AutomotiveProvider
class Provider(AutomotiveProvider):
# from
# https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_the_United_Kingdom
license_formats = (
'??## ???',
'??##???'
)
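    # In these patterns the base provider substitutes '?' with a random uppercase letter
    # and '#' with a random digit, yielding current-style UK plates such as "AB12 CDE".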
| 21.785714 | 85 | 0.67541 | [
"BSD-3-Clause"
] | AMuratTuran/mkn | oscar/lib/python2.7/site-packages/faker/providers/automotive/en_GB/__init__.py | 305 | Python |
import mysql_conn
class BaseField:
def __init__(self,name,column_type,primary_key,default):
self.name=name
self.column_type=column_type
self.primary_key=primary_key
self.default=default
class StringField(BaseField):
def __init__(self,name,column_type='varchar(200)',primary_key=False,default=None):
super().__init__(name,column_type,primary_key,default)
class IntegerField(BaseField):
def __init__(self,name,column_type='int',primary_key=False,default=0):
super().__init__(name, column_type, primary_key, default)
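# ModelsMeta below scans each model class for BaseField attributes, collects them into a
# `mappings` dict, removes them from the class namespace, and records the table name and
# the single primary-key column, raising if zero or more than one primary key is declared.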
class ModelsMeta(type):
def __new__(cls,name,bases,attr):
if name=='Models':
return type.__new__(cls,name,bases,attr)
table_name=attr.get('table_name',None)
if not table_name:
table_name=name
primary_key=None
mappings=dict()
for k,v in attr.items():
if isinstance(v,BaseField):
mappings[k]=v
if v.primary_key:
if primary_key:
                        raise TypeError('duplicate primary key')
primary_key=k
for k in mappings.keys():
attr.pop(k)
if not primary_key:
            raise TypeError('missing primary key')
attr['mappings']=mappings
attr['primary_key']=primary_key
attr['table_name']=table_name
return type.__new__(cls,name,bases,attr)
class Models(dict,metaclass=ModelsMeta):
def __init__(self,**kwargs):
super().__init__(**kwargs)
def __setattr__(self, key, value):
self[key]=value
def __getattr__(self, item):
try:
return self[item]
except BaseException:
            raise TypeError('no such attribute')
@classmethod
def select_one(cls,**kwargs):
key=list(kwargs.keys())[0]
value=kwargs[key]
sql='select * from %s where %s=?'%(cls.table_name,key)
sql=sql.replace('?','%s')
ms=mysql_conn.Mysql()
re=ms.select(sql,value)
if re:
return cls(**re[0])
else:
return
@classmethod
def select_many(cls,**kwargs):
ms=mysql_conn.Mysql()
if kwargs:
key = list(kwargs.keys())[0]
value = kwargs[key]
sql = 'select * from %s where %s=?' % (cls.table_name, key)
sql = sql.replace('?', '%s')
re = ms.select(sql, value)
else:
sql='select * from %s' %(cls.table_name)
re = ms.select(sql, None)
if re:
return list(cls(**r) for r in re)
else:
return
def update(self):
ms=mysql_conn.Mysql()
field_list=[]
field_list_value=[]
primary_key_value=None
for k,v in self.mappings.items():
if v.primary_key:
primary_key_value=getattr(self,v.name,None)
else:
field_list.append(v.name+'=?')
field_list_value.append(getattr(self,v.name,v.default))
sql='update %s set %s where %s = %s'%(self.table_name,','.join(field_list),self.primary_key,primary_key_value)
sql=sql.replace('?','%s')
ms.execute(sql,field_list_value)
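    # For the User model below, update() on a row with id=1 builds SQL roughly like
    #   update user set name=%s,password=%s where id = 1
    # with the non-key field values passed separately to the MySQL driver.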
def save(self):
ms = mysql_conn.Mysql()
field_list = []
field_list_value = []
char_list=[]
for k, v in self.mappings.items():
if not v.primary_key:
field_list.append(v.name)
char_list.append('?')
field_list_value.append(getattr(self,v.name,v.default))
sql='insert into %s(%s) value(%s)'%(self.table_name,','.join(field_list),','.join(char_list))
sql=sql.replace('?','%s')
ms.execute(sql,field_list_value)
class User(Models):
table_name='user'
id=IntegerField('id',primary_key=True)
name=StringField('name')
password=StringField('password')
if __name__ == '__main__':
# user=User.select_one(id=1)
    # user.name='test 1111'
# user.update()
# print(user)
user=User(name='miaoqinian',password='xxx')
user.save()
| 28.816901 | 118 | 0.571114 | [
"MIT"
] | miaoqinian/myorm | orm1.py | 4,126 | Python |
import copy
import os
import logging
import pickle
from typing import Dict, List, Optional, Union
try:
import sigopt as sgo
Connection = sgo.Connection
except ImportError:
sgo = None
Connection = None
from ray.tune.suggest import Searcher
logger = logging.getLogger(__name__)
class SigOptSearch(Searcher):
"""A wrapper around SigOpt to provide trial suggestions.
You must install SigOpt and have a SigOpt API key to use this module.
Store the API token as an environment variable ``SIGOPT_KEY`` as follows:
.. code-block:: bash
pip install -U sigopt
export SIGOPT_KEY= ...
You will need to use the `SigOpt experiment and space specification
<https://app.sigopt.com/docs/overview/create>`_.
This module manages its own concurrency.
Parameters:
space (list of dict): SigOpt configuration. Parameters will be sampled
from this configuration and will be used to override
parameters generated in the variant generation process.
Not used if existing experiment_id is given
name (str): Name of experiment. Required by SigOpt.
max_concurrent (int): Number of maximum concurrent trials supported
based on the user's SigOpt plan. Defaults to 1.
connection (Connection): An existing connection to SigOpt.
experiment_id (str): Optional, if given will connect to an existing
experiment. This allows for a more interactive experience with
SigOpt, such as prior beliefs and constraints.
observation_budget (int): Optional, can improve SigOpt performance.
project (str): Optional, Project name to assign this experiment to.
SigOpt can group experiments by project
metric (str or list(str)): If str then the training result
objective value attribute. If list(str) then a list of
metrics that can be optimized together. SigOpt currently
supports up to 2 metrics.
mode (str or list(str)): If experiment_id is given then this
field is ignored, If str then must be one of {min, max}.
If list then must be comprised of {min, max, obs}. Determines
whether objective is minimizing or maximizing the metric
attribute. If metrics is a list then mode must be a list
of the same length as metric.
Example:
.. code-block:: python
space = [
{
'name': 'width',
'type': 'int',
'bounds': {
'min': 0,
'max': 20
},
},
{
'name': 'height',
'type': 'int',
'bounds': {
'min': -100,
'max': 100
},
},
]
algo = SigOptSearch(
space, name="SigOpt Example Experiment",
max_concurrent=1, metric="mean_loss", mode="min")
Example:
.. code-block:: python
space = [
{
'name': 'width',
'type': 'int',
'bounds': {
'min': 0,
'max': 20
},
},
{
'name': 'height',
'type': 'int',
'bounds': {
'min': -100,
'max': 100
},
},
]
algo = SigOptSearch(
space, name="SigOpt Multi Objective Example Experiment",
max_concurrent=1, metric=["average", "std"], mode=["max", "min"])
"""
OBJECTIVE_MAP = {
"max": {
"objective": "maximize",
"strategy": "optimize"
},
"min": {
"objective": "minimize",
"strategy": "optimize"
},
"obs": {
"strategy": "store"
}
}
def __init__(self,
space: List[Dict] = None,
name: str = "Default Tune Experiment",
max_concurrent: int = 1,
connection: Optional[Connection] = None,
experiment_id: Optional[str] = None,
observation_budget: Optional[int] = None,
project: Optional[str] = None,
metric: Union[None, str, List[str]] = "episode_reward_mean",
mode: Union[None, str, List[str]] = "max",
points_to_evaluate: Optional[List[Dict]] = None,
**kwargs):
assert (experiment_id is
None) ^ (space is None), "space xor experiment_id must be set"
assert type(max_concurrent) is int and max_concurrent > 0
if connection is not None:
self.conn = connection
else:
assert sgo is not None, """SigOpt must be installed!
You can install SigOpt with the command:
`pip install -U sigopt`."""
assert "SIGOPT_KEY" in os.environ, \
"SigOpt API key must be stored as " \
"environ variable at SIGOPT_KEY"
# Create a connection with SigOpt API, requires API key
self.conn = sgo.Connection(client_token=os.environ["SIGOPT_KEY"])
self._max_concurrent = max_concurrent
if isinstance(metric, str):
metric = [metric]
mode = [mode]
self._metric = metric
self._live_trial_mapping = {}
if experiment_id is None:
sigopt_params = dict(
name=name,
parameters=space,
parallel_bandwidth=self._max_concurrent)
if observation_budget is not None:
sigopt_params["observation_budget"] = observation_budget
if project is not None:
sigopt_params["project"] = project
if len(metric) > 1 and observation_budget is None:
raise ValueError(
"observation_budget is required for an"
"experiment with more than one optimized metric")
sigopt_params["metrics"] = self.serialize_metric(metric, mode)
self.experiment = self.conn.experiments().create(**sigopt_params)
else:
self.experiment = self.conn.experiments(experiment_id).fetch()
self._points_to_evaluate = points_to_evaluate
super(SigOptSearch, self).__init__(metric=metric, mode=mode, **kwargs)
def suggest(self, trial_id: str):
if self._max_concurrent:
if len(self._live_trial_mapping) >= self._max_concurrent:
return None
suggestion_kwargs = {}
if self._points_to_evaluate:
config = self._points_to_evaluate.pop(0)
suggestion_kwargs = {"assignments": config}
# Get new suggestion from SigOpt
suggestion = self.conn.experiments(
self.experiment.id).suggestions().create(**suggestion_kwargs)
self._live_trial_mapping[trial_id] = suggestion.id
return copy.deepcopy(suggestion.assignments)
def on_trial_complete(self,
trial_id: str,
result: Optional[Dict] = None,
error: bool = False):
"""Notification for the completion of trial.
If a trial fails, it will be reported as a failed Observation, telling
the optimizer that the Suggestion led to a metric failure, which
updates the feasible region and improves parameter recommendation.
Creates SigOpt Observation object for trial.
"""
if result:
payload = dict(
suggestion=self._live_trial_mapping[trial_id],
values=self.serialize_result(result))
self.conn.experiments(
self.experiment.id).observations().create(**payload)
# Update the experiment object
self.experiment = self.conn.experiments(self.experiment.id).fetch()
elif error:
# Reports a failed Observation
self.conn.experiments(self.experiment.id).observations().create(
failed=True, suggestion=self._live_trial_mapping[trial_id])
del self._live_trial_mapping[trial_id]
@staticmethod
def serialize_metric(metrics: List[str], modes: List[str]):
"""
Converts metrics to https://app.sigopt.com/docs/objects/metric
"""
serialized_metric = []
for metric, mode in zip(metrics, modes):
serialized_metric.append(
dict(name=metric, **SigOptSearch.OBJECTIVE_MAP[mode].copy()))
return serialized_metric
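        # e.g. serialize_metric(["mean_loss"], ["min"]) returns
        # [{"name": "mean_loss", "objective": "minimize", "strategy": "optimize"}]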
def serialize_result(self, result: Dict):
"""
Converts experiments results to
https://app.sigopt.com/docs/objects/metric_evaluation
"""
missing_scores = [
metric for metric in self._metric if metric not in result
]
if missing_scores:
raise ValueError(
f"Some metrics specified during initialization are missing. "
f"Missing metrics: {missing_scores}, provided result {result}")
values = []
for metric in self._metric:
value = dict(name=metric, value=result[metric])
values.append(value)
return values
def save(self, checkpoint_path: str):
trials_object = (self.experiment.id, self._live_trial_mapping,
self._points_to_evaluate)
with open(checkpoint_path, "wb") as outputFile:
pickle.dump(trials_object, outputFile)
def restore(self, checkpoint_path: str):
with open(checkpoint_path, "rb") as inputFile:
trials_object = pickle.load(inputFile)
experiment_id, self._live_trial_mapping, self._points_to_evaluate = \
trials_object
self.experiment = self.conn.experiments(experiment_id).fetch()
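# A minimal usage sketch (not part of the module): the parameter space, experiment
# name and trainable below are hypothetical, and a SIGOPT_KEY environment variable
# is assumed to be set.
#
#     from ray import tune
#     from ray.tune.suggest.sigopt import SigOptSearch
#
#     space = [{"name": "lr", "type": "double",
#               "bounds": {"min": 1e-4, "max": 1e-1}}]
#     algo = SigOptSearch(space, name="my-experiment", max_concurrent=2,
#                         metric="mean_loss", mode="min")
#     tune.run(my_trainable, search_alg=algo, num_samples=20)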
| 36.090253 | 79 | 0.571371 | [
"Apache-2.0"
] | Actexpler/ray | python/ray/tune/suggest/sigopt.py | 9,997 | Python |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/13a_learner.ipynb (unless otherwise specified).
__all__ = ['CancelFitException', 'CancelEpochException', 'CancelTrainException', 'CancelValidException',
'CancelBatchException', 'replacing_yield', 'mk_metric', 'save_model', 'load_model', 'Learner',
'VerboseCallback', 'Metric', 'AvgMetric', 'AvgLoss', 'AvgSmoothLoss', 'Recorder', 'FetchPreds',
'load_learner']
# Cell
from .data.all import *
from .optimizer import *
from .callback.core import *
# Cell
defaults.lr = 1e-3
# Cell
def replacing_yield(o, attr, val):
"Context manager to temporarily replace an attribute"
old = getattr(o,attr)
try: yield setattr(o,attr,val)
finally: setattr(o,attr,old)
# Cell
def mk_metric(m):
"Convert `m` to an `AvgMetric`, unless it's already a `Metric`"
return m if isinstance(m, Metric) else AvgMetric(m)
# Cell
def save_model(file, model, opt, with_opt=True):
"Save `model` to `file` along with `opt` (if available, and if `with_opt`)"
if opt is None: with_opt=False
state = get_model(model).state_dict()
if with_opt: state = {'model': state, 'opt':opt.state_dict()}
torch.save(state, file)
# Cell
def load_model(file, model, opt, with_opt=None, device=None, strict=True):
"Load `model` from `file` along with `opt` (if available, and if `with_opt`)"
if isinstance(device, int): device = torch.device('cuda', device)
elif device is None: device = 'cpu'
state = torch.load(file, map_location=device)
hasopt = set(state)=={'model', 'opt'}
model_state = state['model'] if hasopt else state
get_model(model).load_state_dict(model_state, strict=strict)
if hasopt and ifnone(with_opt,True):
try: opt.load_state_dict(state['opt'])
except:
if with_opt: warn("Could not load the optimizer state.")
elif with_opt: warn("Saved filed doesn't contain an optimizer state.")
# Cell
def _try_concat(o):
try: return torch.cat(o)
except: return sum([L(o_[i,:] for i in range_of(o_)) for o_ in o], L())
# Cell
from contextlib import ExitStack
# Cell
_before_epoch = [event.begin_fit, event.begin_epoch]
_after_epoch = [event.after_epoch, event.after_fit]
# Cell
class Learner():
def __init__(self, dls, model, loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=trainable_params, cbs=None,
metrics=None, path=None, model_dir='models', wd=None, wd_bn_bias=False, train_bn=True,
moms=(0.95,0.85,0.95)):
store_attr(self, "dls,model,opt_func,lr,splitter,model_dir,wd,wd_bn_bias,train_bn,metrics,moms")
self.training,self.create_mbar,self.logger,self.opt,self.cbs = False,True,print,None,L()
if loss_func is None:
loss_func = getattr(dls.train_ds, 'loss_func', None)
assert loss_func is not None, "Could not infer loss function from the data, please pass a loss function."
self.loss_func = loss_func
self.path = path if path is not None else getattr(dls, 'path', Path('.'))
self.add_cbs([(cb() if isinstance(cb, type) else cb) for cb in L(defaults.callbacks)+L(cbs)])
self.model.to(self.dls.device)
if hasattr(self.model, 'reset'): self.model.reset()
self.epoch,self.n_epoch,self.loss = 0,1,tensor(0.)
@property
def metrics(self): return self._metrics
@metrics.setter
def metrics(self,v): self._metrics = L(v).map(mk_metric)
def add_cbs(self, cbs): L(cbs).map(self.add_cb)
def remove_cbs(self, cbs): L(cbs).map(self.remove_cb)
def add_cb(self, cb):
old = getattr(self, cb.name, None)
assert not old or isinstance(old, type(cb)), f"self.{cb.name} already registered"
cb.learn = self
setattr(self, cb.name, cb)
self.cbs.append(cb)
return self
def remove_cb(self, cb):
cb.learn = None
if hasattr(self, cb.name): delattr(self, cb.name)
if cb in self.cbs: self.cbs.remove(cb)
@contextmanager
def added_cbs(self, cbs):
self.add_cbs(cbs)
yield
self.remove_cbs(cbs)
def ordered_cbs(self, cb_func): return [cb for cb in sort_by_run(self.cbs) if hasattr(cb, cb_func)]
def __call__(self, event_name): L(event_name).map(self._call_one)
def _call_one(self, event_name):
assert hasattr(event, event_name)
[cb(event_name) for cb in sort_by_run(self.cbs)]
def _bn_bias_state(self, with_bias): return bn_bias_params(self.model, with_bias).map(self.opt.state)
def create_opt(self):
self.opt = self.opt_func(self.splitter(self.model), lr=self.lr)
if not self.wd_bn_bias:
for p in self._bn_bias_state(True ): p['do_wd'] = False
if self.train_bn:
for p in self._bn_bias_state(False): p['force_train'] = True
def _split(self, b):
i = getattr(self.dls, 'n_inp', 1 if len(b)==1 else len(b)-1)
self.xb,self.yb = b[:i],b[i:]
def all_batches(self):
self.n_iter = len(self.dl)
for o in enumerate(self.dl): self.one_batch(*o)
def one_batch(self, i, b):
self.iter = i
try:
self._split(b); self('begin_batch')
self.pred = self.model(*self.xb); self('after_pred')
if len(self.yb) == 0: return
self.loss = self.loss_func(self.pred, *self.yb); self('after_loss')
if not self.training: return
self.loss.backward(); self('after_backward')
self.opt.step(); self('after_step')
self.opt.zero_grad()
except CancelBatchException: self('after_cancel_batch')
finally: self('after_batch')
def _do_begin_fit(self, n_epoch):
self.n_epoch,self.loss = n_epoch,tensor(0.); self('begin_fit')
def _do_epoch_train(self):
try:
self.dl = self.dls.train; self('begin_train')
self.all_batches()
except CancelTrainException: self('after_cancel_train')
finally: self('after_train')
def _do_epoch_validate(self, ds_idx=1, dl=None):
if dl is None: dl = self.dls[ds_idx]
names = ['shuffle', 'drop_last']
try:
dl,old,has = change_attrs(dl, names, [False,False])
self.dl = dl; self('begin_validate')
with torch.no_grad(): self.all_batches()
except CancelValidException: self('after_cancel_validate')
finally:
dl,*_ = change_attrs(dl, names, old, has); self('after_validate')
def fit(self, n_epoch, lr=None, wd=None, cbs=None, reset_opt=False):
with self.added_cbs(cbs):
if reset_opt or not self.opt: self.create_opt()
if wd is None: wd = self.wd
if wd is not None: self.opt.set_hypers(wd=wd)
self.opt.set_hypers(lr=self.lr if lr is None else lr)
try:
self._do_begin_fit(n_epoch)
for epoch in range(n_epoch):
try:
self.epoch=epoch; self('begin_epoch')
self._do_epoch_train()
self._do_epoch_validate()
except CancelEpochException: self('after_cancel_epoch')
finally: self('after_epoch')
except CancelFitException: self('after_cancel_fit')
finally: self('after_fit')
def validate(self, ds_idx=1, dl=None, cbs=None):
if dl is None: dl = self.dls[ds_idx]
with self.added_cbs(cbs), self.no_logging(), self.no_mbar():
self(_before_epoch)
self._do_epoch_validate(ds_idx, dl)
self(_after_epoch)
return getattr(self, 'final_record', None)
@delegates(GatherPredsCallback.__init__)
def get_preds(self, ds_idx=1, dl=None, with_input=False, with_decoded=False, with_loss=False, act=None,
inner=False, **kwargs):
if dl is None: dl = self.dls[ds_idx].new(shuffled=False, drop_last=False)
cb = GatherPredsCallback(with_input=with_input, with_loss=with_loss, **kwargs)
#with self.no_logging(), self.added_cbs(cb), self.loss_not_reduced(), self.no_mbar():
ctx_mgrs = [self.no_logging(), self.added_cbs(cb), self.no_mbar()]
if with_loss: ctx_mgrs.append(self.loss_not_reduced())
with ExitStack() as stack:
for mgr in ctx_mgrs: stack.enter_context(mgr)
self(event.begin_epoch if inner else _before_epoch)
self._do_epoch_validate(dl=dl)
self(event.after_epoch if inner else _after_epoch)
if act is None: act = getattr(self.loss_func, 'activation', noop)
res = cb.all_tensors()
pred_i = 1 if with_input else 0
if res[pred_i] is not None:
res[pred_i] = act(res[pred_i])
if with_decoded: res.insert(pred_i+2, getattr(self.loss_func, 'decodes', noop)(res[pred_i]))
return tuple(res)
def predict(self, item, rm_type_tfms=None, with_input=False):
dl = self.dls.test_dl([item], rm_type_tfms=rm_type_tfms)
inp,preds,_,dec_preds = self.get_preds(dl=dl, with_input=True, with_decoded=True)
dec = self.dls.decode_batch((*tuplify(inp),*tuplify(dec_preds)))[0]
i = getattr(self.dls, 'n_inp', -1)
dec_inp,dec_targ = map(detuplify, [dec[:i],dec[i:]])
res = dec_targ,dec_preds[0],preds[0]
if with_input: res = (dec_inp,) + res
return res
def show_results(self, ds_idx=1, dl=None, max_n=9, shuffle=True, **kwargs):
if dl is None: dl = self.dls[ds_idx].new(shuffle=shuffle)
b = dl.one_batch()
_,_,preds = self.get_preds(dl=[b], with_decoded=True)
self.dls.show_results(b, preds, max_n=max_n, **kwargs)
def show_training_loop(self):
indent = 0
for s in _loop:
if s.startswith('Start'): print(f'{" "*indent}{s}'); indent += 2
elif s.startswith('End'): indent -= 2; print(f'{" "*indent}{s}')
else: print(f'{" "*indent} - {s:15}:', self.ordered_cbs(s))
@contextmanager
def no_logging(self): return replacing_yield(self, 'logger', noop)
@contextmanager
def no_mbar(self): return replacing_yield(self, 'create_mbar', False)
@contextmanager
def loss_not_reduced(self):
if hasattr(self.loss_func, 'reduction'): return replacing_yield(self.loss_func, 'reduction', 'none')
else: return replacing_yield(self, 'loss_func', partial(self.loss_func, reduction='none'))
def save(self, file, with_opt=True):
if rank_distrib(): return # don't save if slave proc
file = join_path_file(file, self.path/self.model_dir, ext='.pth')
save_model(file, self.model, getattr(self,'opt',None), with_opt)
def load(self, file, with_opt=None, device=None, strict=True):
if device is None: device = self.dls.device
if self.opt is None: self.create_opt()
distrib_barrier()
file = join_path_file(file, self.path/self.model_dir, ext='.pth')
load_model(file, self.model, self.opt, with_opt=with_opt, device=device, strict=strict)
return self
Learner.x,Learner.y = add_props(lambda i,x: detuplify((x.xb,x.yb)[i]))
# Cell
add_docs(Learner, "Group together a `model`, some `dls` and a `loss_func` to handle training",
add_cbs="Add `cbs` to the list of `Callback` and register `self` as their learner",
add_cb="Add `cb` to the list of `Callback` and register `self` as their learner",
remove_cbs="Remove `cbs` from the list of `Callback` and deregister `self` as their learner",
remove_cb="Add `cb` from the list of `Callback` and deregister `self` as their learner",
added_cbs="Context manage that temporarily adds `cbs`",
ordered_cbs="Return a list of `Callback` for one step `cb_func` in the training loop",
create_opt="Create an optimizer with `lr`",
one_batch="Train or evaluate `self.model` on batch `(xb,yb)`",
all_batches="Train or evaluate `self.model` on all batches of `self.dl`",
fit="Fit `self.model` for `n_epoch` using `cbs`. Optionally `reset_opt`.",
validate="Validate on `dl` with potential new `cbs`.",
get_preds="Get the predictions and targets on the `ds_idx`-th dbunchset or `dl`, optionally `with_input` and `with_loss`",
predict="Return the prediction on `item`, fully decoded, loss function decoded and probabilities",
show_results="Show some predictions on `ds_idx`-th dbunchset or `dl`",
show_training_loop="Show each step in the training loop",
no_logging="Context manager to temporarily remove `logger`",
no_mbar="Context manager to temporarily prevent the master progress bar from being created",
loss_not_reduced="A context manager to evaluate `loss_func` with reduction set to none.",
save="Save model and optimizer state (if `with_opt`) to `self.path/self.model_dir/file`",
load="Load model and optimizer state (if `with_opt`) from `self.path/self.model_dir/file` using `device`"
)
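# A minimal usage sketch (illustrative; `dls`, `model` and `accuracy` are assumed to
# come from the surrounding fastai2 application code, and the loss function is
# inferred from `dls.train_ds` as in `Learner.__init__` above):
#
#     learn = Learner(dls, model, metrics=accuracy)
#     learn.fit(1, lr=1e-3)
#     learn.save('stage-1')    # writes to self.path/self.model_dir/stage-1.pth
#     learn.load('stage-1')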
# Cell
class VerboseCallback(Callback):
"Callback that prints the name of each event called"
def __call__(self, event_name):
print(event_name)
super().__call__(event_name)
# Cell
@docs
class Metric():
"Blueprint for defining a metric"
def reset(self): pass
def accumulate(self, learn): pass
@property
def value(self): raise NotImplementedError
@property
def name(self): return class2attr(self, 'Metric')
_docs = dict(
reset="Reset inner state to prepare for new computation",
name="Name of the `Metric`, camel-cased and with Metric removed",
accumulate="Use `learn` to update the state with new results",
value="The value of the metric")
# Cell
def _maybe_reduce(val):
if num_distrib()>1:
val = val.clone()
torch.distributed.all_reduce(val, op=torch.distributed.ReduceOp.SUM)
val /= num_distrib()
return val
# Cell
class AvgMetric(Metric):
"Average the values of `func` taking into account potential different batch sizes"
def __init__(self, func): self.func = func
def reset(self): self.total,self.count = 0.,0
def accumulate(self, learn):
bs = find_bs(learn.yb)
self.total += to_detach(self.func(learn.pred, *learn.yb))*bs
self.count += bs
@property
def value(self): return self.total/self.count if self.count != 0 else None
@property
def name(self): return self.func.func.__name__ if hasattr(self.func, 'func') else self.func.__name__
# Cell
class AvgLoss(Metric):
"Average the losses taking into account potential different batch sizes"
def reset(self): self.total,self.count = 0.,0
def accumulate(self, learn):
bs = find_bs(learn.yb)
self.total += to_detach(learn.loss.mean())*bs
self.count += bs
@property
def value(self): return self.total/self.count if self.count != 0 else None
@property
def name(self): return "loss"
# Cell
class AvgSmoothLoss(Metric):
"Smooth average of the losses (exponentially weighted with `beta`)"
def __init__(self, beta=0.98): self.beta = beta
def reset(self): self.count,self.val = 0,tensor(0.)
def accumulate(self, learn):
self.count += 1
self.val = torch.lerp(to_detach(learn.loss.mean(), gather=False), self.val, self.beta)
@property
def value(self): return self.val/(1-self.beta**self.count)
# Cell
from fastprogress.fastprogress import format_time
def _maybe_item(t):
t = t.value
return t.item() if isinstance(t, Tensor) and t.numel()==1 else t
# Cell
class Recorder(Callback):
"Callback that registers statistics (lr, loss and metrics) during training"
run_after = TrainEvalCallback
def __init__(self, add_time=True, train_metrics=False, valid_metrics=True, beta=0.98):
store_attr(self, 'add_time,train_metrics,valid_metrics')
self.loss,self.smooth_loss = AvgLoss(),AvgSmoothLoss(beta=beta)
def begin_fit(self):
"Prepare state for training"
self.lrs,self.iters,self.losses,self.values = [],[],[],[]
names = self.metrics.attrgot('name')
if self.train_metrics and self.valid_metrics:
names = L('loss') + names
names = names.map('train_{}') + names.map('valid_{}')
elif self.valid_metrics: names = L('train_loss', 'valid_loss') + names
else: names = L('train_loss') + names
if self.add_time: names.append('time')
self.metric_names = 'epoch'+names
self.smooth_loss.reset()
def after_batch(self):
"Update all metrics and records lr and smooth loss in training"
if len(self.yb) == 0: return
mets = self._train_mets if self.training else self._valid_mets
for met in mets: met.accumulate(self.learn)
if not self.training: return
self.lrs.append(self.opt.hypers[-1]['lr'])
self.losses.append(self.smooth_loss.value)
self.learn.smooth_loss = self.smooth_loss.value
def begin_epoch(self):
"Set timer if `self.add_time=True`"
self.cancel_train,self.cancel_valid = False,False
if self.add_time: self.start_epoch = time.time()
self.log = L(getattr(self, 'epoch', 0))
def begin_train (self): self._train_mets[1:].map(Self.reset())
def begin_validate(self): self._valid_mets.map(Self.reset())
def after_train (self): self.log += self._train_mets.map(_maybe_item)
def after_validate(self): self.log += self._valid_mets.map(_maybe_item)
def after_cancel_train(self): self.cancel_train = True
def after_cancel_validate(self): self.cancel_valid = True
def after_epoch(self):
"Store and log the loss/metric values"
self.learn.final_record = self.log[1:].copy()
self.values.append(self.learn.final_record)
if self.add_time: self.log.append(format_time(time.time() - self.start_epoch))
self.logger(self.log)
self.iters.append(self.smooth_loss.count)
@property
def _train_mets(self):
if getattr(self, 'cancel_train', False): return L()
return L(self.smooth_loss) + (self.metrics if self.train_metrics else L())
@property
def _valid_mets(self):
if getattr(self, 'cancel_valid', False): return L()
return (L(self.loss) + self.metrics if self.valid_metrics else L())
def plot_loss(self, skip_start=5, with_valid=True):
plt.plot(list(range(skip_start, len(self.losses))), self.losses[skip_start:], label='train')
if with_valid:
idx = (np.array(self.iters)<skip_start).sum()
plt.plot(self.iters[idx:], L(self.values[idx:]).itemgot(1), label='valid')
plt.legend()
# Cell
add_docs(Recorder,
begin_train = "Reset loss and metrics state",
after_train = "Log loss and metric values on the training set (if `self.training_metrics=True`)",
begin_validate = "Reset loss and metrics state",
after_validate = "Log loss and metric values on the validation set",
after_cancel_train = "Ignore training metrics for this epoch",
after_cancel_validate = "Ignore validation metrics for this epoch",
plot_loss = "Plot the losses from `skip_start` and onward")
defaults.callbacks = [TrainEvalCallback, Recorder]
# Cell
class FetchPreds(Callback):
"A callback to fetch predictions during the training loop"
def __init__(self, ds_idx=1, dl=None, with_input=False, with_decoded=False):
store_attr(self, 'ds_idx,dl,with_input,with_decoded')
def after_validate(self):
learn,rec = self.learn,self.learn.recorder
learn.remove_cbs([self,rec])
self.preds = learn.get_preds(ds_idx=self.ds_idx, dl=self.dl,
with_input=self.with_input, with_decoded=self.with_decoded, inner=True)
learn.add_cbs([self, rec])
# Cell
@patch
def freeze_to(self:Learner, n):
if self.opt is None: self.create_opt()
self.opt.freeze_to(n)
self.opt.clear_state()
@patch
def freeze(self:Learner): self.freeze_to(-1)
@patch
def unfreeze(self:Learner): self.freeze_to(0)
add_docs(Learner,
freeze_to="Freeze parameter groups up to `n`",
freeze="Freeze up to last parameter group",
unfreeze="Unfreeze the entire model")
# Cell
@patch
def export(self:Learner, fname='export.pkl'):
"Export the content of `self` without the items and the optimizer state for inference"
if rank_distrib(): return # don't export if slave proc
old_dbunch = self.dls
self.dls = self.dls.new_empty()
state = self.opt.state_dict()
self.opt = None
with warnings.catch_warnings():
        #To avoid the warning that comes from PyTorch about the model not being checked
warnings.simplefilter("ignore")
torch.save(self, self.path/fname)
self.create_opt()
self.opt.load_state_dict(state)
self.dls = old_dbunch
# Cell
def load_learner(fname, cpu=True):
"Load a `Learner` object in `fname`, optionally putting it on the `cpu`"
res = torch.load(fname, map_location='cpu' if cpu else None)
if hasattr(res, 'to_fp32'): res = res.to_fp32()
if cpu: res.dls.cpu()
return res
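# Export/inference round-trip sketch (illustrative; `learn` is a fitted Learner and
# `item` a raw input such as an image path):
#
#     learn.export('export.pkl')                        # drops data and optimizer state
#     learn_inf = load_learner(learn.path/'export.pkl')
#     dec, dec_pred, probs = learn_inf.predict(item)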
# Cell
@patch
def tta(self:Learner, ds_idx=1, dl=None, n=4, item_tfms=None, batch_tfms=None, beta=0.25, use_max=False):
"Return predictions on the `ds_idx` dataset or `dl` using Test Time Augmentation"
if dl is None: dl = self.dls[ds_idx]
if item_tfms is not None or batch_tfms is not None: dl = dl.new(after_item=item_tfms, after_batch=batch_tfms)
with dl.dataset.set_split_idx(0), self.no_mbar():
if hasattr(self,'progress'): self.progress.mbar = master_bar(list(range(n)))
aug_preds = []
for i in self.progress.mbar if hasattr(self,'progress') else range(n):
self.epoch = i #To keep track of progress on mbar since the progress callback will use self.epoch
aug_preds.append(self.get_preds(ds_idx, inner=True)[0][None])
aug_preds = torch.cat(aug_preds)
aug_preds = aug_preds.max(0)[0] if use_max else aug_preds.mean(0)
self.epoch = n
with dl.dataset.set_split_idx(1): preds,targs = self.get_preds(ds_idx, inner=True)
if use_max: return torch.stack([preds, aug_preds], 0).max(0)[0],targs
preds = (aug_preds,preds) if beta is None else torch.lerp(aug_preds, preds, beta)
return preds,targs | 43.911197 | 126 | 0.644245 | [
"Apache-2.0"
] | akashpalrecha/fastai2 | fastai2/learner.py | 22,746 | Python |
from django import template
from ..rocketchat import get_rc_id, get_rc_url, get_rc_ws_url
register = template.Library()
@register.inclusion_tag("rocketchat/chat.html", takes_context=True)
def chat(context):
user = getattr(context.get("request"), "user")
return {
"rocketchat_url": get_rc_url(),
"websocket_url": get_rc_ws_url(),
"user": user,
"user_id": user and user.is_authenticated and get_rc_id(user),
}
@register.inclusion_tag("rocketchat/livechat.html", takes_context=True)
def livechat(context):
return {
"rocketchat_url": get_rc_url(),
"user": getattr(context.get("request"), "user"),
}
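# Template usage sketch (assuming this module is loaded as the `rocketchat` tag
# library, matching its file name):
#
#     {% load rocketchat %}
#     {% chat %}          {# embedded chat for the current user #}
#     {% livechat %}      {# anonymous livechat widget #}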
| 26.76 | 71 | 0.68012 | [
"BSD-3-Clause"
] | leprikon-cz/leprikon | leprikon/templatetags/rocketchat.py | 669 | Python |
class dotnetPointList_t(object):
""" dotnetPointList_t(Size: int) """
def FromStruct(self, PointList):
""" FromStruct(self: dotnetPointList_t,PointList: PointList) """
pass
def ToStruct(self, PointList):
""" ToStruct(self: dotnetPointList_t,PointList: PointList) """
pass
@staticmethod
def __new__(self, Size):
"""
__new__[dotnetPointList_t]() -> dotnetPointList_t
__new__(cls: type,Size: int)
"""
pass
aPointList = None
ClientId = None
IndexCurrentItem = None
NumberItems = None
NumberItemsInSet = None
| 22.642857 | 73 | 0.604101 | [
"MIT"
] | YKato521/ironpython-stubs | release/stubs.min/Tekla/Structures/ModelInternal_parts/dotnetPointList_t.py | 634 | Python |
from typing import List, Optional
from google.cloud import ndb
from pyre_extensions import none_throws
from backend.common.models.event import Event
from backend.common.models.event_team import EventTeam
from backend.common.models.keys import EventKey
from backend.common.models.team import Team
from backend.common.queries.team_query import EventTeamsQuery
def preseed_teams(start_team: int, end_team: Optional[int] = None) -> List[ndb.Key]:
end_team = end_team or start_team
stored = ndb.put_multi(
[
Team(
id=f"frc{i}",
team_number=i,
)
for i in range(start_team, end_team + 1)
]
)
assert len(stored) == (end_team - start_team + 1)
return stored
def preseed_event_teams(team_keys: List[ndb.Key], event_key: EventKey) -> None:
event_teams = [
EventTeam(
id=f"{event_key}_{t.id()}",
event=ndb.Key(Event, event_key),
team=t,
year=int(event_key[:4]),
)
for t in team_keys
]
ndb.put_multi(event_teams)
def test_no_data() -> None:
teams = EventTeamsQuery(event_key="2020ct").fetch()
assert teams == []
def test_get_data() -> None:
stored_teams = preseed_teams(1, 10)
preseed_event_teams(stored_teams, "2020ct")
teams = EventTeamsQuery(event_key="2020ct").fetch()
assert len(teams) == len(stored_teams)
def test_affected_queries() -> None:
stored_teams1 = preseed_teams(1, 10)
stored_teams2 = preseed_teams(100, 110)
preseed_event_teams(stored_teams1, "2020aaa")
preseed_event_teams(stored_teams2, "2020bbb")
preseed_event_teams(stored_teams1 + stored_teams2, "2020ccc")
for team_key in stored_teams1:
assert {
q.cache_key
for q in EventTeamsQuery._eventteam_affected_queries(
event_key="2020aaa",
team_key=none_throws(team_key.string_id()),
year=2020,
)
} == {EventTeamsQuery(event_key="2020aaa").cache_key}
assert {
q.cache_key
for q in EventTeamsQuery._team_affected_queries(
team_key=none_throws(team_key.string_id())
)
} == {
EventTeamsQuery(event_key="2020aaa").cache_key,
EventTeamsQuery(event_key="2020ccc").cache_key,
}
for team_key in stored_teams2:
assert {
q.cache_key
for q in EventTeamsQuery._eventteam_affected_queries(
event_key="2020bbb",
team_key=none_throws(team_key.string_id()),
year=2020,
)
} == {EventTeamsQuery(event_key="2020bbb").cache_key}
assert {
q.cache_key
for q in EventTeamsQuery._team_affected_queries(
team_key=none_throws(team_key.string_id())
)
} == {
EventTeamsQuery(event_key="2020bbb").cache_key,
EventTeamsQuery(event_key="2020ccc").cache_key,
}
| 30.4 | 84 | 0.615461 | [
"MIT"
] | guineawheek/ftc-data-take-2 | src/backend/common/queries/tests/event_teams_query_test.py | 3,040 | Python |
"""Test case that checks the working of the utils/command/gen_uml.py module."""
from utils.model.gen_uml import generate
import importlib_metadata
class PseudoFile:
def __init__(self):
self.data = ""
def write(self, data):
self.data += data
def close(self):
pass
def test_loading():
dist = importlib_metadata.distribution("gaphor")
model_file = dist.locate_file("tests/test-model.gaphor")
outfile = PseudoFile()
generate(model_file, outfile)
assert outfile.data == GENERATED, f'"""{outfile.data}"""'
GENERATED = """# This file is generated by build_uml.py. DO NOT EDIT!
from gaphor.UML.properties import association, attribute, enumeration, derived, derivedunion, redefine
# class 'ValSpec' has been stereotyped as 'SimpleAttribute'
# class 'ShouldNotShowUp' has been stereotyped as 'SimpleAttribute' too
class Element: pass
class SubClass(Element): pass
class C: pass
class D(C): pass
C.attr = attribute('attr', str)
C.name1 = association('name1', SubClass, opposite='name2')
SubClass.name2 = association('name2', C, opposite='name1')
C.base = association('base', SubClass, opposite='abstract')
D.subbase = association('subbase', SubClass, opposite='concrete')
SubClass.concrete = association('concrete', D, opposite='subbase')
D.name3 = association('name3', SubClass, opposite='name4')
# 'SubClass.value' is a simple attribute
SubClass.value = attribute('value', str)
SubClass.abstract = derivedunion('abstract', C, 0, '*', SubClass.concrete)
SubClass.name4 = redefine(SubClass, 'name4', D, name2)
"""
| 32.081633 | 102 | 0.721374 | [
"Apache-2.0"
] | MarianelaSena/gaphor | tests/test_gen_uml.py | 1,572 | Python |
import tensorflow as tf
from current_net_conf import *
class RnnDropoutPlaceholders:
probability = tf.placeholder(tf.float32, [])
@staticmethod
def feed(prob=1.0):
return {RnnDropoutPlaceholders.probability: prob}
class MultiRnnWithDropout(tf.nn.rnn_cell.DropoutWrapper):
def __init__(self, num_layers, state_size):
internal_cells = [tf.nn.rnn_cell.GRUCell(state_size) for _ in range(num_layers)]
internal_cell = tf.nn.rnn_cell.MultiRNNCell(internal_cells)
super(MultiRnnWithDropout, self).__init__(
cell=internal_cell,
output_keep_prob=RnnDropoutPlaceholders.probability,
state_keep_prob=RnnDropoutPlaceholders.probability
)
def initial_state(self, first_layer_initial_state):
initial_state = self.zero_state(BATCH_SIZE, tf.float32)
initial_state = list(initial_state)
initial_state[0] = first_layer_initial_state
initial_state = tuple(initial_state)
return initial_state
@staticmethod
def zero_initial_inputs(size):
return tf.zeros([BATCH_SIZE, size], dtype=tf.float32)
class EncoderDropoutPlaceholders:
probability = tf.placeholder(tf.float32, [])
@staticmethod
def feed(prob=1.0):
return {EncoderDropoutPlaceholders.probability: prob}
def apply_dropout_to_encoder_rnn_cells(cells):
return [
tf.nn.rnn_cell.DropoutWrapper(
cell=cell,
output_keep_prob=EncoderDropoutPlaceholders.probability,
state_keep_prob=EncoderDropoutPlaceholders.probability
)
for cell in cells
]
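# Usage sketch for the dropout placeholders (illustrative; `sess`, `train_op` and
# `loss` are assumed to be defined by the surrounding training code):
#
#     feed = {}
#     feed.update(RnnDropoutPlaceholders.feed(0.8))       # keep probability during training
#     feed.update(EncoderDropoutPlaceholders.feed(0.8))
#     sess.run(train_op, feed_dict=feed)
#
#     feed = {}
#     feed.update(RnnDropoutPlaceholders.feed())          # defaults to 1.0 for evaluation
#     feed.update(EncoderDropoutPlaceholders.feed())
#     sess.run(loss, feed_dict=feed)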
| 30.641509 | 88 | 0.708128 | [
"MIT"
] | Saloed/PythonGenerator | model/rnn_with_dropout.py | 1,624 | Python |
import pytest
def test_1():
...
@pytest.mark.slow
def test_2():
...
@pytest.mark.skip
def test_3():
...
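# Selection sketch (command line):
#
#     pytest -m slow            # run only tests marked @pytest.mark.slow
#     pytest -m "not slow"      # run everything except the slow tests
#
# Custom marks such as `slow` are normally registered under `markers` in
# pytest.ini / pyproject.toml to avoid PytestUnknownMarkWarning.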
| 7.625 | 17 | 0.54918 | [
"MIT"
] | twotwo/python-pytest | tests/test_markers.py | 122 | Python |
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(
self, plotly_name="size", parent_name="histogram2dcontour.ybins", **kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
| 29.642857 | 82 | 0.653012 | [
"MIT"
] | 1abner1/plotly.py | packages/python/plotly/plotly/validators/histogram2dcontour/ybins/_size.py | 415 | Python |
"""
This file offers the methods to automatically retrieve the graph Acidocella sp. MX-AZ02.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def AcidocellaSpMxAz02(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Acidocella sp. MX-AZ02 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
    load_nodes: bool = True
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Acidocella sp. MX-AZ02 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="AcidocellaSpMxAz02",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
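# Usage sketch (downloads and caches the graph on first call; the import path is
# assumed from the package layout):
#
#     from ensmallen.datasets.string import AcidocellaSpMxAz02
#
#     graph = AcidocellaSpMxAz02(directed=False)
#     print(graph)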
| 33.142857 | 223 | 0.675862 | [
"MIT"
] | AnacletoLAB/ensmallen | bindings/python/ensmallen/datasets/string/acidocellaspmxaz02.py | 3,480 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2018 Michael J. Hayford
""" Support creation of an iPython console, with rayoptics environment
.. Created on Wed Nov 21 21:48:02 2018
.. codeauthor: Michael J. Hayford
"""
from qtconsole.rich_jupyter_widget import RichJupyterWidget
from qtconsole.inprocess import QtInProcessKernelManager
from IPython.lib import guisupport
from rayoptics.gui.appmanager import ModelInfo
def create_ipython_console(app, title, view_width, view_ht):
""" create a iPython console with a rayoptics environment """
opt_model = app.app_manager.model
if opt_model:
ro_env = {
'app': app,
'opm': opt_model,
'sm': opt_model.seq_model,
'osp': opt_model.optical_spec,
'pm': opt_model.parax_model
}
else:
ro_env = {
'app': app,
'opm': opt_model
}
ro_setup = 'from rayoptics.environment import *'
# construct the top level widget
ipy_console = ConsoleWidget()
# load the environment
ipy_console.execute_command(ro_setup)
ipy_console.push_vars(ro_env)
mi = ModelInfo(opt_model)
sub_window = app.add_subwindow(ipy_console, mi)
sub_window.setWindowTitle(title)
orig_x, orig_y = app.initial_window_offset()
sub_window.setGeometry(orig_x, orig_y, view_width, view_ht)
sub_window.show()
class ConsoleWidget(RichJupyterWidget):
    """Jupyter console widget backed by an in-process kernel."""
    def __init__(self, customBanner=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if customBanner is not None:
self.banner = customBanner
self.font_size = 6
self.kernel_manager = kernel_manager = QtInProcessKernelManager()
kernel_manager.start_kernel(show_banner=False)
kernel_manager.kernel.gui = 'qt'
self.kernel_client = kernel_client = self.kernel_manager.client()
kernel_client.start_channels()
def stop():
kernel_client.stop_channels()
kernel_manager.shutdown_kernel()
guisupport.get_app_qt().exit()
self.exit_requested.connect(stop)
def push_vars(self, variableDict):
"""
Given a dictionary containing name / value pairs, push those variables
to the Jupyter console widget
"""
self.kernel_manager.kernel.shell.push(variableDict)
def clear(self):
"""
Clears the terminal
"""
self._control.clear()
# self.kernel_manager
def print_text(self, text):
"""
Prints some plain text to the console
"""
self._append_plain_text(text)
def execute_command(self, command):
"""
Execute a command in the frame of the console widget
"""
self._execute(command, False)
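# A minimal sketch of driving the widget directly (illustrative; assumes a running
# Qt application, e.g. obtained via guisupport.get_app_qt()):
#
#     console = ConsoleWidget(customBanner="rayoptics console\n")
#     console.push_vars({"answer": 42})
#     console.execute_command("print(answer)")
#     console.show()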
| 28.158416 | 78 | 0.639592 | [
"BSD-3-Clause"
] | NelisW/ray-optics | src/rayoptics/qtgui/ipyconsole.py | 2,845 | Python |
"""Phonopy QHA module."""
# Copyright (C) 2012 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import warnings
import numpy as np
from phonopy.units import Avogadro, EvTokJmol, EVAngstromToGPa
from phonopy.qha.eos import get_eos, fit_to_eos
class BulkModulus(object):
"""Bulk modulus class.
This class is used to calculate bulk modulus only from temperature
independent energy input.
"""
def __init__(self,
volumes,
energies,
eos='vinet'):
"""Init method.
volumes : array_like
Unit cell volumes where energies are obtained.
shape=(volumes, ), dtype='double'.
energies : array_like
Energies obtained at volumes.
shape=(volumes, ), dtype='double'.
eos : str
Identifier of equation of states function.
"""
self._volumes = volumes
if np.array(energies).ndim == 1:
self._energies = energies
else:
self._energies = energies[0]
self._eos = get_eos(eos)
self._energy = None
self._bulk_modulus = None
self._b_prime = None
try:
(self._energy,
self._bulk_modulus,
self._b_prime,
self._volume) = fit_to_eos(volumes,
self._energies,
self._eos)
except TypeError:
msg = ["Failed to fit to \"%s\" equation of states." % eos]
if len(volumes) < 4:
msg += ["At least 4 volume points are needed for the fitting."]
msg += ["Careful choice of volume points is recommended."]
raise RuntimeError("\n".join(msg))
@property
def bulk_modulus(self):
"""Return bulk modulus."""
return self._bulk_modulus
def get_bulk_modulus(self):
"""Return bulk modulus."""
warnings.warn("BulkModulus.get_bulk_modulus() is deprecated."
"Use BulkModulus.bulk_modulus attribute.",
DeprecationWarning)
return self.bulk_modulus
@property
def equilibrium_volume(self):
"""Return volume at equilibrium."""
return self._volume
def get_equilibrium_volume(self):
"""Return volume at equilibrium."""
warnings.warn("BulkModulus.get_equilibrium_volume() is deprecated."
"Use BulkModulus.equilibrium_volume attribute.",
DeprecationWarning)
return self.equilibrium_volume
@property
def b_prime(self):
"""Return fitted parameter B'."""
return self._b_prime
def get_b_prime(self):
"""Return fitted parameter B'."""
warnings.warn("BulkModulus.get_b_prime() is deprecated."
"Use BulkModulus.b_prime attribute.",
DeprecationWarning)
return self._b_prime
@property
def energy(self):
"""Return fitted parameter of energy."""
return self._energy
def get_energy(self):
"""Return fitted parameter of energy."""
warnings.warn("BulkModulus.get_energy() is deprecated."
"Use BulkModulus.energy attribute.",
DeprecationWarning)
return self._energy
def get_parameters(self):
"""Return fitted parameters."""
return (self._energy,
self._bulk_modulus,
self._b_prime,
self._volume)
def get_eos(self):
"""Return EOS function as a python method."""
warnings.warn("BulkModulus.get_eos() is deprecated.",
DeprecationWarning)
return self._eos
def plot(self):
"""Plot fitted EOS curve."""
import matplotlib.pyplot as plt
ep = self.get_parameters()
vols = self._volumes
volume_points = np.linspace(min(vols), max(vols), 201)
fig, ax = plt.subplots()
ax.plot(volume_points, self._eos(volume_points, *ep), 'r-')
ax.plot(vols, self._energies, 'bo', markersize=4)
return plt
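# Usage sketch (illustrative values; at least 4 volume-energy pairs are needed for
# the EOS fit):
#
#     volumes = [62.0, 64.0, 66.0, 68.0, 70.0]              # angstrom^3
#     energies = [-14.55, -14.68, -14.73, -14.71, -14.64]   # eV
#     bm = BulkModulus(volumes, energies, eos='vinet')
#     print(bm.bulk_modulus * EVAngstromToGPa)              # GPa
#     print(bm.equilibrium_volume)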
class QHA(object):
"""Quasi harmonic approximation class."""
def __init__(self,
volumes, # angstrom^3
electronic_energies, # eV
temperatures, # K
cv, # J/K/mol
entropy, # J/K/mol
fe_phonon, # kJ/mol
eos='vinet',
t_max=None,
energy_plot_factor=None):
"""Init method.
Parameters
----------
volumes: array_like
Unit cell volumes (V) in angstrom^3.
dtype='double'
shape=(volumes,)
electronic_energies: array_like
Electronic energies (U_el) or electronic free energies (F_el) in eV.
            It is assumed to be the former if ndim==1 and the latter if ndim==2.
dtype='double'
shape=(volumes,) or (temperatuers, volumes)
temperatures: array_like
            Temperatures in ascending order (T) in K.
dtype='double'
shape=(temperatures,)
cv: array_like
Phonon Heat capacity at constant volume in J/K/mol.
dtype='double'
shape=(temperatuers, volumes)
entropy: array_like
Phonon entropy at constant volume (S_ph) in J/K/mol.
dtype='double'
shape=(temperatuers, volumes)
fe_phonon: array_like
Phonon Helmholtz free energy (F_ph) in kJ/mol.
dtype='double'
shape=(temperatuers, volumes)
eos: str
Equation of state used for fitting F vs V.
'vinet', 'murnaghan' or 'birch_murnaghan'.
t_max: float
            Maximum temperature to be calculated. This must not be
            greater than the temperature of the third element from the
            end of the 'temperatures' array. If t_max=None, the temperature
            of the third element from the end is used.
energy_plot_factor: float
This value is multiplied to energy like values only in plotting.
"""
self._volumes = np.array(volumes)
self._electronic_energies = np.array(electronic_energies)
self._all_temperatures = np.array(temperatures)
self._cv = np.array(cv)
self._entropy = np.array(entropy)
self._fe_phonon = np.array(fe_phonon) / EvTokJmol
self._eos = get_eos(eos)
self._t_max = t_max
self._energy_plot_factor = energy_plot_factor
self._temperatures = None
self._equiv_volumes = None
self._equiv_energies = None
self._equiv_bulk_modulus = None
self._equiv_parameters = None
self._free_energies = None
self._num_elems = None
self._thermal_expansions = None
self._cp_numerical = None
self._volume_entropy_parameters = None
self._volume_cv_parameters = None
self._volume_entropy = None
self._volume_cv = None
self._cp_polyfit = None
self._dsdv = None
self._gruneisen_parameters = None
self._len = None
@property
def thermal_expansion(self):
"""Return volumetric thermal expansion coefficients at temperatures."""
return self._thermal_expansions[:self._len]
@property
def helmholtz_volume(self):
"""Return Helmholtz free energies at temperatures and volumes."""
return self._free_energies[:self._len]
@property
def volume_temperature(self):
"""Return equilibrium volumes at temperatures."""
return self._equiv_volumes[:self._len]
@property
def gibbs_temperature(self):
"""Return Gibbs free energies at temperatures."""
return self._equiv_energies[:self._len]
@property
def bulk_modulus_temperature(self):
"""Return bulk modulus vs temperature data."""
return self._equiv_bulk_modulus[:self._len]
@property
def heat_capacity_P_numerical(self):
"""Return heat capacities at constant pressure at temperatures.
Values are computed by numerical derivative of Gibbs free energy.
"""
return self._cp_numerical[:self._len]
@property
def heat_capacity_P_polyfit(self):
"""Return heat capacities at constant pressure at temperatures.
Volumes are computed in another way to heat_capacity_P_numerical
for the better numerical behaviour. But this does not work
when temperature dependent electronic_energies is supplied.
"""
if self._electronic_energies.ndim == 1:
return self._cp_polyfit[:self._len]
else:
return None
@property
def gruneisen_temperature(self):
"""Return Gruneisen parameters at temperatures."""
return self._gruneisen_parameters[:self._len]
def run(self, verbose=False):
"""Fit parameters to EOS at temperatures.
Even if fitting failed, simply omit the volume point. In this case,
the failed temperature point doesn't exist in the returned arrays.
"""
if verbose:
print(("#%11s" + "%14s" * 4) % ("T", "E_0", "B_0", "B'_0", "V_0"))
# Plus one temperature point is necessary for computing e.g. beta.
num_elems = self._get_num_elems(self._all_temperatures) + 1
if num_elems > len(self._all_temperatures):
num_elems -= 1
temperatures = []
parameters = []
free_energies = []
        for i in range(num_elems): # loop over temperatures
if self._electronic_energies.ndim == 1:
el_energy = self._electronic_energies
else:
el_energy = self._electronic_energies[i]
fe = [ph_e + el_e
for ph_e, el_e in zip(self._fe_phonon[i], el_energy)]
            try:
                ep = fit_to_eos(self._volumes, fe, self._eos)
            except TypeError:
                print("Fitting failure at T=%.1f" % self._all_temperatures[i])
                ep = None
if ep is None:
# Simply omit volume point where the fitting failed.
continue
else:
[ee, eb, ebp, ev] = ep
t = self._all_temperatures[i]
temperatures.append(t)
parameters.append(ep)
free_energies.append(fe)
if verbose:
print(("%14.6f" * 5) %
(t, ep[0], ep[1] * EVAngstromToGPa, ep[2], ep[3]))
self._free_energies = np.array(free_energies)
self._temperatures = np.array(temperatures)
self._equiv_parameters = np.array(parameters)
self._equiv_volumes = np.array(self._equiv_parameters[:, 3])
self._equiv_energies = np.array(self._equiv_parameters[:, 0])
self._equiv_bulk_modulus = np.array(
self._equiv_parameters[:, 1] * EVAngstromToGPa)
self._num_elems = len(self._temperatures)
        # For computing the following values at temperatures, a finite
        # difference method is used. Therefore the number of temperature
        # points needs to be larger than self._num_elems, which is nearly
        # equal to the number of temperature points we expect.
self._set_thermal_expansion()
self._set_heat_capacity_P_numerical()
self._set_heat_capacity_P_polyfit()
self._set_gruneisen_parameter() # To be run after thermal expansion.
self._len = len(self._thermal_expansions)
assert(self._len + 1 == self._num_elems)
    def plot(self, thin_number=10, volume_temp_exp=None):
        """Plot Helmholtz free energy, volume and thermal expansion vs temperature."""
        import matplotlib.pyplot as plt
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['font.family'] = 'serif'
plt.rcParams['text.usetex'] = True
fig, axs = plt.subplots(1, 3, figsize=(7, 3.5))
axs[0].xaxis.set_ticks_position('both')
axs[0].yaxis.set_ticks_position('both')
axs[0].xaxis.set_tick_params(which='both', direction='in')
axs[0].yaxis.set_tick_params(which='both', direction='in')
self._plot_helmholtz_volume(axs[0], thin_number=thin_number)
axs[1].xaxis.set_ticks_position('both')
axs[1].yaxis.set_ticks_position('both')
axs[1].xaxis.set_tick_params(which='both', direction='in')
axs[1].yaxis.set_tick_params(which='both', direction='in')
self._plot_volume_temperature(axs[1], exp_data=volume_temp_exp)
axs[2].xaxis.set_ticks_position('both')
axs[2].yaxis.set_ticks_position('both')
axs[2].xaxis.set_tick_params(which='both', direction='in')
axs[2].yaxis.set_tick_params(which='both', direction='in')
self._plot_thermal_expansion(axs[2])
plt.tight_layout()
return plt
def get_helmholtz_volume(self):
warnings.warn("QHA.get_helmholtz_volume() is deprecated."
"Use helmholtz_volume attribute.",
DeprecationWarning)
return self.helmholtz_volume
def plot_helmholtz_volume(self,
thin_number=10,
xlabel=r'Volume $(\AA^3)$',
ylabel='Free energy'):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
self._plot_helmholtz_volume(ax,
thin_number=thin_number,
xlabel=xlabel,
ylabel=ylabel)
return plt
def plot_pdf_helmholtz_volume(self,
thin_number=10,
filename='helmholtz-volume.pdf'):
import matplotlib.pyplot as plt
self._set_rcParams(plt)
fig, ax = plt.subplots()
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_tick_params(which='both', direction='in')
ax.yaxis.set_tick_params(which='both', direction='in')
self._plot_helmholtz_volume(ax, thin_number=thin_number)
plt.savefig(filename)
plt.close()
def write_helmholtz_volume(self, filename='helmholtz-volume.dat'):
w = open(filename, 'w')
for i, (t, ep, fe) in enumerate(zip(self._temperatures,
self._equiv_parameters,
self._free_energies)):
if i == self._len:
break
w.write("# Temperature: %f\n" % t)
w.write("# Parameters: %f %f %f %f\n" % tuple(ep))
for j, v in enumerate(self._volumes):
w.write("%20.15f %25.15f\n" % (v, fe[j]))
w.write("\n\n")
w.close()
    def write_helmholtz_volume_fitted(self,
                                      thin_number,
                                      filename='helmholtz-volume_fitted.dat'):
if self._energy_plot_factor is None:
_energy_plot_factor = 1
else:
_energy_plot_factor = self._energy_plot_factor
volume_points = np.linspace(
min(self._volumes), max(self._volumes), 201)
selected_volumes = []
selected_energies = []
for i, t in enumerate(self._temperatures[:self._len]):
if i % thin_number == 0:
selected_volumes.append(self._equiv_volumes[i])
selected_energies.append(self._equiv_energies[i])
for i, t in enumerate(self._temperatures[:self._len]):
if t >= 298:
if i > 0:
de = self._equiv_energies[i] - self._equiv_energies[i - 1]
dt = t - self._temperatures[i - 1]
e0 = ((298 - self._temperatures[i - 1]) / dt * de +
self._equiv_energies[i - 1])
else:
e0 = 0
break
e0 *= _energy_plot_factor
_data_vol_points = []
_data_eos = []
for i, t in enumerate(self._temperatures[:self._len]):
if i % thin_number == 0:
_data_vol_points.append(
np.array(self._free_energies[i]) * _energy_plot_factor - e0)
_data_eos.append(
self._eos(volume_points, * self._equiv_parameters[i])
* _energy_plot_factor - e0)
data_eos = np.array(_data_eos).T
data_vol_points = np.array(_data_vol_points).T
data_min = (np.array(selected_energies) * _energy_plot_factor - e0)
with open(filename, 'w') as w:
w.write("# Volume points\n")
for (j, k) in zip(self._volumes, data_vol_points):
w.write("%10.5f " % j)
for l in k:
w.write("%10.5f" % l)
w.write("\n")
w.write("\n# Fitted data\n")
for (m, n) in zip(volume_points, data_eos):
w.write("%10.5f " % m)
for ll in n:
w.write("%10.5f" % ll)
w.write("\n")
w.write("\n# Minimas\n")
for (a, b) in zip(selected_volumes, data_min):
w.write("%10.5f %10.5f %s" % (a, b, '\n'))
w.write('\n')
def get_volume_temperature(self):
warnings.warn("QHA.get_volume_temperature() is deprecated."
"Use volume_temperature attribute.",
DeprecationWarning)
return self.volume_temperature
def plot_volume_temperature(self, exp_data=None):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
self._plot_volume_temperature(ax, exp_data=exp_data)
return plt
def plot_pdf_volume_temperature(self,
exp_data=None,
filename='volume-temperature.pdf'):
import matplotlib.pyplot as plt
self._set_rcParams(plt)
fig, ax = plt.subplots()
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_tick_params(which='both', direction='in')
ax.yaxis.set_tick_params(which='both', direction='in')
self._plot_volume_temperature(ax, exp_data=exp_data)
plt.savefig(filename)
plt.close()
def write_volume_temperature(self, filename='volume-temperature.dat'):
w = open(filename, 'w')
for i in range(self._len):
w.write("%25.15f %25.15f\n" % (self._temperatures[i],
self._equiv_volumes[i]))
w.close()
def get_thermal_expansion(self):
warnings.warn("QHA.get_thermal_expansion() is deprecated."
"Use thermal_expansion attribute.",
DeprecationWarning)
return self.thermal_expansion
def plot_thermal_expansion(self):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
self._plot_thermal_expansion(ax)
return plt
def plot_pdf_thermal_expansion(self, filename='thermal_expansion.pdf'):
import matplotlib.pyplot as plt
self._set_rcParams(plt)
fig, ax = plt.subplots()
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_tick_params(which='both', direction='in')
ax.yaxis.set_tick_params(which='both', direction='in')
self._plot_thermal_expansion(ax)
plt.savefig(filename)
plt.close()
def write_thermal_expansion(self, filename='thermal_expansion.dat'):
w = open(filename, 'w')
for i in range(self._len):
w.write("%25.15f %25.15f\n" % (self._temperatures[i],
self._thermal_expansions[i]))
w.close()
def get_gibbs_temperature(self):
return self.gibbs_temperature
def plot_gibbs_temperature(self,
xlabel='Temperature (K)',
ylabel='Gibbs free energy'):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
self._plot_gibbs_temperature(ax, xlabel=xlabel, ylabel=ylabel)
return plt
def plot_pdf_gibbs_temperature(self, filename='gibbs-temperature.pdf'):
import matplotlib.pyplot as plt
self._set_rcParams(plt)
fig, ax = plt.subplots()
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_tick_params(which='both', direction='in')
ax.yaxis.set_tick_params(which='both', direction='in')
self._plot_gibbs_temperature(ax)
plt.savefig(filename)
plt.close()
def write_gibbs_temperature(self, filename='gibbs-temperature.dat'):
w = open(filename, 'w')
for i in range(self._len):
w.write("%20.15f %25.15f\n" % (self._temperatures[i],
self._equiv_energies[i]))
w.close()
def get_bulk_modulus_temperature(self):
return self.bulk_modulus_temperature
def plot_bulk_modulus_temperature(self,
xlabel='Temperature (K)',
ylabel='Bulk modulus'):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
self._plot_bulk_modulus_temperature(ax,
xlabel=xlabel,
ylabel=ylabel)
return plt
def plot_pdf_bulk_modulus_temperature(
self,
filename='bulk_modulus-temperature.pdf'):
import matplotlib.pyplot as plt
self._set_rcParams(plt)
fig, ax = plt.subplots()
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_tick_params(which='both', direction='in')
ax.yaxis.set_tick_params(which='both', direction='in')
self._plot_bulk_modulus_temperature(ax)
plt.savefig(filename)
plt.close()
def write_bulk_modulus_temperature(
self,
filename='bulk_modulus-temperature.dat'):
w = open(filename, 'w')
for i in range(self._len):
w.write("%20.15f %25.15f\n" % (self._temperatures[i],
self._equiv_bulk_modulus[i]))
w.close()
def get_heat_capacity_P_numerical(self):
return self.heat_capacity_P_numerical
def plot_heat_capacity_P_numerical(self, Z=1, exp_data=None):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
self._plot_heat_capacity_P_numerical(ax, Z=Z, exp_data=exp_data)
return plt
def plot_pdf_heat_capacity_P_numerical(self,
exp_data=None,
filename='Cp-temperature.pdf'):
import matplotlib.pyplot as plt
self._set_rcParams(plt)
fig, ax = plt.subplots()
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_tick_params(which='both', direction='in')
ax.yaxis.set_tick_params(which='both', direction='in')
self._plot_heat_capacity_P_numerical(ax, exp_data=exp_data)
plt.savefig(filename)
plt.close()
def write_heat_capacity_P_numerical(self, filename='Cp-temperature.dat'):
w = open(filename, 'w')
for i in range(self._len):
w.write("%20.15f %20.15f\n" % (self._temperatures[i],
self._cp_numerical[i]))
w.close()
def get_heat_capacity_P_polyfit(self):
return self.heat_capacity_P_polyfit
def plot_heat_capacity_P_polyfit(self, Z=1, exp_data=None):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
self._plot_heat_capacity_P_polyfit(ax, Z=Z, exp_data=exp_data)
return plt
def plot_pdf_heat_capacity_P_polyfit(
self,
exp_data=None,
filename='Cp-temperature_polyfit.pdf'):
import matplotlib.pyplot as plt
self._set_rcParams(plt)
fig, ax = plt.subplots()
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_tick_params(which='both', direction='in')
ax.yaxis.set_tick_params(which='both', direction='in')
self._plot_heat_capacity_P_polyfit(ax, exp_data=exp_data)
plt.savefig(filename)
plt.close()
def write_heat_capacity_P_polyfit(self,
filename='Cp-temperature_polyfit.dat',
filename_ev='entropy-volume.dat',
filename_cvv='Cv-volume.dat',
filename_dsdvt='dsdv-temperature.dat'):
wve = open(filename_ev, 'w')
wvcv = open(filename_cvv, 'w')
for i in range(1, self._len):
t = self._temperatures[i]
wve.write("# temperature %20.15f\n" % t)
wve.write("# %20.15f %20.15f %20.15f %20.15f %20.15f\n" %
tuple(self._volume_entropy_parameters[i - 1]))
wvcv.write("# temperature %20.15f\n" % t)
wvcv.write("# %20.15f %20.15f %20.15f %20.15f %20.15f\n" %
tuple(self._volume_cv_parameters[i - 1]))
for ve, vcv in zip(self._volume_entropy[i - 1],
self._volume_cv[i - 1]):
wve.write("%20.15f %20.15f\n" % tuple(ve))
wvcv.write("%20.15f %20.15f\n" % tuple(vcv))
wve.write("\n\n")
wvcv.write("\n\n")
wve.close()
wvcv.close()
w = open(filename, 'w')
for i in range(self._len):
w.write("%20.15f %20.15f\n" % (self._temperatures[i],
self._cp_polyfit[i]))
w.close()
w = open(filename_dsdvt, 'w') # GPa
for i in range(self._len):
w.write("%20.15f %20.15f\n" % (self._temperatures[i],
self._dsdv[i] * 1e21 / Avogadro))
w.close()
def get_gruneisen_temperature(self):
return self.gruneisen_temperature
def plot_gruneisen_temperature(self):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
self._plot_gruneisen_temperature(ax)
return plt
def plot_pdf_gruneisen_temperature(self,
filename='gruneisen-temperature.pdf'):
import matplotlib.pyplot as plt
self._set_rcParams(plt)
fig, ax = plt.subplots()
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_tick_params(which='both', direction='in')
ax.yaxis.set_tick_params(which='both', direction='in')
self._plot_gruneisen_temperature(ax)
plt.savefig(filename)
plt.close()
def write_gruneisen_temperature(self,
filename='gruneisen-temperature.dat'):
w = open(filename, 'w')
for i in range(self._len):
w.write("%20.15f %25.15f\n" % (self._temperatures[i],
self._gruneisen_parameters[i]))
w.close()
def _plot_helmholtz_volume(self,
ax,
thin_number=10,
xlabel=r'Volume $(\AA^3)$',
ylabel='Free energy'):
if self._energy_plot_factor is None:
_energy_plot_factor = 1
_ylabel = ylabel + ' (eV)'
else:
_energy_plot_factor = self._energy_plot_factor
_ylabel = ylabel
volume_points = np.linspace(min(self._volumes),
max(self._volumes),
201)
selected_volumes = []
selected_energies = []
thin_index = 0
for i, t in enumerate(self._temperatures[:self._len]):
if i % thin_number == 0:
selected_volumes.append(self._equiv_volumes[i])
selected_energies.append(self._equiv_energies[i])
for i, t in enumerate(self._temperatures[:self._len]):
if t >= 298:
if i > 0:
de = self._equiv_energies[i] - self._equiv_energies[i - 1]
dt = t - self._temperatures[i - 1]
e0 = ((298 - self._temperatures[i - 1]) / dt * de +
self._equiv_energies[i - 1])
else:
e0 = 0
break
e0 *= _energy_plot_factor
for i, t in enumerate(self._temperatures[:self._len]):
if i % thin_number == 0:
ax.plot(self._volumes,
np.array(self._free_energies[i]) * _energy_plot_factor
- e0,
'bo', markeredgecolor='b', markersize=3)
ax.plot(volume_points,
self._eos(volume_points, * self._equiv_parameters[i])
* _energy_plot_factor - e0, 'b-')
thin_index = i
for i, j in enumerate((0, thin_index)):
ax.text(self._volumes[-2],
(self._free_energies[j, -1] + (1 - i * 2) * 0.1 - 0.05) *
_energy_plot_factor - e0,
"%dK" % int(self._temperatures[j]),
fontsize=8)
ax.plot(selected_volumes,
np.array(selected_energies) * _energy_plot_factor - e0,
'ro-', markeredgecolor='r', markersize=3)
ax.set_xlabel(xlabel)
ax.set_ylabel(_ylabel)
def _plot_volume_temperature(self,
ax,
exp_data=None,
xlabel='Temperature (K)',
ylabel=r'Volume $(\AA^3)$'):
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.plot(self._temperatures[:self._len],
self._equiv_volumes[:self._len],
'r-')
ax.set_xlim(self._temperatures[0],
self._temperatures[self._len - 1])
# exp
if exp_data:
ax.plot(exp_data[0], exp_data[1], 'ro')
def _plot_thermal_expansion(
self,
ax,
xlabel='Temperature (K)',
ylabel=r'Thermal expansion $(\mathrm{K}^{-1})$'):
from matplotlib.ticker import ScalarFormatter
class FixedScaledFormatter(ScalarFormatter):
def __init__(self):
ScalarFormatter.__init__(self, useMathText=True)
def _set_orderOfMagnitude(self, range):
self.orderOfMagnitude = -6
ax.yaxis.set_major_formatter(FixedScaledFormatter())
ax.ticklabel_format(style="sci", axis="y", scilimits=(0, 0))
beta = np.array(self._thermal_expansions)
ax.plot(self._temperatures[:self._len],
beta[:self._len],
'r-')
ax.set_xlim(self._temperatures[0],
self._temperatures[self._len - 1])
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
def _plot_gibbs_temperature(self,
ax,
xlabel='Temperature (K)',
ylabel='Gibbs free energy (eV)'):
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.plot(self._temperatures[:self._len],
self._equiv_energies[:self._len],
'r-')
ax.set_xlim(self._temperatures[0],
self._temperatures[self._len - 1])
def _plot_bulk_modulus_temperature(self,
ax,
xlabel='Temperature (K)',
ylabel='Bulk modulus (GPa)'):
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.plot(self._temperatures[:self._len],
self._equiv_bulk_modulus[:self._len],
'r-')
ax.set_xlim(self._temperatures[0],
self._temperatures[self._len - 1])
def _plot_heat_capacity_P_numerical(
self,
ax,
Z=1,
exp_data=None,
xlabel='Temperature (K)',
ylabel=r'$C\mathrm{_P}$ $\mathrm{(J/mol\cdot K)}$'):
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.plot(self._temperatures[:self._len],
np.array(self._cp_numerical[:self._len]) / Z,
'r-')
# exp
if exp_data:
ax.plot(exp_data[0], exp_data[1], 'ro')
ax.set_xlim(self._temperatures[0],
self._temperatures[self._len - 1])
def _plot_heat_capacity_P_polyfit(
self,
ax,
Z=1,
exp_data=None,
xlabel='Temperature (K)',
ylabel=r'$C\mathrm{_P}$ $\mathrm{(J/mol\cdot K)}$'):
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.plot(self._temperatures[:self._len],
np.array(self._cp_polyfit[:self._len]) / Z,
'r-')
# exp
if exp_data:
ax.plot(exp_data[0], exp_data[1], 'ro')
ax.set_xlim(self._temperatures[0],
self._temperatures[self._len - 1])
def _plot_gruneisen_temperature(self,
ax,
xlabel='Temperature (K)',
ylabel='Gruneisen parameter'):
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.plot(self._temperatures[:self._len],
self._gruneisen_parameters[:self._len],
'r-')
ax.set_xlim(self._temperatures[0],
self._temperatures[self._len - 1])
def _set_thermal_expansion(self):
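        # Volumetric thermal expansion beta = (1/V) (dV/dT)_P, estimated from a
        # central difference of the equilibrium volumes over temperature.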
beta = [0.]
for i in range(1, self._num_elems - 1):
dt = self._temperatures[i + 1] - self._temperatures[i - 1]
dv = self._equiv_volumes[i + 1] - self._equiv_volumes[i - 1]
beta.append(dv / dt / self._equiv_volumes[i])
self._thermal_expansions = beta
def _set_heat_capacity_P_numerical(self):
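        # Cp = -T (d2G/dT2)_P; G(T) is fitted by a quadratic over three
        # neighbouring temperatures, so d2G/dT2 = 2 * parameters[0].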
cp = []
g = np.array(self._equiv_energies) * EvTokJmol * 1000
cp.append(0.0)
for i in range(1, self._num_elems - 1):
t = self._temperatures[i]
parameters = np.polyfit(self._temperatures[i - 1:i + 2],
g[i - 1: i + 2], 2)
cp.append(- (2 * parameters[0]) * t)
self._cp_numerical = cp
def _set_heat_capacity_P_polyfit(self):
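        # Cp = Cv + T (dV/dT)_P (dS/dV)_T, with S(V) and Cv(V) fitted by
        # quartic polynomials at each temperature and V(T) by a local quadratic.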
cp = [0.0]
dsdv = [0.0]
self._volume_entropy_parameters = []
self._volume_cv_parameters = []
self._volume_entropy = []
self._volume_cv = []
for j in range(1, self._num_elems - 1):
t = self._temperatures[j]
x = self._equiv_volumes[j]
try:
parameters = np.polyfit(self._volumes, self._cv[j], 4)
except np.lib.polynomial.RankWarning:
msg = [
"Failed to fit heat capacities to polynomial of degree 4."]
if len(self._volumes) < 5:
msg += [
"At least 5 volume points are needed for the fitting."]
raise RuntimeError("\n".join(msg))
cv_p = np.dot(parameters, np.array([x**4, x**3, x**2, x, 1]))
self._volume_cv_parameters.append(parameters)
try:
parameters = np.polyfit(self._volumes, self._entropy[j], 4)
except np.lib.polynomial.RankWarning:
msg = [
"Failed to fit entropies to polynomial of degree 4."]
if len(self._volumes) < 5:
msg += [
"At least 5 volume points are needed for the fitting."]
raise RuntimeError("\n".join(msg))
dsdv_t = np.dot(parameters[:4], np.array(
[4 * x**3, 3 * x**2, 2 * x, 1]))
self._volume_entropy_parameters.append(parameters)
try:
parameters = np.polyfit(self._temperatures[j - 1:j + 2],
self._equiv_volumes[j - 1: j + 2], 2)
except np.lib.polynomial.RankWarning:
msg = ("Failed to fit equilibrium volumes vs T to "
"polynomial of degree 2.")
raise RuntimeError(msg)
dvdt = parameters[0] * 2 * t + parameters[1]
cp.append(cv_p + t * dvdt * dsdv_t)
dsdv.append(dsdv_t)
self._volume_cv.append(np.array([self._volumes, self._cv[j]]).T)
self._volume_entropy.append(np.array([self._volumes,
self._entropy[j]]).T)
self._cp_polyfit = cp
self._dsdv = dsdv
def _set_gruneisen_parameter(self):
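        # Thermodynamic Gruneisen parameter gamma = beta * B_T * V / Cv,
        # with Cv interpolated at the equilibrium volume from a quartic fit.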
gamma = [0]
for i in range(1, self._num_elems - 1):
v = self._equiv_volumes[i]
kt = self._equiv_bulk_modulus[i]
beta = self._thermal_expansions[i]
try:
parameters = np.polyfit(self._volumes, self._cv[i], 4)
except np.lib.polynomial.RankWarning:
msg = [
"Failed to fit heat capacities to polynomial of degree 4."]
if len(self._volumes) < 5:
msg += [
"At least 5 volume points are needed for the fitting."]
raise RuntimeError("\n".join(msg))
cv = (np.dot(parameters, [v**4, v**3, v**2, v, 1]) /
v / 1000 / EvTokJmol * EVAngstromToGPa)
if cv < 1e-10:
gamma.append(0.0)
else:
gamma.append(beta * kt / cv)
self._gruneisen_parameters = gamma
def _get_num_elems(self, temperatures):
if self._t_max is None:
return len(temperatures)
else:
i = np.argmin(np.abs(temperatures - self._t_max))
return i + 1
def _set_rcParams(self, plt):
plt.rcParams['backend'] = 'PDF'
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['font.family'] = 'serif'
plt.rcParams['axes.labelsize'] = 18
plt.rcParams['figure.subplot.left'] = 0.25
plt.rcParams['figure.subplot.bottom'] = 0.15
plt.rcParams['figure.figsize'] = 4, 6
plt.rcParams['text.usetex'] = True
| 36.903019 | 80 | 0.556068 | [
"BSD-3-Clause"
] | SeyedMohamadMoosavi/phonopy | phonopy/qha/core.py | 40,335 | Python |
import os
from argparse import ArgumentParser
from time import time
import yaml
import numpy as np
from fx_replicator import (
build_model, load_wave, save_wave, sliding_window, LossFunc
)
import nnabla as nn
#import nnabla_ext.cudnn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import nnabla.utils.save
import tqdm
def main():
args = parse_args()
with open(args.config_file) as fp:
config = yaml.safe_load(fp)
input_timesteps = config["input_timesteps"]
output_timesteps = config["output_timesteps"]
batch_size = config["batch_size"]
data = load_wave(args.input_file)
print("data.shape is:", data.shape)
print("data.len is:", len(data))
"""
from nnabla.ext_utils import get_extension_context
cuda_device_id = 0
ctx = get_extension_context('cudnn', device_id=cuda_device_id)
print("Context: {}".format(ctx))
nn.set_default_context(ctx) # Set CUDA as a default context.
"""
    # pad the input so that it splits into whole batches
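    # prepad gives the network its extra lookback (input minus output timesteps);
    # the trailing zeros round the length up to a whole number of blocks of
    # output_timesteps * batch_size samples.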
block_size = output_timesteps * batch_size
prepad = input_timesteps - output_timesteps
    postpad = -len(data) % block_size  # zeros needed to reach a block multiple
    print("postpad", postpad)
padded = np.concatenate((
np.zeros(prepad, np.float32),
data,
        np.zeros(postpad, np.float32)))
x = sliding_window(padded, input_timesteps, output_timesteps)
x = x[:, :, np.newaxis]
y = np.zeros_like(x)
batchlen = x.shape[0]
print("x.length is:",batchlen)
xx = nn.Variable((batch_size , input_timesteps, 1))
    # load the model file given on the command line, falling back to the default
    nn.load_parameters(args.model_file if args.model_file else "best_result.h5")
print("xx.shape is:", xx.shape)
yy = build_model(xx)
print("yy.shape is:", yy.shape)
print("x.shape in the loop is:", x[32:32 + batch_size , : , : ].shape)
start1 = time()
for step in range(0, batchlen , batch_size):
xx.d = x[step:step + batch_size , : , : ]
yy.forward()
y[step:step + batch_size , : , : ] = yy.d
proc_time = time() - start1
print(proc_time)
print(step)
y = y[:, -output_timesteps:, :].reshape(-1)[:len(data)]
save_wave(y, args.output_file)
print("finished\n")
proc_time = time() - start1
print(proc_time)
def parse_args():
parser = ArgumentParser()
parser.add_argument(
"--config_file", "-c", default="./config.yml",
help="configuration file (*.yml)")
parser.add_argument(
"--input_file", "-i",
help="input wave file (48kHz/mono, *.wav)")
parser.add_argument(
"--output_file", "-o", default="./predicted.wav",
help="output wave file (48kHz/mono, *.wav)")
parser.add_argument(
"--model_file", "-m",
help="input model file (*.h5)")
return parser.parse_args()
if __name__ == '__main__':
main()
| 26.090909 | 74 | 0.637282 | [
"MIT"
] | kmwebnet/Audio-Effect-replicator-nnabla | predict.py | 2,870 | Python |
"""
This module contains utiliy functions for fields which are used by both the
:mod:`~sphinxcontrib_django2.docstrings.attributes` and
:mod:`~sphinxcontrib_django2.docstrings.classes` modules.
"""
from django.apps import apps
from django.contrib import contenttypes
from django.db import models
from django.utils.encoding import force_str
def get_field_type(field, include_role=True):
"""
Get the type of a field including the correct intersphinx mappings.
:param field: The field
:type field: ~django.db.models.Field
    :param include_role: Whether or not the role :any:`py:class` should be included
    :type include_role: bool
:return: The type of the field
:rtype: str
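
    Example (with a hypothetical ``Book`` model holding a foreign key ``author``
    to ``myapp.models.Author``)::

        get_field_type(Book._meta.get_field("author"))
        # ':class:`~django.db.models.fields.related.ForeignKey` to :class:`~myapp.models.Author`'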
"""
if isinstance(field, models.fields.related.RelatedField):
if isinstance(field.remote_field.model, str):
# This happens with foreign keys of abstract models
to = field.remote_field.model
else:
to = f"{field.remote_field.model.__module__}.{field.remote_field.model.__name__}"
return f":class:`~{type(field).__module__}.{type(field).__name__}` to :class:`~{to}`"
elif isinstance(field, models.fields.reverse_related.ForeignObjectRel):
to = field.remote_field.model
return (
f"Reverse :class:`~{type(field.remote_field).__module__}."
f"{type(field.remote_field).__name__}` from :class:`~{to.__module__}.{to.__name__}`"
)
else:
if include_role:
# For the docstrings of attributes, the :class: role is required
return f":class:`~{type(field).__module__}.{type(field).__name__}`"
else:
# For the :param: role in class docstrings, the :class: role is not required
return f"~{type(field).__module__}.{type(field).__name__}"
def get_field_verbose_name(field):
"""
Get the verbose name of the field.
If the field has a ``help_text``, it is also included.
In case the field is a related field, the ``related_name`` is used to link to the remote model.
For reverse related fields, the originating field is linked.
:param field: The field
:type field: ~django.db.models.Field
"""
help_text = ""
# Check whether the field is a reverse related field
if isinstance(field, models.fields.reverse_related.ForeignObjectRel):
# Convert related name to a readable name if ``snake_case`` is used
related_name = (
field.related_name.replace("_", " ") if field.related_name else None
)
if isinstance(field, models.fields.reverse_related.OneToOneRel):
# If a related name is given, use it, else use the verbose name of the remote model
related_name = related_name or field.remote_field.model._meta.verbose_name
# If field is a OneToOne field, use the prefix "The"
verbose_name = (
f"The {related_name} of this {field.model._meta.verbose_name}"
)
else:
# This means field is an instance of ManyToOneRel or ManyToManyRel
# If a related name is given, use it, else use the verbose name of the remote model
related_name = (
related_name or field.remote_field.model._meta.verbose_name_plural
)
# If field is a foreign key or a ManyToMany field, use the prefix "All"
verbose_name = (
f"All {related_name} of this {field.model._meta.verbose_name}"
)
# Always link to the origin of the reverse related field
verbose_name += (
f" (related name of :attr:`~{field.remote_field.model.__module__}"
f".{field.remote_field.model.__name__}.{field.remote_field.name}`)"
)
elif isinstance(field, contenttypes.fields.GenericForeignKey):
# GenericForeignKey does not inherit from django.db.models.Field and has no verbose_name
return (
f"Generic foreign key to the :class:`~django.contrib.contenttypes.models.ContentType` "
f"specified in "
f":attr:`~{field.model.__module__}.{field.model.__name__}.{field.ct_field}`"
)
else:
# This means the field is either a normal field or a forward related field
# If the field is a primary key, include a notice
primary_key = "Primary key: " if field.primary_key else ""
field_verbose_name = force_str(field.verbose_name)
# Make the first letter upper case while leave the rest unchanged
# (str.capitalize() would make the rest lower case, e.g. ID => Id)
verbose_name = (
primary_key + field_verbose_name[:1].upper() + field_verbose_name[1:]
)
help_text = force_str(field.help_text)
# Add help text if field has one
if help_text:
# Separate verbose name and help text by a dot
if not verbose_name.endswith("."):
verbose_name += ". "
verbose_name += help_text
if isinstance(field, models.fields.related.RelatedField):
# If field is a forward related field, reference the remote model
to = field.remote_field.model
if isinstance(to, str):
# This happens with foreign keys of abstract models
if "." in to:
to = apps.get_model(to)
elif to == "self":
to = field.model
else:
to = apps.get_model(field.model._meta.app_label, to)
# If a related name is defined
if hasattr(field.remote_field, "related_name"):
related_name = (
field.remote_field.related_name or field.model.__name__.lower()
)
verbose_name += (
f" (related name: :attr:`~{to.__module__}.{to.__name__}.{related_name}`)"
)
return verbose_name
| 43.318519 | 99 | 0.639193 | [
"Apache-2.0"
] | mkalioby/sphinxcontrib-django2 | sphinxcontrib_django2/docstrings/field_utils.py | 5,848 | Python |
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
def fact(name=None):
r"""Output a fact about factorials.
Args:
name: A name for the operation (optional).
Returns:
A `Tensor` of type `string`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Fact", name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"Fact", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Fact", name,
_ctx._post_execution_callbacks)
return _result
except _core._FallbackException:
return fact_eager_fallback(
name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def fact_eager_fallback(name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function fact
"""
_ctx = ctx if ctx else _context.context()
_inputs_flat = []
_attrs = None
_result = _execute.execute(b"Fact", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Fact", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "Fact"
# output_arg {
# name: "fact"
# type: DT_STRING
# }
# }
_op_def_lib = _InitOpDefLibrary(b"\n\020\n\004Fact\032\010\n\004fact\030\007")
| 32.107527 | 78 | 0.739786 | [
"MIT"
] | caiovini/Image_reader_api | venv/Lib/site-packages/tensorflow/python/ops/gen_user_ops.py | 2,986 | Python |
#!/usr/bin/env python3
# md_lj_module.py
#------------------------------------------------------------------------------------------------#
# This software was written in 2016/17 #
# by Michael P. Allen <[email protected]>/<[email protected]> #
# and Dominic J. Tildesley <[email protected]> ("the authors"), #
# to accompany the book "Computer Simulation of Liquids", second edition, 2017 ("the text"), #
# published by Oxford University Press ("the publishers"). #
# #
# LICENCE #
# Creative Commons CC0 Public Domain Dedication. #
# To the extent possible under law, the authors have dedicated all copyright and related #
# and neighboring rights to this software to the PUBLIC domain worldwide. #
# This software is distributed without any warranty. #
# You should have received a copy of the CC0 Public Domain Dedication along with this software. #
# If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. #
# #
# DISCLAIMER #
# The authors and publishers make no warranties about the software, and disclaim liability #
# for all uses of the software, to the fullest extent permitted by applicable law. #
# The authors and publishers do not recommend use of this software for any purpose. #
# It is made freely available, solely to clarify points made in the text. When using or citing #
# the software, you should not imply endorsement by the authors or publishers. #
#------------------------------------------------------------------------------------------------#
"""Force routine for MD simulation, Lennard-Jones atoms."""
fast = True # Change this to replace NumPy force evaluation with slower Python
class PotentialType:
"""A composite variable for interactions."""
def __init__(self, cut, pot, vir, lap, ovr):
self.cut = cut # the potential energy cut (but not shifted) at r_cut
self.pot = pot # the potential energy cut-and-shifted at r_cut
self.vir = vir # the virial
self.lap = lap # the Laplacian
self.ovr = ovr # a flag indicating overlap (i.e. pot too high to use)
def __add__(self, other):
cut = self.cut + other.cut
pot = self.pot + other.pot
vir = self.vir + other.vir
lap = self.lap + other.lap
ovr = self.ovr or other.ovr
return PotentialType(cut,pot,vir,lap,ovr)
def introduction():
"""Prints out introductory statements at start of run."""
print('Lennard-Jones potential')
print('Cut-and-shifted version for dynamics')
print('Cut (but not shifted) version also calculated')
print('Diameter, sigma = 1')
print('Well depth, epsilon = 1')
if fast:
print('Fast NumPy force routine')
else:
print('Slow Python force routine')
def conclusion():
"""Prints out concluding statements at end of run."""
print('Program ends')
def force ( box, r_cut, r ):
"""Takes in box, cutoff range, and coordinate array, and calculates forces and potentials etc."""
import numpy as np
# It is assumed that positions are in units where box = 1
# Forces are calculated in units where sigma = 1 and epsilon = 1
n, d = r.shape
assert d==3, 'Dimension error in force'
sr2_ovr = 1.77 # Overlap threshold (pot > 100)
r_cut_box = r_cut / box
r_cut_box_sq = r_cut_box ** 2
box_sq = box ** 2
# Calculate potential at cutoff
sr2 = 1.0 / r_cut**2 # in sigma=1 units
sr6 = sr2 ** 3
sr12 = sr6 **2
pot_cut = sr12 - sr6 # Without numerical factor 4
# Initialize
f = np.zeros_like(r)
total = PotentialType ( cut=0.0, pot=0.0, vir=0.0, lap=0.0, ovr=False )
if fast:
for i in range(n-1):
rij = r[i,:]-r[i+1:,:] # Separation vectors for j>i
rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units
            rij_sq = np.sum(rij**2,axis=1)    # Squared separations for j>i
in_range = rij_sq < r_cut_box_sq # Set flags for within cutoff
rij_sq = rij_sq * box_sq # Now in sigma=1 units
rij = rij * box # Now in sigma=1 units
sr2 = np.where ( in_range, 1.0/rij_sq, 0.0 ) # (sigma/rij)**2, only if in range
ovr = sr2 > sr2_ovr # Overlap if too close
sr6 = sr2 ** 3
sr12 = sr6 ** 2
cut = sr12 - sr6 # LJ pair potential (cut but not shifted)
vir = cut + sr12 # LJ pair virial
pot = np.where ( in_range, cut-pot_cut, 0.0 ) # LJ pair potential (cut-and-shifted)
lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ pair Laplacian
fij = vir * sr2 # LJ scalar part of forces
fij = rij * fij[:,np.newaxis] # LJ pair forces
total = total + PotentialType ( cut=np.sum(cut), pot=np.sum(pot),
vir=np.sum(vir), lap=np.sum(lap), ovr=np.any(ovr) )
f[i,:] = f[i,:] + np.sum(fij,axis=0)
f[i+1:,:] = f[i+1:,:] - fij
else:
for i in range(n-1): # Outer loop
for j in range(i+1,n): # Inner loop
rij = r[i,:]-r[j,:] # Separation vector
rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units
rij_sq = np.sum(rij**2) # Squared separation
if rij_sq < r_cut_box_sq: # Check within cutoff
rij_sq = rij_sq * box_sq # Now in sigma=1 units
rij = rij * box # Now in sigma=1 units
sr2 = 1.0 / rij_sq # (sigma/rij)**2
ovr = sr2 > sr2_ovr # Overlap if too close
sr6 = sr2 ** 3
sr12 = sr6 ** 2
cut = sr12 - sr6 # LJ pair potential (cut but not shifted)
vir = cut + sr12 # LJ pair virial
pot = cut - pot_cut # LJ pair potential (cut-and-shifted)
lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ pair Laplacian
fij = rij * vir * sr2 # LJ pair forces
total = total + PotentialType ( cut=cut, pot=pot, vir=vir, lap=lap, ovr=ovr )
f[i,:] = f[i,:] + fij
f[j,:] = f[j,:] - fij
# Multiply results by numerical factors
f = f * 24.0 # 24*epsilon
total.cut = total.cut * 4.0 # 4*epsilon
total.pot = total.pot * 4.0 # 4*epsilon
total.vir = total.vir * 24.0 / 3.0 # 24*epsilon and divide virial by 3
total.lap = total.lap * 24.0 * 2.0 # 24*epsilon and factor 2 for ij and ji
return total, f
def hessian ( box, r_cut, r, f ):
"""Calculates Hessian function (for 1/N correction to config temp)."""
import numpy as np
# This routine is only needed in a constant-energy ensemble
# It is assumed that positions are in units where box = 1
# but the result is given in units where sigma = 1 and epsilon = 1
# It is assumed that forces have already been calculated in array f
n, d = r.shape
assert d==3, 'Dimension error in hessian'
assert np.all ( r.shape==f.shape ), 'Dimension mismatch in hessian'
r_cut_box = r_cut / box
r_cut_box_sq = r_cut_box ** 2
box_sq = box ** 2
hes = 0.0
if fast:
for i in range(n-1):
rij = r[i,:] - r[i+1:,:] # Separation vectors
rij = rij - np.rint ( rij ) # Periodic boundary conditions in box=1 units
            rij_sq = np.sum(rij**2,axis=1)   # Squared separations for j>i
in_range = rij_sq < r_cut_box_sq # Set flags for within cutoff
rij_sq = rij_sq * box_sq # Now in sigma=1 units
rij = rij * box # Now in sigma=1 units
fij = f[i,:] - f[i+1:,:] # Differences in forces
ff = np.sum(fij*fij,axis=1)
rf = np.sum(rij*fij,axis=1)
sr2 = np.where ( in_range, 1.0 / rij_sq, 0.0 ) # Only where in range
sr6 = sr2 ** 3
sr8 = sr6 * sr2
sr10 = sr8 * sr2
v1 = 24.0 * ( 1.0 - 2.0 * sr6 ) * sr8
v2 = 96.0 * ( 7.0 * sr6 - 2.0 ) * sr10
hes = hes + np.sum(v1 * ff) + np.sum(v2 * rf**2)
else:
for i in range(n-1):
for j in range(i+1,n):
rij = r[i,:] - r[j,:] # Separation vector
rij = rij - np.rint ( rij ) # Periodic boundary conditions in box=1 units
rij_sq = np.sum ( rij**2 ) # Squared separation
if rij_sq < r_cut_box_sq:
rij_sq = rij_sq * box_sq # Now in sigma=1 units
rij = rij * box # Now in sigma=1 units
fij = f[i,:] - f[j,:] # Difference in forces
ff = np.dot(fij,fij)
rf = np.dot(rij,fij)
sr2 = 1.0 / rij_sq
sr6 = sr2 ** 3
sr8 = sr6 * sr2
sr10 = sr8 * sr2
v1 = 24.0 * ( 1.0 - 2.0 * sr6 ) * sr8
v2 = 96.0 * ( 7.0 * sr6 - 2.0 ) * sr10
hes = hes + v1 * ff + v2 * rf**2
return hes
| 48.116279 | 109 | 0.478879 | [
"CC0-1.0"
] | Allen-Tildesley/examples | python_examples/md_lj_module.py | 10,345 | Python |
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""Tool to find or compare big functions in a js or ll file
"""
import sys
def humanbytes(nbytes):
  if nbytes > 9 * 1024 * 1024:
    return '{}MB'.format(nbytes // (1024 * 1024))
  elif nbytes > 9 * 1024:
    return '{}KB'.format(nbytes // 1024)
  else:
    return '{}B'.format(nbytes)
def processfile(filename):
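  # A function starts at a line beginning with 'function ', 'define ' or
  # '  (func ' (js / ll / wasm-text) and ends at the matching '}' or '  )'.
  # The result maps each function header line to a (line_count, byte_count) tuple.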
start = None
curr = None
nbytes = None
data = {}
for i, line in enumerate(open(filename)):
if line.startswith(('function ', 'define ', ' (func ')) and '}' not in line:
start = i
curr = line
nbytes = len(line)
elif line.startswith(('}', ' )')) and curr:
nlines = i - start
data[curr] = (nlines, nbytes + 1)
curr = None
start = None
elif curr:
nbytes += len(line)
return data
def common_compare(data1, data2):
fns1 = set(data1.keys())
fns2 = set(data2.keys())
commonfns = fns1.intersection(fns2)
commonlinediff = 0
commonbytediff = 0
for fn in commonfns:
d1 = data1[fn]
d2 = data2[fn]
commonlinediff += d2[0] - d1[0]
commonbytediff += d2[1] - d1[1]
linesword = 'more' if commonlinediff >= 0 else 'less'
bytesword = 'more' if commonbytediff >= 0 else 'less'
print('file 2 has {} lines {} than file 1 in {} common functions'.format(abs(commonlinediff), linesword, len(commonfns)))
print('file 2 has {} {} than file 1 in {} common functions'.format(humanbytes(abs(commonbytediff)), bytesword, len(commonfns)))
def uniq_compare(data1, data2):
fns1 = set(data1.keys())
fns2 = set(data2.keys())
uniqfns1 = fns1 - fns2
uniqfns2 = fns2 - fns1
uniqlines1 = 0
uniqbytes1 = 0
uniqlines2 = 0
uniqbytes2 = 0
for fn in uniqfns1:
d = data1[fn]
uniqlines1 += d[0]
uniqbytes1 += d[1]
for fn in uniqfns2:
d = data2[fn]
uniqlines2 += d[0]
uniqbytes2 += d[1]
uniqcountdiff = len(uniqfns2) - len(uniqfns1)
assert len(fns2) - len(fns1) == uniqcountdiff
uniqlinediff = uniqlines2 - uniqlines1
uniqbytediff = uniqbytes2 - uniqbytes1
countword = 'more' if uniqcountdiff >= 0 else 'less'
linesword = 'more' if uniqlinediff >= 0 else 'less'
bytesword = 'more' if uniqbytediff >= 0 else 'less'
print('file 2 has {} functions {} than file 1 overall (unique: {} vs {})'.format(abs(uniqcountdiff), countword, len(uniqfns2), len(uniqfns1)))
print('file 2 has {} lines {} than file 1 overall in unique functions'.format(abs(uniqlinediff), linesword))
print('file 2 has {} {} than file 1 overall in unique functions'.format(humanbytes(abs(uniqbytediff)), bytesword))
def list_bigfuncs(data):
data = list(data.items())
data.sort(key=lambda f_d: f_d[1][0])
print(''.join(['%6d lines (%6s) : %s' % (d[0], humanbytes(d[1]), f) for f, d in data]))
def main():
if len(sys.argv) < 2 or len(sys.argv) > 3 or sys.argv[1] == '--help':
print('Usage:')
print(' {} file1 - list functions in a file in ascending order of size'.format(sys.argv[0]))
print(' {} file1 file2 - compare functions across two files'.format(sys.argv[0]))
return 1
if len(sys.argv) == 2:
filename = sys.argv[1]
data = processfile(filename)
list_bigfuncs(data)
return 0
if len(sys.argv) == 3:
filename1 = sys.argv[1]
data1 = processfile(filename1)
filename2 = sys.argv[2]
data2 = processfile(filename2)
uniq_compare(data1, data2)
common_compare(data1, data2)
return 0
assert False
if __name__ == '__main__':
sys.exit(main())
| 32.875 | 146 | 0.604563 | [
"MIT"
] | 0x53A/emscripten | tools/find_bigfuncs.py | 3,945 | Python |
from datetime import date
from argparse import Namespace
from django.contrib import admin
from django_q.tasks import async_task
from import_export.admin import ImportExportModelAdmin
from .models import (
MunicipalStaffContactsUpdate,
IncomeExpenditureV2Update,
CashFlowV2Update,
RepairsMaintenanceV2Update,
AgedDebtorFactsV2Update,
AgedCreditorFactsV2Update,
CapitalFactsV2Update,
GrantFactsV2Update,
FinancialPositionFactsV2Update,
UIFWExpenseFactsUpdate,
AuditOpinionFactsUpdate,
AgedCreditorItemsV2,
AgedDebtorItemsV2,
CflowItemsV2,
IncexpItemsV2,
FinancialPositionItemsV2,
RepairsMaintenanceItemsV2,
CapitalItemsV2,
GovernmentFunctionsV2,
GrantTypesV2,
CapitalTypeV2,
DemarcationChanges,
)
from .resources import (
AgedDebtorItemsV2Resource,
AgedCreditorItemsV2Resource,
CashflowItemsV2Resource,
IncexpItemsV2Resource,
CapitalItemsV2Resource,
FinancialPositionItemsV2Resource,
RepairsMaintenanceItemsV2Resource,
GovernmentFunctionsV2Resource,
GrantTypesV2Resource,
CapitalTypeV2Resource,
)
class BaseUpdateAdmin(admin.ModelAdmin):
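    """Base admin for data-update models.

    Subclasses point ``task_function``/``task_name`` at a django-q task, which is
    queued with the newly saved update object when a record is first created.
    """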
list_display = ("user", "datetime", "deleted", "inserted",)
readonly_fields = ("user", "deleted", "inserted",)
task_function = None
task_name = None
def get_exclude(self, request, obj=None):
if obj is None:
return ("user",)
else:
return super(BaseUpdateAdmin, self).get_exclude(request, obj)
def save_model(self, request, obj, form, change):
# Set the user to the current user
obj.user = request.user
# Process default save behavior
super(BaseUpdateAdmin, self).save_model(
request, obj, form, change
)
# Queue task
if not change:
async_task(
self.task_function,
obj,
task_name=self.task_name,
batch_size=10000,
)
@admin.register(MunicipalStaffContactsUpdate)
class MunicipalStaffContactsUpdateAdmin(BaseUpdateAdmin):
task_function = "municipal_finance.update.update_municipal_staff_contacts"
task_name = "Municipal staff contacts update"
@admin.register(UIFWExpenseFactsUpdate)
class UIFWExpenseFactsUpdateAdmin(BaseUpdateAdmin):
task_function = "municipal_finance.update.update_uifw_expense_facts"
task_name = "UIFW Expense Facts update"
@admin.register(AuditOpinionFactsUpdate)
class AuditOpinionFactsUpdateAdmin(BaseUpdateAdmin):
task_function = "municipal_finance.update.update_audit_opinion_facts"
task_name = "Audit Opinion Facts update"
@admin.register(IncomeExpenditureV2Update)
class IncomeExpenditureV2UpdateAdmin(BaseUpdateAdmin):
task_function = "municipal_finance.update.update_income_expenditure_v2"
task_name = "Income & Expenditure v2 update"
@admin.register(CashFlowV2Update)
class CashFlowV2UpdateAdmin(BaseUpdateAdmin):
task_function = "municipal_finance.update.update_cash_flow_v2"
task_name = "Cash flow v2 update"
@admin.register(RepairsMaintenanceV2Update)
class RepairsMaintenanceV2UpdateAdmin(BaseUpdateAdmin):
task_function = "municipal_finance.update.update_repairs_maintenance_v2"
task_name = "Repairs & Maintenance v2 update"
@admin.register(AgedDebtorFactsV2Update)
class AgedDebtorFactsV2UpdateAdmin(BaseUpdateAdmin):
task_function = "municipal_finance.update.update_aged_debtor_facts_v2"
task_name = "Aged Debtor Facts v2 update"
@admin.register(AgedCreditorFactsV2Update)
class AgedCreditorFactsV2UpdateAdmin(BaseUpdateAdmin):
task_function = "municipal_finance.update.update_aged_creditor_facts_v2"
task_name = "Aged Creditor Facts v2 update"
@admin.register(CapitalFactsV2Update)
class CapitalFactsV2UpdateAdmin(BaseUpdateAdmin):
task_function = "municipal_finance.update.update_capital_facts_v2"
task_name = "Capital Facts v2 update"
@admin.register(GrantFactsV2Update)
class GrantFactsV2UpdateAdmin(BaseUpdateAdmin):
task_function = "municipal_finance.update.update_grant_facts_v2"
task_name = "Grant Facts v2 update"
@admin.register(FinancialPositionFactsV2Update)
class FinancialPositionFactsV2UpdateAdmin(BaseUpdateAdmin):
task_function = "municipal_finance.update.update_financial_position_facts_v2"
task_name = "FinancialPosition Facts v2 update"
@admin.register(AgedCreditorItemsV2)
class AgedCreditorItemsV2Admin(ImportExportModelAdmin):
resource_class = AgedCreditorItemsV2Resource
list_display = ("code", "label",)
@admin.register(AgedDebtorItemsV2)
class AgedDebtorItemsV2Admin(ImportExportModelAdmin):
resource_class = AgedDebtorItemsV2Resource
list_display = ("code", "label",)
@admin.register(CflowItemsV2)
class CashFlowItemsV2Admin(ImportExportModelAdmin):
resource_class = CashflowItemsV2Resource
list_display = ("code", "label",)
@admin.register(IncexpItemsV2)
class IncexpItemsV2Admin(ImportExportModelAdmin):
resource_class = IncexpItemsV2Resource
list_display = ("code", "label",)
@admin.register(FinancialPositionItemsV2)
class FinancialPositionItemsV2Admin(ImportExportModelAdmin):
resource_class = FinancialPositionItemsV2Resource
list_display = ("code", "label",)
@admin.register(RepairsMaintenanceItemsV2)
class RepairsMaintenanceItemsV2Admin(ImportExportModelAdmin):
resource_class = RepairsMaintenanceItemsV2Resource
list_display = ("code", "label",)
@admin.register(GovernmentFunctionsV2)
class GovernmentFunctionsV2Admin(ImportExportModelAdmin):
resource_class = GovernmentFunctionsV2Resource
list_display = ("code", "label",)
@admin.register(GrantTypesV2)
class GrantTypesV2Admin(ImportExportModelAdmin):
resource_class = GrantTypesV2Resource
list_display = ("code", "name",)
@admin.register(CapitalTypeV2)
class CapitalTypeV2Admin(ImportExportModelAdmin):
resource_class = CapitalTypeV2Resource
list_display = ("code", "label",)
@admin.register(DemarcationChanges)
class DemarcationChangesAdmin(admin.ModelAdmin):
list_display = (
"date",
"old_code",
"new_code",
"old_code_transition",
"new_code_transition",
)
| 30.346341 | 81 | 0.766436 | [
"MIT"
] | Code4SA/municipal-data-api | municipal_finance/admin.py | 6,221 | Python |
# Author: Mainak Jas <[email protected]>
# Romain Trachel <[email protected]>
#
# License: BSD (3-clause)
import warnings
import os.path as op
import numpy as np
from nose.tools import assert_true, assert_raises
from numpy.testing import assert_array_equal
from mne import io, read_events, Epochs, pick_types
from mne.decoding import Scaler, FilterEstimator
from mne.decoding import PSDEstimator, EpochsVectorizer
warnings.simplefilter('always') # enable b/c these tests throw warnings
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
start, stop = 0, 8
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
def test_scaler():
"""Test methods of Scaler
"""
raw = io.read_raw_fif(raw_fname, preload=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
epochs_data = epochs.get_data()
scaler = Scaler(epochs.info)
y = epochs.events[:, -1]
# np invalid divide value warnings
with warnings.catch_warnings(record=True):
X = scaler.fit_transform(epochs_data, y)
assert_true(X.shape == epochs_data.shape)
X2 = scaler.fit(epochs_data, y).transform(epochs_data)
assert_array_equal(X2, X)
# Test inverse_transform
with warnings.catch_warnings(record=True): # invalid value in mult
Xi = scaler.inverse_transform(X, y)
assert_array_equal(epochs_data, Xi)
# Test init exception
assert_raises(ValueError, scaler.fit, epochs, y)
assert_raises(ValueError, scaler.transform, epochs, y)
def test_filterestimator():
"""Test methods of FilterEstimator
"""
raw = io.read_raw_fif(raw_fname, preload=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
epochs_data = epochs.get_data()
# Add tests for different combinations of l_freq and h_freq
filt = FilterEstimator(epochs.info, l_freq=1, h_freq=40)
y = epochs.events[:, -1]
with warnings.catch_warnings(record=True): # stop freq attenuation warning
X = filt.fit_transform(epochs_data, y)
assert_true(X.shape == epochs_data.shape)
assert_array_equal(filt.fit(epochs_data, y).transform(epochs_data), X)
filt = FilterEstimator(epochs.info, l_freq=0, h_freq=40)
y = epochs.events[:, -1]
with warnings.catch_warnings(record=True): # stop freq attenuation warning
X = filt.fit_transform(epochs_data, y)
filt = FilterEstimator(epochs.info, l_freq=1, h_freq=1)
y = epochs.events[:, -1]
with warnings.catch_warnings(record=True): # stop freq attenuation warning
assert_raises(ValueError, filt.fit_transform, epochs_data, y)
filt = FilterEstimator(epochs.info, l_freq=1, h_freq=None)
with warnings.catch_warnings(record=True): # stop freq attenuation warning
X = filt.fit_transform(epochs_data, y)
# Test init exception
assert_raises(ValueError, filt.fit, epochs, y)
assert_raises(ValueError, filt.transform, epochs, y)
def test_psdestimator():
"""Test methods of PSDEstimator
"""
raw = io.read_raw_fif(raw_fname, preload=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
epochs_data = epochs.get_data()
psd = PSDEstimator(2 * np.pi, 0, np.inf)
y = epochs.events[:, -1]
X = psd.fit_transform(epochs_data, y)
assert_true(X.shape[0] == epochs_data.shape[0])
assert_array_equal(psd.fit(epochs_data, y).transform(epochs_data), X)
# Test init exception
assert_raises(ValueError, psd.fit, epochs, y)
assert_raises(ValueError, psd.transform, epochs, y)
def test_epochs_vectorizer():
"""Test methods of EpochsVectorizer
"""
raw = io.read_raw_fif(raw_fname, preload=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
with warnings.catch_warnings(record=True):
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
epochs_data = epochs.get_data()
vector = EpochsVectorizer(epochs.info)
y = epochs.events[:, -1]
X = vector.fit_transform(epochs_data, y)
# Check data dimensions
assert_true(X.shape[0] == epochs_data.shape[0])
assert_true(X.shape[1] == epochs_data.shape[1] * epochs_data.shape[2])
assert_array_equal(vector.fit(epochs_data, y).transform(epochs_data), X)
# Check if data is preserved
n_times = epochs_data.shape[2]
assert_array_equal(epochs_data[0, 0, 0:n_times], X[0, 0:n_times])
# Check inverse transform
Xi = vector.inverse_transform(X, y)
assert_true(Xi.shape[0] == epochs_data.shape[0])
assert_true(Xi.shape[1] == epochs_data.shape[1])
assert_array_equal(epochs_data[0, 0, 0:n_times], Xi[0, 0, 0:n_times])
# check if inverse transform works with different number of epochs
Xi = vector.inverse_transform(epochs_data[0], y)
assert_true(Xi.shape[1] == epochs_data.shape[1])
assert_true(Xi.shape[2] == epochs_data.shape[2])
# Test init exception
assert_raises(ValueError, vector.fit, epochs, y)
assert_raises(ValueError, vector.transform, epochs, y)
| 36.791411 | 79 | 0.679006 | [
"BSD-3-Clause"
] | ARudiuk/mne-python | mne/decoding/tests/test_transformer.py | 5,997 | Python |
from flask import render_template
from flask import redirect
from flask import request
from flask import url_for
from flask import flash
from flask_login import login_user
from flask_login import logout_user
from flask_login import login_required
from flask_login import current_user
from . import auth
from .forms import LoginForm
from .forms import RegistrationForm
from .forms import ChangePasswordForm
from .forms import PasswordResetRequestForm
from .forms import PasswordResetForm
from .forms import ChangeEmailForm
from .. import db
from ..models import User
from ..email import send_email
@auth.before_app_request
def before_request():
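    # Runs before every request: refresh the user's last-seen timestamp and
    # send unconfirmed accounts to the "unconfirmed" page, leaving auth
    # endpoints and static files reachable.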
if current_user.is_authenticated:
current_user.update_last_seen()
if (
not current_user.confirmed and
request.endpoint[:5] != 'auth.' and
request.endpoint != 'static'
):
return redirect(url_for('auth.unconfirmed'))
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or password')
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash('You have been logged out.')
return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(
email=form.email.data,
username=form.username.data,
password=form.password.data
)
db.session.add(user)
db.session.commit()
token = user.generate_confirmation_token()
send_email(user.email, 'Confirm Your Account', 'auth/email/confirm',
user=user, token=token)
flash('A confirmation email has been sent to you by email.')
return redirect(url_for('auth.login'))
return render_template('auth/register.html', form=form)
@auth.route('/unconfirmed')
def unconfirmed():
if current_user.is_anonymous or current_user.confirmed:
return redirect(url_for('main.index'))
return render_template('auth/unconfirmed.html')
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
if current_user.confirmed:
return redirect(url_for('main.index'))
if current_user.confirm(token):
flash('You have confirmed your account. Thanks!')
else:
flash('The confirmation link is invalid or has expired.')
return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
token = current_user.generate_confirmation_token()
send_email(
current_user.email,
'Confirm Your Account',
'auth/email/confirm',
user=current_user, token=token
)
flash('A new confirmation email has been sent to you by email.')
return redirect(url_for('auth.login'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
form = ChangePasswordForm()
if form.validate_on_submit():
if current_user.verify_password(form.old_password.data):
current_user.password = form.password.data
db.session.add(current_user)
flash('Your password has been updated.')
return redirect(url_for('main.index'))
else:
flash('Invalid password.')
return render_template('auth/change_password.html', form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
if not current_user.is_anonymous:
return redirect(url_for('main.index'))
form = PasswordResetRequestForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
token = user.generate_reset_token()
send_email(
user.email,
'Reset Your Password',
'auth/email/reset_password',
user=user, token=token,
next=request.args.get('next')
)
flash('An Email with instructions to reset your '
'password has been sent to you.')
else:
flash('Email not registered')
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
if not current_user.is_anonymous:
return redirect(url_for('main.index'))
form = PasswordResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None:
return redirect(url_for('main.index'))
if user.reset_password(token, form.password.data):
flash('Your password has been updated.')
return redirect(url_for('auth.login'))
else:
return redirect(url_for('main.index'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
form = ChangeEmailForm()
if form.validate_on_submit():
if current_user.verify_password(form.password.data):
new_email = form.email.data
token = current_user.generate_email_change_token(new_email)
send_email(
new_email,
'Confirm your email address',
'auth/email/change_email',
user=current_user, token=token
)
flash('An email with instructions to confirm your '
'new email address has been sent to you.')
return redirect(url_for('main.index'))
else:
flash('Invalid email or password.')
return render_template('auth/change_email.html', form=form)
@auth.route('/change-email/<token>')
@login_required
def change_email(token):
if current_user.change_email(token):
flash('Your email address has been updated.')
else:
flash('Invalid request.')
return redirect(url_for('main.index'))
| 33.505208 | 78 | 0.655215 | [
"MIT"
] | qimiaoxue/flash_card | app/auth/views.py | 6,433 | Python |
from dataclasses import dataclass
from cannabis.types.blockchain_format.sized_bytes import bytes32
from cannabis.util.ints import uint32
from cannabis.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class PoolTarget(Streamable):
puzzle_hash: bytes32
max_height: uint32 # A max height of 0 means it is valid forever
| 27.769231 | 69 | 0.814404 | [
"Apache-2.0"
] | CannabisChain/cannabis-blockchain | cannabis/types/blockchain_format/pool_target.py | 361 | Python |
import logging
LOG = logging.getLogger(__name__)
def export_transcripts(adapter, build="37"):
"""Export all transcripts from the database
Args:
adapter(scout.adapter.MongoAdapter)
build(str)
Yields:
transcript(scout.models.Transcript)
"""
LOG.info("Exporting all transcripts")
for tx_obj in adapter.transcripts(build=build):
yield tx_obj
| 20 | 51 | 0.675 | [
"BSD-3-Clause"
] | Clinical-Genomics/scout | scout/export/transcript.py | 400 | Python |
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
from .metrics import mse_score, rmse_score, r2_score, mae_score
from ..features.build_features import StandardScaler, MinMaxScaler
class LinearRegressor():
"""Linear regressor"""
def __init__(self, method='normal_equation', normalize=False, lr=0.01, epochs=1000, add_intercept=False):
assert method in ['normal_equation', 'gradient_descent'], "Method not supported. Supported methods are 'normal_equation' and 'gradient_descent'"
self.method = method
self.normalize = normalize
self.add_intercept = add_intercept
self._weights = None
if self.method == 'gradient_descent':
self.lr = lr
self.epochs = epochs
if self.normalize:
self._feature_scaler = MinMaxScaler()
self._target_scaler = MinMaxScaler()
def fit(self, X, y):
"""Fit the model to the data"""
if self.normalize:
X = self._feature_scaler.fit_transform(X)
y = self._target_scaler.fit_transform(y)
X = X.to_numpy()
if self.add_intercept:
X = np.hstack((np.ones((X.shape[0], 1)), X))
y = y.to_numpy()
if self.method == 'normal_equation':
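            # Closed-form least squares (normal equation): w = (X^T X)^-1 X^T y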
self._weights = np.dot(np.dot(np.linalg.inv(np.dot(X.T, X)), X.T), y)
else:
# mse_new = np.inf
self._weights = np.zeros(X.shape[1])
self.cost_history = [0] * self.epochs
for i in range(self.epochs):
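                # Batch gradient of the MSE cost: grad = X^T (X w - y) / n_samples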
grad = np.dot(X.T, np.dot(X, self._weights) - y) / y.shape[0]
self._weights = self._weights - self.lr * grad
self.cost_history[i] = mse_score(y, np.dot(X, self._weights))
# if (rmse_new > rmse_old):
# print("Stopped at iteration {}".format(i))
# break
plt.scatter(range(self.epochs), self.cost_history)
plt.xlabel('epoch')
plt.ylabel('mse')
def predict(self, X):
"""Use the fitted model to predict on data"""
assert self._weights is not None, "Model needs to be fitted first. Use the fit method"
if self.normalize:
X = self._feature_scaler.transform(X)
X = X.to_numpy()
if self.add_intercept:
X = np.hstack((np.ones((X.shape[0], 1)), X))
y_pred = np.dot(X, self._weights)
if self.normalize:
y_pred = self._target_scaler.inverse_transform(y_pred)
return np.round(y_pred, 2)
def get_weights(self):
"""Get weights from the fitted model"""
assert self._weights is not None, "Model needs to be fitted first. Use the fit method"
return self._weights
def score(self, X, y, metric='r2'):
"""Score the model"""
assert metric in ['r2', 'rmse', 'mae'], "Metric not supported. Supported metrics are 'r2', 'rmse' and 'mae'"
y_pred = self.predict(X)
if metric == 'r2':
score = r2_score(y, y_pred)
elif metric == 'rmse':
score = rmse_score(y, y_pred)
elif metric == 'mae':
score = mae_score(y, y_pred)
return score | 34.197917 | 152 | 0.573865 | [
"MIT"
] | orsdanilo/ml-from-scratch | src/models/_linear.py | 3,283 | Python |
from copy import deepcopy
from quest.quest_manager import QuestManager
import settings
from twitch.channel import Channel
class QuestChannel(Channel):
def __init__(self, owner, channel_manager):
super().__init__(owner, channel_manager)
self.quest_manager = QuestManager(self)
self.mod_commands.add_commands(
exact_match_commands={
'!queston': lambda _: self.channel_manager.enable_quest(self.owner),
'!questoff': lambda _: self.channel_manager.disable_quest(self.owner)
}, starts_with_commands={
'!questcooldown': self.set_quest_cooldown})
def set_quest_cooldown(self, display_name, cooldown):
"""
Sets the quest cooldown to be the specified value.
:param display_name: str - The display name of the person trying to set the cooldown
:param cooldown: str - The raw message specifying the value to set the cooldown to
:return:
"""
try:
self.channel_manager.set_quest_cooldown(self.owner, int(cooldown))
except (IndexError, ValueError):
self.channel_manager.bot.send_whisper(
display_name, 'Invalid usage! Sample usage: !questcooldown 90')
def check_commands(self, display_name, msg, is_mod, is_sub):
"""
Connect to other command lists whose requirements are met.
:param display_name: str - The display name of the command sender
:param msg: str - The full message that the user sent that starts with "!"
:param is_mod: bool - Whether the sender is a mod
:param is_sub: bool - Whether the sender is a sub
"""
super().check_commands(display_name, msg, is_mod, is_sub)
# Check quest commands
self.quest_manager.commands.execute_command(display_name, msg)
| 39.404255 | 92 | 0.666307 | [
"MIT"
] | Xelaadryth/Xelabot | quest_bot/quest_channel.py | 1,852 | Python |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Centralized catalog of paths."""
import os
class DatasetCatalog(object):
DATA_DIR = "./datasets"
DATASETS = {
"coco_2017_train": {
"img_dir": "coco/train2017",
"ann_file": "coco/annotations/instances_train2017.json"
},
"coco_2017_val": {
"img_dir": "coco/val2017",
"ann_file": "coco/annotations/instances_val2017.json"
},
"coco_2017_test_dev": {
"img_dir": "coco/test2017",
"ann_file": "coco/annotations/image_info_test-dev2017.json"
},
"coco_2014_train": {
"img_dir": "coco/train2014",
"ann_file": "coco/annotations/instances_train2014.json"
},
"coco_2014_val": {
"img_dir": "coco/val2014",
"ann_file": "coco/annotations/instances_val2014.json"
},
"coco_2014_minival": {
"img_dir": "coco/val2014",
"ann_file": "coco/annotations/instances_minival2014.json"
},
"coco_2014_valminusminival": {
"img_dir": "coco/val2014",
"ann_file": "coco/annotations/instances_valminusminival2014.json"
},
"keypoints_coco_2014_train": {
"img_dir": "coco/train2014",
"ann_file": "coco/annotations/person_keypoints_train2014.json",
},
"keypoints_coco_2014_val": {
"img_dir": "coco/val2014",
"ann_file": "coco/annotations/person_keypoints_val2014.json"
},
"keypoints_coco_2014_minival": {
"img_dir": "coco/val2014",
"ann_file": "coco/annotations/person_keypoints_minival2014.json",
},
"keypoints_coco_2014_valminusminival": {
"img_dir": "coco/val2014",
"ann_file": "coco/annotations/person_keypoints_valminusminival2014.json",
},
"voc_2007_train": {
"data_dir": "voc/VOC2007",
"split": "train"
},
"voc_2007_train_cocostyle": {
"img_dir": "voc/VOC2007/JPEGImages",
"ann_file": "voc/VOC2007/Annotations/pascal_train2007.json"
},
"voc_2007_val": {
"data_dir": "voc/VOC2007",
"split": "val"
},
"voc_2007_val_cocostyle": {
"img_dir": "voc/VOC2007/JPEGImages",
"ann_file": "voc/VOC2007/Annotations/pascal_val2007.json"
},
"voc_2007_test": {
"data_dir": "voc/VOC2007",
"split": "test"
},
"voc_2007_test_cocostyle": {
"img_dir": "voc/VOC2007/JPEGImages",
"ann_file": "voc/VOC2007/Annotations/pascal_test2007.json"
},
"voc_2012_train": {
"data_dir": "voc/VOC2012",
"split": "train"
},
"voc_2012_train_cocostyle": {
"img_dir": "voc/VOC2012/JPEGImages",
"ann_file": "voc/VOC2012/Annotations/pascal_train2012.json"
},
"voc_2012_val": {
"data_dir": "voc/VOC2012",
"split": "val"
},
"voc_2012_val_cocostyle": {
"img_dir": "voc/VOC2012/JPEGImages",
"ann_file": "voc/VOC2012/Annotations/pascal_val2012.json"
},
"voc_2012_test": {
"data_dir": "voc/VOC2012",
"split": "test"
# PASCAL VOC2012 doesn't made the test annotations available, so there's no json annotation
},
"cityscapes_fine_instanceonly_seg_train_cocostyle": {
"img_dir": "cityscapes/images",
"ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_train.json"
},
"cityscapes_fine_instanceonly_seg_val_cocostyle": {
"img_dir": "cityscapes/images",
"ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_val.json"
},
"cityscapes_fine_instanceonly_seg_test_cocostyle": {
"img_dir": "cityscapes/images",
"ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_test.json"
}
}
@staticmethod
def get(name):
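        # e.g. DatasetCatalog.get("coco_2017_train") returns
        # {"factory": "COCODataset",
        #  "args": {"root": "<DATA_DIR>/coco/train2017",
        #           "ann_file": "<DATA_DIR>/coco/annotations/instances_train2017.json"}}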
if "coco" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
args = dict(
root=os.path.join(data_dir, attrs["img_dir"]),
ann_file=os.path.join(data_dir, attrs["ann_file"]),
)
return dict(
factory="COCODataset",
args=args,
)
elif "voc" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
args = dict(
data_dir=os.path.join(data_dir, attrs["data_dir"]),
split=attrs["split"],
)
return dict(
factory="PascalVOCDataset",
args=args,
)
raise RuntimeError("Dataset not available: {}".format(name))
class ModelCatalog(object):
S3_C2_DETECTRON_URL = "https://dl.fbaipublicfiles.com/detectron"
C2_IMAGENET_MODELS = {
"MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
"MSRA/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
"MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
"MSRA/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
"FAIR/20171220/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
"FAIR/20171220/X-101-64x4d": "ImageNetPretrained/20171220/X-101-64x4d.pkl",
}
C2_DETECTRON_SUFFIX = "output/train/{}coco_2014_train%3A{}coco_2014_valminusminival/generalized_rcnn/model_final.pkl"
C2_DETECTRON_MODELS = {
"35857197/e2e_faster_rcnn_R-50-C4_1x": "01_33_49.iAX0mXvW",
"35857345/e2e_faster_rcnn_R-50-FPN_1x": "01_36_30.cUF7QR7I",
"35857890/e2e_faster_rcnn_R-101-FPN_1x": "01_38_50.sNxI7sX7",
"36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "06_31_39.5MIHi1fZ",
"35858791/e2e_mask_rcnn_R-50-C4_1x": "01_45_57.ZgkA7hPB",
"35858933/e2e_mask_rcnn_R-50-FPN_1x": "01_48_14.DzEQe4wC",
"35861795/e2e_mask_rcnn_R-101-FPN_1x": "02_31_37.KqyEK4tT",
"36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "06_35_59.RZotkLKI",
"37129812/e2e_mask_rcnn_X-152-32x8d-FPN-IN5k_1.44x": "09_35_36.8pzTQKYK",
# keypoints
"37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "08_42_54.kdzV35ao"
}
@staticmethod
def get(name):
if name.startswith("Caffe2Detectron/COCO"):
return ModelCatalog.get_c2_detectron_12_2017_baselines(name)
if name.startswith("ImageNetPretrained"):
return ModelCatalog.get_c2_imagenet_pretrained(name)
raise RuntimeError("model not present in the catalog {}".format(name))
@staticmethod
def get_c2_imagenet_pretrained(name):
prefix = ModelCatalog.S3_C2_DETECTRON_URL
name = name[len("ImageNetPretrained/"):]
name = ModelCatalog.C2_IMAGENET_MODELS[name]
url = "/".join([prefix, name])
return url
@staticmethod
def get_c2_detectron_12_2017_baselines(name):
# Detectron C2 models are stored following the structure
# prefix/<model_id>/2012_2017_baselines/<model_name>.yaml.<signature>/suffix
# we use as identifiers in the catalog Caffe2Detectron/COCO/<model_id>/<model_name>
prefix = ModelCatalog.S3_C2_DETECTRON_URL
dataset_tag = "keypoints_" if "keypoint" in name else ""
suffix = ModelCatalog.C2_DETECTRON_SUFFIX.format(dataset_tag, dataset_tag)
# remove identification prefix
name = name[len("Caffe2Detectron/COCO/"):]
# split in <model_id> and <model_name>
model_id, model_name = name.split("/")
# parsing to make it match the url address from the Caffe2 models
model_name = "{}.yaml".format(model_name)
signature = ModelCatalog.C2_DETECTRON_MODELS[name]
unique_name = ".".join([model_name, signature])
url = "/".join([prefix, model_id, "12_2017_baselines", unique_name, suffix])
return url
| 40.585 | 121 | 0.605026 | [
"BSD-2-Clause"
] | choasup/FCOS | fcos_core/config/paths_catalog.py | 8,117 | Python |
import sys
from jennie.jennie_tools.command_handler import *
from jennie.ubuntu import *
def execute():
arguments = sys.argv[1:]
commands = CommandHandler().start(arguments)
    if not commands:
        return
    elif commands is True:
        return
elif commands[0] == "ubuntu":
if commands[1] == "setup":
if commands[2] == "elk":
setup_elasticsearchkibana()
elif commands[2] == "elasticsearch":
setup_elasticsearch()
elif commands[2] == "lemp":
setup_lemp()
elif commands[2] == "phpmyadmin":
install_phpmyadmin()
elif commands[1] == "deploy":
info = take_user_input(DEPLOY_INFO_COMMANDS)
if commands[2] == "web":
deploy_folder_nginx(info["port"], info["domain"])
elif commands[2] == "django":
deploy_django(info["port"], info["domain"])
if __name__ == '__main__':
    execute()
| 29.028571 | 65 | 0.554134 | ["MIT"] | Ask-Jennie/ask-jennie | jennie/__init__.py | 1,016 | Python |
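A small driver sketch for the dispatcher above; the console-script name "jennie" and the return behaviour of CommandHandler().start() are assumptions, while the argument spellings come from the branches in execute():
import sys
import jennie

# "ubuntu setup" accepts elk, elasticsearch, lemp or phpmyadmin;
# "ubuntu deploy" accepts web or django and prompts for port and domain first.
sys.argv = ["jennie", "ubuntu", "setup", "lemp"]  # would route to setup_lemp()
jennie.execute()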
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
text = """<form method="post" action="/add/">
<input type="text" name="a" value="%d"> + <input type="text" name="b" value="%d">
<input type="submit" value="="> <input type="text" value="%d">
</form>"""
# @csrf_exempt
# def index(request):
# if 'a' in request.POST:
# a = int(request.POST['a'])
# b = int(request.POST['b'])
# else:
# a = 0
# b = 0
# return HttpResponse(text % (a,b,a+b))
@csrf_exempt
def index(request):
if 'a' in request.POST:
a = int(request.POST['a'])
b = int(request.POST['b'])
else:
a = 0
b = 0
    return HttpResponse(text % (a, b, a + b))
| 25.793103 | 85 | 0.550802 | ["MIT"] | feonixsmj/Django-Python | newtest/newtest/add.py | 748 | Python |
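A minimal URLconf sketch wiring up the view above, assuming a Django 2+ style urls.py; the module path newtest.add follows the path column:
from django.urls import path
from newtest import add

urlpatterns = [
    # the form in `text` posts to /add/, so the view serves that URL
    path("add/", add.index),
]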
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ComplianceResultsOperations:
"""ComplianceResultsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.security.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
scope: str,
**kwargs: Any
) -> AsyncIterable["_models.ComplianceResultList"]:
"""Security compliance results in the subscription.
:param scope: Scope of the query, can be subscription
(/subscriptions/0b06d9ea-afe6-4779-bd59-30e5c2d9d13f) or management group
(/providers/Microsoft.Management/managementGroups/mgName).
:type scope: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ComplianceResultList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.security.models.ComplianceResultList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ComplianceResultList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ComplianceResultList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/{scope}/providers/Microsoft.Security/complianceResults'} # type: ignore
async def get(
self,
resource_id: str,
compliance_result_name: str,
**kwargs: Any
) -> "_models.ComplianceResult":
"""Security Compliance Result.
:param resource_id: The identifier of the resource.
:type resource_id: str
:param compliance_result_name: name of the desired assessment compliance result.
:type compliance_result_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ComplianceResult, or the result of cls(response)
:rtype: ~azure.mgmt.security.models.ComplianceResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ComplianceResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
'complianceResultName': self._serialize.url("compliance_result_name", compliance_result_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ComplianceResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/{resourceId}/providers/Microsoft.Security/complianceResults/{complianceResultName}'} # type: ignore
| 45.438596 | 133 | 0.660746 | ["MIT"] | AFengKK/azure-sdk-for-python | sdk/security/azure-mgmt-security/azure/mgmt/security/aio/operations/_compliance_results_operations.py | 7,770 | Python |
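A hedged consumption sketch for the paged operation above, assuming `client` is an already-constructed async azure-mgmt-security service client that exposes this group as client.compliance_results:
async def print_compliance_results(client, subscription_id: str) -> None:
    # scope can also be a management group, per the docstring above
    scope = "/subscriptions/{}".format(subscription_id)
    async for result in client.compliance_results.list(scope):
        # each item is a deserialized ComplianceResult model
        print(result.name)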
# -*- coding: utf-8 -*-
import argparse, json, os
import numpy as np
import h5py
import codecs
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_txt', default='half_obama_weeklys.txt')
parser.add_argument('-o', '--output_h5', default='half_obama_weeklys.h5')
parser.add_argument('-oj', '--output_json', default='half_obama_weeklys.json')
parser.add_argument('-vf', '--val_frac', type=float, default=0.1)
parser.add_argument('-tf', '--test_frac', type=float, default=0.1)
parser.add_argument('-q', '--quiet', action='store_true')
parser.add_argument('-e', '--encoding', default='utf-8')
args = parser.parse_args()
if __name__ == '__main__':
if args.encoding == 'bytes': args.encoding = None
  # First go through the file once to see how big it is and to build the vocab
token_to_idx = {}
total_size = 0
with codecs.open(args.input_txt, 'r', args.encoding) as f:
for line in f:
total_size += len(line)
for char in line:
if char not in token_to_idx:
token_to_idx[char] = len(token_to_idx) + 1
# Now we can figure out the split sizes
val_size = int(args.val_frac * total_size)
test_size = int(args.test_frac * total_size)
train_size = total_size - val_size - test_size
if not args.quiet:
print 'Total vocabulary size: %d' % len(token_to_idx)
print 'Total tokens in file: %d' % total_size
print ' Training size: %d' % train_size
print ' Val size: %d' % val_size
print ' Test size: %d' % test_size
# Choose the datatype based on the vocabulary size
dtype = np.uint8
if len(token_to_idx) > 255:
dtype = np.uint32
if not args.quiet:
print 'Using dtype ', dtype
# Just load data into memory ... we'll have to do something more clever
# for huge datasets but this should be fine for now
train = np.zeros(train_size, dtype=dtype)
val = np.zeros(val_size, dtype=dtype)
test = np.zeros(test_size, dtype=dtype)
splits = [train, val, test]
# Go through the file again and write data to numpy arrays
split_idx, cur_idx = 0, 0
with codecs.open(args.input_txt, 'r', args.encoding) as f:
for line in f:
for char in line:
splits[split_idx][cur_idx] = token_to_idx[char]
cur_idx += 1
if cur_idx == splits[split_idx].size:
split_idx += 1
cur_idx = 0
# Write data to HDF5 file
with h5py.File(args.output_h5, 'w') as f:
f.create_dataset('train', data=train)
f.create_dataset('val', data=val)
f.create_dataset('test', data=test)
# For 'bytes' encoding, replace non-ascii characters so the json dump
# doesn't crash
if args.encoding is None:
new_token_to_idx = {}
for token, idx in token_to_idx.iteritems():
if ord(token) > 127:
new_token_to_idx['[%d]' % ord(token)] = idx
else:
new_token_to_idx[token] = idx
token_to_idx = new_token_to_idx
# Dump a JSON file for the vocab
json_data = {
'token_to_idx': token_to_idx,
'idx_to_token': {v: k for k, v in token_to_idx.iteritems()},
}
with open(args.output_json, 'w') as f:
json.dump(json_data, f)
| 32.861702 | 78 | 0.668177 | ["MIT"] | nateGeorge/obama_bot | scrape-prez-vids/scrapy/scrape_prez/preprocess.py | 3,089 | Python |
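A short sketch of reading back the artifacts the script above writes, using its default file names; nothing about the downstream model is implied:
import json
import h5py

with h5py.File("half_obama_weeklys.h5", "r") as f:
    train = f["train"][:]   # integer token ids (uint8 or uint32)
    val, test = f["val"][:], f["test"][:]

with open("half_obama_weeklys.json") as f:
    vocab = json.load(f)
idx_to_token = {int(k): v for k, v in vocab["idx_to_token"].items()}
print("vocab size:", len(idx_to_token), "train tokens:", len(train))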
import psycopg2
import os
from dotenv import load_dotenv
load_dotenv() # Adds .env to memory
# postgres db connection
postgres_options = {
"host": os.getenv("POSTGRES_HOST"),
"database": os.getenv("POSTGRES_DATABASE"),
"user": os.getenv("POSTGRES_USER"),
"password": os.getenv("POSTGRES_PASSWORD")
}
db_conn = psycopg2.connect(**postgres_options)
c = db_conn.cursor()
private_keys: list = ((os.getenv("PRIVATE_KEYS")).split(','))
idol_folder = os.getenv("FOLDER_LOCATION")
top_gg_webhook_key = os.getenv("TOP_GG_WEBHOOK")
| 22.04 | 61 | 0.715064 | ["MIT"] | MujyKun/IreneAPI | resources/keys.py | 551 | Python |
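For reference, a sketch of the .env entries this module expects; the variable names come from the os.getenv calls above and every value is a placeholder:
# POSTGRES_HOST=localhost
# POSTGRES_DATABASE=<database-name>
# POSTGRES_USER=<user>
# POSTGRES_PASSWORD=<password>
# PRIVATE_KEYS=<key1>,<key2>
# FOLDER_LOCATION=/path/to/idol/folder
# TOP_GG_WEBHOOK=<webhook-auth-token>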
# Generated by Django 2.2.8 on 2019-12-24 12:45
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Redirect',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('source', models.CharField(max_length=50, unique=True)),
('destination', models.CharField(max_length=50, unique=True)),
],
),
]
| 25.26087 | 114 | 0.585198 | ["MIT"] | SharifAIChallenge/AIC21-Backend | apps/go/migrations/0001_initial.py | 581 | Python |
from xml.dom import minidom
from django.utils.datastructures import MultiValueDict
from django import forms
from django.utils.html import format_html, mark_safe
from django.forms.utils import flatatt
class SelectMultipleSVG(forms.SelectMultiple):
class Media:
js = ('django_svgselect.js',)
def __init__(self, svg):
super(SelectMultipleSVG, self).__init__()
# TODO: Add some validation here?
self.svg = svg
def render(self, name, value, attrs=None, choices=()):
svg = minidom.parse(self.svg)
if value is None:
value = []
final_attrs = self.build_attrs(attrs, name=name)
output = [format_html('<select multiple="multiple"{}>', flatatt(final_attrs))]
options = self.render_options(choices, value)
if options:
output.append(options)
output.append('</select>')
output.append("<div id='%s-svg'>" % final_attrs['id'])
output.append(svg.toxml())
output.append("</div>")
output.append("<script language='javascript'>document.getElementById('%s').convertToSvg('%s-svg');</script>" %
(final_attrs['id'], final_attrs['id']))
return mark_safe('\n'.join(output))
def value_from_datadict(self, data, files, name):
if isinstance(data, MultiValueDict):
return data.getlist(name)
        return data.get(name)
| 34.390244 | 118 | 0.634752 | ["MIT"] | jmickela/django_svgselect | django_svgselect/forms.py | 1,410 | Python |
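A hedged form-level usage sketch for the widget above; the choices and SVG path are placeholders, and the option values are assumed to correspond to element ids handled by the accompanying django_svgselect.js:
from django import forms
from django_svgselect.forms import SelectMultipleSVG

class RegionForm(forms.Form):
    regions = forms.MultipleChoiceField(
        choices=[("north", "North"), ("south", "South")],
        widget=SelectMultipleSVG(svg="static/map.svg"),
    )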
""" Python 'unicode-internal' Codec
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.unicode_internal_encode
decode = codecs.unicode_internal_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.unicode_internal_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.unicode_internal_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module src
def getregentry():
return codecs.CodecInfo(
name='unicode-internal',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
| 26 | 69 | 0.728261 | ["MIT"] | JacobMiske/nuclear-database-APIs | env/lib/python3.7/encodings/unicode_internal.py | 1,196 | Python |
import cv2
import numpy as np
from faster_rcnn import network
from faster_rcnn.faster_rcnn import FasterRCNN
from faster_rcnn.utils.timer import Timer
def test():
import os
im_file = 'demo/004545.jpg'
# im_file = 'data/VOCdevkit2007/VOC2007/JPEGImages/009036.jpg'
# im_file = '/media/longc/Data/data/2DMOT2015/test/ETH-Crossing/img1/000100.jpg'
image = cv2.imread(im_file)
model_file = '/home/zjwang/git/faster_rcnn_pytorch/VGGnet_fast_rcnn_iter_70000.h5'
# model_file = '/media/longc/Data/models/faster_rcnn_pytorch3/faster_rcnn_100000.h5'
# model_file = '/media/longc/Data/models/faster_rcnn_pytorch2/faster_rcnn_2000.h5'
detector = FasterRCNN()
network.load_net(model_file, detector)
detector.cuda()
detector.eval()
print('load model successfully!')
# network.save_net(r'/media/longc/Data/models/VGGnet_fast_rcnn_iter_70000.h5', detector)
# print('save model succ')
t = Timer()
t.tic()
# image = np.zeros(shape=[600, 800, 3], dtype=np.uint8) + 255
dets, scores, classes = detector.detect(image, 0.7)
runtime = t.toc()
print('total spend: {}s'.format(runtime))
im2show = np.copy(image)
for i, det in enumerate(dets):
det = tuple(int(x) for x in det)
cv2.rectangle(im2show, det[0:2], det[2:4], (255, 205, 51), 2)
cv2.putText(im2show, '%s: %.3f' % (classes[i], scores[i]), (det[0], det[1] + 15), cv2.FONT_HERSHEY_PLAIN,
1.0, (0, 0, 255), thickness=1)
cv2.imwrite(os.path.join('demo', 'out.jpg'), im2show)
cv2.imshow('demo', im2show)
cv2.waitKey(0)
if __name__ == '__main__':
test()
| 34.93617 | 113 | 0.668697 | ["MIT"] | princeward/faster_rcnn_pytorch | demo.py | 1,642 | Python |
# import all required libraries
import tensorflow.compat.v1 as tf
# the main TensorFlow library is loaded
from tensorflow.keras.models import Sequential, save_model
from tensorflow.keras.layers import Conv2D, BatchNormalization, MaxPool2D, MaxPooling2D, Dense, Dropout, Activation, Flatten
from tensorflow.keras.optimizers import RMSprop, Adagrad, Adam
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
import csv
Epochen=60
Trainingsbilder = []
Trainingslabels = []
print("Trainingsdaten werden geladen")
for i in range(0,43):
n = str(i)
Pfad = "GTSRB_Final_Training_Images/GTSRB/Final_Training/images/" + n
label=i
for Datei in os.listdir(Pfad):
img = os.path.join(Pfad,Datei)
        # scale the images to 32*32 pixels with RGB so they all have a uniform size
img = image.load_img(img,target_size=(32,32))
img = image.img_to_array(img, dtype=np.float32)
img=img.reshape(1,32,32,3)
Trainingsbilder.append(img)
Trainingslabels.append(label)
        # add training images from classes with few examples a second time (simple oversampling)
if i==0 or i==6 or i==18 or i==16 or i==19 or i==20 or i==21 or i==24 or i==27 or i==29 or i==32 or i==37:
Trainingsbilder.append(img)
Trainingslabels.append(label)
# convert the list of training images into a tensor
Trainingslabels = np.asarray(Trainingslabels)
Trainingsbilder = np.asarray([Trainingsbilder])
Trainingsbilder = Trainingsbilder.reshape(-1, 32, 32, 3)
# convert the color values to floats between 0 and 1
Trainingsbilder = Trainingsbilder/255
Trainingsbilder = np.asarray(Trainingsbilder, dtype = "float32")
Trainingslabels = np.asarray(Trainingslabels, dtype= "float32")
Testbilder = []
Testlabels = []
print()
print("Testdaten werden geladen")
# load the test images as image tensors into a list
Testpfad="GTSRB_Final_Test_Images/GTSRB/Final_Test/images/"
for Datei in os.listdir(Testpfad):
img = os.path.join(Testpfad,Datei)
    # resize the test images to 32*32 pixels
img = image.load_img(img,target_size=(32,32))
img = image.img_to_array(img, dtype=np.float32)
img = img.reshape(1,32,32, 3)
Testbilder.append(img)
# read the correct image classes of the test images from a CSV file
with open('Testdaten.csv') as csvdatei:
csv_datei = csv.reader(csvdatei)
for Reihe in csv_datei:
Testlabels.append(Reihe[6])
# convert the list of test images into a tensor
Testlabels.pop(0)
Testlabels = np.asarray(Testlabels)
Testbilder = np.asarray([Testbilder])
Testbilder = Testbilder.reshape(-1, 32, 32, 3)
# convert the color values to floats between 0 and 1
Testbilder = Testbilder/255
Testbilder = np.asarray(Testbilder, dtype = "float32")
Testlabels = np.asarray(Testlabels, dtype= "float32")
# create the CSV file Messdaten.csv and record a new measurement series in it
with open('Messdaten.csv', mode='a') as employee_file:
employee_writer = csv.writer(employee_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
employee_writer.writerow('neue Versuchsreihe')
# assemble the neural network
# first build the filter stages with batch normalization (3 convolutional layers, 2 pooling layers)
model = Sequential(name='CNN')
model.add(Conv2D(32, (3, 3), activation='selu', padding='same',input_shape=(32,32,3)))
model.add(BatchNormalization())
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Conv2D(64, (2, 2)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(BatchNormalization())
# flatten the transformed tensor into a long vector
model.add(Flatten())
# set up the 3 dense layers with 750, 256 and 43 neurons and define the dropout rates
# number of neurons in the 1st layer
model.add(Dense(750))
# ReLU activation function
model.add(Activation('relu'))
# set the dropout rate
model.add(Dropout(0.4))
# batch normalization
model.add(BatchNormalization())
# further layers
model.add(Dense(256))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.6))
model.add(Dense(43))
# softmax to convert the outputs into class probabilities
model.add(Activation('softmax'))
# define loss function, optimizer and metrics
model.compile(loss='sparse_categorical_crossentropy',
optimizer='Adam',
metrics=['accuracy'])
# train the network for 60 epochs with shuffling and batch size 32
# training uses the training images; after each epoch the accuracy on the test set is evaluated
for i in range(Epochen):
model.fit(Trainingsbilder, Trainingslabels, epochs=1, shuffle=True, batch_size=32)
    # compute the test-set accuracy and the average loss from the results
score=model.evaluate(Testbilder, Testlabels)
    # write epoch, accuracy and loss to the CSV file so they can be analysed later
with open('Messdaten.csv', mode='a') as data_file:
data_writer = csv.writer(data_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
data_writer.writerow(['Epoche:',i])
data_writer.writerow(['Genauigkeit:',score[1]])
data_writer.writerow(['Loss:',score[0]])
print('Epoche',i+1)
print('Test Verlust:', score[0])
print('Test Genauigkeit:', score[1])
    # save the trained model in HDF5 format if it reaches over 99% accuracy on the test set
if score[1]>0.99:
model.save('model_'+str(score[1])+'.hdf5')
print("gespeichert")
| 41.183673 | 125 | 0.734721 | ["MIT"] | Tom-Haustein/GTSRB_Neural_Network | Programme/Trainingsprogramm_mit_Messdaten.py | 6,065 | Python |
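A hedged inference sketch for a checkpoint saved by the script above; both file names are placeholders:
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image

model = load_model("model_0.991.hdf5")               # saved as model_<accuracy>.hdf5
img = image.load_img("example_sign.ppm", target_size=(32, 32))
img = image.img_to_array(img, dtype=np.float32).reshape(1, 32, 32, 3) / 255
print("predicted class:", int(np.argmax(model.predict(img), axis=1)[0]))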