code (stringlengths 2-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses: 1 value) | license (stringclasses: 15 values) | size (int32: 2-1.05M)
---|---|---|---|---|---|
# -*- coding: utf-8 -*-
#
# This file is part of the python-chess library.
# Copyright (C) 2015 Jean-Noël Avila <[email protected]>
# Copyright (C) 2015-2016 Niklas Fiekas <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ctypes
import ctypes.util
import fnmatch
import os
import os.path
import logging
import struct
import chess
try:
import backport_collections as collections
except ImportError:
import collections
LOGGER = logging.getLogger(__name__)
NOSQUARE = 64
NOINDEX = -1
WHITES = 1 << 6
BLACKS = 1 << 7
NOPIECE = 0
PAWN = 1
KNIGHT = 2
BISHOP = 3
ROOK = 4
QUEEN = 5
KING = 6
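# Colored piece codes below combine a piece type with one of the color flags (WHITES / BLACKS).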
wK = KING | WHITES
wP = PAWN | WHITES
wN = KNIGHT | WHITES
wB = BISHOP | WHITES
wR = ROOK | WHITES
wQ = QUEEN | WHITES
bK = KING | BLACKS
bP = PAWN | BLACKS
bN = KNIGHT | BLACKS
bB = BISHOP | BLACKS
bR = ROOK | BLACKS
bQ = QUEEN | BLACKS
MAX_KKINDEX = 462
MAX_PPINDEX = 576
MAX_PpINDEX = 24 * 48
MAX_AAINDEX = (63 - 62) + (62 // 2 * (127 - 62)) - 1 + 1
MAX_AAAINDEX = 64 * 21 * 31
MAX_PPP48_INDEX = 8648
MAX_PP48_INDEX = 1128
MAX_KXK = MAX_KKINDEX * 64
MAX_kabk = MAX_KKINDEX * 64 * 64
MAX_kakb = MAX_KKINDEX * 64 * 64
MAX_kpk = 24 * 64 * 64
MAX_kakp = 24 * 64 * 64 * 64
MAX_kapk = 24 * 64 * 64 * 64
MAX_kppk = MAX_PPINDEX * 64 * 64
MAX_kpkp = MAX_PpINDEX * 64 * 64
MAX_kaak = MAX_KKINDEX * MAX_AAINDEX
MAX_kabkc = MAX_KKINDEX * 64 * 64 * 64
MAX_kabck = MAX_KKINDEX * 64 * 64 * 64
MAX_kaakb = MAX_KKINDEX * MAX_AAINDEX * 64
MAX_kaabk = MAX_KKINDEX * MAX_AAINDEX * 64
MAX_kabbk = MAX_KKINDEX * MAX_AAINDEX * 64
MAX_kaaak = MAX_KKINDEX * MAX_AAAINDEX
MAX_kapkb = 24 * 64 * 64 * 64 * 64
MAX_kabkp = 24 * 64 * 64 * 64 * 64
MAX_kabpk = 24 * 64 * 64 * 64 * 64
MAX_kppka = MAX_kppk * 64
MAX_kappk = MAX_kppk * 64
MAX_kapkp = MAX_kpkp * 64
MAX_kaapk = 24 * MAX_AAINDEX * 64 * 64
MAX_kaakp = 24 * MAX_AAINDEX * 64 * 64
MAX_kppkp = 24 * MAX_PP48_INDEX * 64 * 64
MAX_kpppk = MAX_PPP48_INDEX * 64 * 64
PLYSHIFT = 3
INFOMASK = 7
WE_FLAG = 1
NS_FLAG = 2
NW_SE_FLAG = 4
ITOSQ = [
chess.H7, chess.G7, chess.F7, chess.E7,
chess.H6, chess.G6, chess.F6, chess.E6,
chess.H5, chess.G5, chess.F5, chess.E5,
chess.H4, chess.G4, chess.F4, chess.E4,
chess.H3, chess.G3, chess.F3, chess.E3,
chess.H2, chess.G2, chess.F2, chess.E2,
chess.D7, chess.C7, chess.B7, chess.A7,
chess.D6, chess.C6, chess.B6, chess.A6,
chess.D5, chess.C5, chess.B5, chess.A5,
chess.D4, chess.C4, chess.B4, chess.A4,
chess.D3, chess.C3, chess.B3, chess.A3,
chess.D2, chess.C2, chess.B2, chess.A2,
]
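# ITOSQ maps a 0-47 pawn index to a board square: kingside files (h-e) first,
# then queenside files (d-a), each block running from the 7th rank down to the 2nd.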
ENTRIES_PER_BLOCK = 16 * 1024
EGTB_MAXBLOCKSIZE = 65536
def map24_b(s):
s = s - 8
return ((s & 3) + s) >> 1
def in_queenside(x):
return (x & (1 << 2)) == 0
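# Square-mirroring helpers: flip_we mirrors across the vertical axis (a<->h files),
# flip_ns across the horizontal axis (ranks 1<->8), and flip_nw_se across the a1-h8 diagonal.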
def flip_we(x):
return x ^ 7
def flip_ns(x):
return x ^ 56
def flip_nw_se(x):
return ((x & 7) << 3) | (x >> 3)
def idx_is_empty(x):
return x == -1
def getcol(x):
return x & 7
def getrow(x):
return x >> 3
def flip_type(x, y):
ret = 0
if getcol(x) > 3:
x = flip_we(x)
y = flip_we(y)
ret |= 1
if getrow(x) > 3:
x = flip_ns(x)
y = flip_ns(y)
ret |= 2
rowx = getrow(x)
colx = getcol(x)
if rowx > colx:
x = flip_nw_se(x)
y = flip_nw_se(y)
ret |= 4
rowy = getrow(y)
coly = getcol(y)
if rowx == colx and rowy > coly:
x = flip_nw_se(x)
y = flip_nw_se(y)
ret |= 4
return ret
def init_flipt():
return [[flip_type(j, i) for i in range(64)] for j in range(64)]
FLIPT = init_flipt()
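# FLIPT[black_king][white_king] caches the flip flags needed to normalize that king pair.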
def init_pp48_idx():
MAX_I = 48
MAX_J = 48
idx = 0
pp48_idx = [[-1] * MAX_J for i in range(MAX_I)]
pp48_sq_x = [NOSQUARE] * MAX_PP48_INDEX
pp48_sq_y = [NOSQUARE] * MAX_PP48_INDEX
idx = 0
for a in range(chess.H7, chess.A2 - 1, -1):
for b in range(a - 1, chess.A2 - 1, -1):
i = flip_we(flip_ns(a)) - 8
j = flip_we(flip_ns(b)) - 8
if idx_is_empty(pp48_idx[i][j]):
pp48_idx[i][j] = idx
pp48_idx[j][i] = idx
pp48_sq_x[idx] = i
pp48_sq_y[idx] = j
idx += 1
return pp48_idx, pp48_sq_x, pp48_sq_y
PP48_IDX, PP48_SQ_X, PP48_SQ_Y = init_pp48_idx()
def init_ppp48_idx():
MAX_I = 48
MAX_J = 48
MAX_K = 48
ppp48_idx = [[[-1] * MAX_I for j in range(MAX_J)] for k in range(MAX_K)]
ppp48_sq_x = [NOSQUARE] * MAX_PPP48_INDEX
ppp48_sq_y = [NOSQUARE] * MAX_PPP48_INDEX
ppp48_sq_z = [NOSQUARE] * MAX_PPP48_INDEX
idx = 0
for x in range(48):
for y in range(x + 1, 48):
for z in range(y + 1, 48):
a = ITOSQ[x]
b = ITOSQ[y]
c = ITOSQ[z]
if not in_queenside(b) or not in_queenside(c):
continue
i = a - 8
j = b - 8
k = c - 8
if idx_is_empty(ppp48_idx[i][j][k]):
ppp48_idx[i][j][k] = idx
ppp48_idx[i][k][j] = idx
ppp48_idx[j][i][k] = idx
ppp48_idx[j][k][i] = idx
ppp48_idx[k][i][j] = idx
ppp48_idx[k][j][i] = idx
ppp48_sq_x[idx] = i
ppp48_sq_y[idx] = j
ppp48_sq_z[idx] = k
idx = idx + 1
return ppp48_idx, ppp48_sq_x, ppp48_sq_y, ppp48_sq_z
PPP48_IDX, PPP48_SQ_X, PPP48_SQ_Y, PPP48_SQ_Z = init_ppp48_idx()
def init_aaidx():
aaidx = [[-1] * 64 for y in range(64)]
aabase = [0] * MAX_AAINDEX
idx = 0
for x in range(64):
for y in range(x + 1, 64):
if idx_is_empty(aaidx[x][y]):
# Still empty.
aaidx[x][y] = idx
aaidx[y][x] = idx
aabase[idx] = x
idx += 1
return aabase, aaidx
AABASE, AAIDX = init_aaidx()
def init_aaa():
# Get aaa_base.
comb = [a * (a - 1) // 2 for a in range(64)]
accum = 0
aaa_base = [0] * 64
for a in range(64 - 1):
accum = accum + comb[a]
aaa_base[a + 1] = accum
# Get aaa_xyz.
aaa_xyz = [[-1] * 3 for idx in range(MAX_AAAINDEX)]
idx = 0
for z in range(64):
for y in range(z):
for x in range(y):
aaa_xyz[idx][0] = x
aaa_xyz[idx][1] = y
aaa_xyz[idx][2] = z
idx += 1
return aaa_base, aaa_xyz
AAA_BASE, AAA_XYZ = init_aaa()
def pp_putanchorfirst(a, b):
row_b = b & 56
row_a = a & 56
# Default.
anchor = a
loosen = b
if row_b > row_a:
anchor = b
loosen = a
elif row_b == row_a:
x = a
col = x & 7
inv = col ^ 7
x = (1 << col) | (1 << inv)
x &= (x - 1)
hi_a = x
x = b
col = x & 7
inv = col ^ 7
x = (1 << col) | (1 << inv)
x &= (x - 1)
hi_b = x
if hi_b > hi_a:
anchor = b
loosen = a
if hi_b < hi_a:
anchor = a
loosen = b
if hi_b == hi_a:
if a < b:
anchor = a
loosen = b
else:
anchor = b
loosen = a
return anchor, loosen
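# The "anchor" pawn returned first is the one indexed with the coarse 24-square
# slice (wsq_to_pidx24); the "loosen" pawn keeps its full 48-square index.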
def wsq_to_pidx24(pawn):
sq = pawn
sq = flip_ns(sq)
sq -= 8 # Down one row.
idx24 = (sq + (sq & 3)) >> 1
return idx24
def wsq_to_pidx48(pawn):
sq = pawn
sq = flip_ns(sq)
sq -= 8 # Down one row.
idx48 = sq
return idx48
def init_ppidx():
ppidx = [[-1] * 48 for i in range(24)]
pp_hi24 = [-1] * MAX_PPINDEX
pp_lo48 = [-1] * MAX_PPINDEX
idx = 0
for a in range(chess.H7, chess.A2 - 1, -1):
if in_queenside(a):
continue
for b in range(a - 1, chess.A2 - 1, -1):
anchor = 0
loosen = 0
anchor, loosen = pp_putanchorfirst(a, b)
if (anchor & 7) > 3:
# Square in the kingside.
anchor = flip_we(anchor)
loosen = flip_we(loosen)
i = wsq_to_pidx24(anchor)
j = wsq_to_pidx48(loosen)
if idx_is_empty(ppidx[i][j]):
ppidx[i][j] = idx
pp_hi24[idx] = i
pp_lo48[idx] = j
idx += 1
return ppidx, pp_hi24, pp_lo48
PPIDX, PP_HI24, PP_LO48 = init_ppidx()
def bb_isbiton(bb, bit):
return 0 != (bb >> bit) & 1
def map88(x):
return x + (x & 56)
def unmap88(x):
return x + (x & 7) >> 1
def mapx88(x):
return ((x & 56) << 1) | (x & 7)
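# 0x88 board helpers: map88/mapx88 convert a 0-63 square index to rank*16 + file
# coordinates, unmap88 converts back; off-board squares satisfy (s & 0x88) != 0.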
BSTEP = [17, 15, -15, -17, 0]
RSTEP = [1, 16, -1, -16, 0]
NSTEP = [18, 33, 31, 14, -18, -33, -31, -14, 0]
KSTEP = [1, 17, 16, 15, -1, -17, -16, -15, 0]
PSTEPARR = [
None, # No piece.
None, # Pawn.
NSTEP,
BSTEP,
RSTEP,
KSTEP, # Queen.
KSTEP, # King.
]
PSLIDER = [
False, # No piece.
False, # Pawn.
False,
True,
True,
True,
False,
]
def gen_rev(occ, input_piece, sq):
# Get list of reversible piece moves. Yields squares.
from_ = map88(sq)
pc = input_piece & (PAWN | KNIGHT | BISHOP | ROOK | QUEEN | KING)
steparr = PSTEPARR[pc]
slider = PSLIDER[pc]
if slider:
for step in steparr:
if step == 0:
break
s = from_ + step
while 0 == (s & 0x88):
us = unmap88(s)
if 0 != (0x1 & (occ >> us)):
break
yield us
s += step
else:
for step in steparr:
if step == 0:
break
s = from_ + step
if 0 == (s & 0x88):
us = unmap88(s)
if 0 == (0x1 & (occ >> us)):
yield us
def reach_init():
stp_a = [15, -15]
stp_b = [17, -17]
reach = [[-1] * 64 for _ in range(7)]
for pc in range(KNIGHT, KING + 1):
for sq in range(64):
bb = 0
for li in gen_rev(0, pc, sq):
bb |= 1 << li
reach[pc][sq] = bb
for side in range(2):
index = 1 ^ side
step_a = stp_a[side]
step_b = stp_b[side]
for sq in range(64):
sq88 = map88(sq)
bb = 0
thelist = []
s = sq88 + step_a
if 0 == (s & 0x88):
us = unmap88(s)
thelist.append(us)
s = sq88 + step_b
if 0 == (s & 0x88):
us = unmap88(s)
thelist.append(us)
for li in thelist:
bb |= 1 << li
reach[index][sq] = bb
return reach
REACH = reach_init()
def attack_maps_init():
attmsk = [0] * 256
attmsk[wP] = 1 << 0
attmsk[bP] = 1 << 1
attmsk[KNIGHT] = 1 << 2
attmsk[wN] = 1 << 2
attmsk[bN] = 1 << 2
attmsk[BISHOP] = 1 << 3
attmsk[wB] = 1 << 3
attmsk[bB] = 1 << 3
attmsk[ROOK] = 1 << 4
attmsk[wR] = 1 << 4
attmsk[bR] = 1 << 4
attmsk[QUEEN] = 1 << 5
attmsk[wQ] = 1 << 5
attmsk[bQ] = 1 << 5
attmsk[KING] = 1 << 6
attmsk[wK] = 1 << 6
attmsk[bK] = 1 << 6
attmap = [[0] * 64 for i in range(64)]
for to_ in range(64):
for from_ in range(64):
m = 0
rook = REACH[ROOK][from_]
bishop = REACH[BISHOP][from_]
queen = REACH[QUEEN][from_]
knight = REACH[KNIGHT][from_]
king = REACH[KING][from_]
if bb_isbiton(knight, to_):
m |= attmsk[wN]
if bb_isbiton(king, to_):
m |= attmsk[wK]
if bb_isbiton(rook, to_):
m |= attmsk[wR]
if bb_isbiton(bishop, to_):
m |= attmsk[wB]
if bb_isbiton(queen, to_):
m |= attmsk[wQ]
to88 = mapx88(to_)
fr88 = mapx88(from_)
diff = to88 - fr88
if diff in [17, 15]:
m |= attmsk[wP]
elif diff in [-17, -15]:
m |= attmsk[bP]
attmap[to_][from_] = m
return attmsk, attmap
ATTMSK, ATTMAP = attack_maps_init()
def possible_attack(from_, to_, piece):
return 0 != ATTMAP[to_][from_] & ATTMSK[piece]
def norm_kkindex(x, y):
if getcol(x) > 3:
x = flip_we(x)
y = flip_we(y)
if getrow(x) > 3:
x = flip_ns(x)
y = flip_ns(y)
rowx = getrow(x)
colx = getcol(x)
if (rowx > colx):
x = flip_nw_se(x)
y = flip_nw_se(y)
rowy = getrow(y)
coly = getcol(y)
if rowx == colx and rowy > coly:
x = flip_nw_se(x)
y = flip_nw_se(y)
return x, y
def init_kkidx():
kkidx = [[-1] * 64 for x in range(64)]
bksq = [-1] * MAX_KKINDEX
wksq = [-1] * MAX_KKINDEX
idx = 0
for x in range(64):
for y in range(64):
# Check if x to y is legal.
if not possible_attack(x, y, wK) and x != y:
# Normalize.
i, j = norm_kkindex(x, y)
if idx_is_empty(kkidx[i][j]):
kkidx[i][j] = idx
kkidx[x][y] = idx
bksq[idx] = i
wksq[idx] = j
idx += 1
return kkidx, wksq, bksq
KKIDX, WKSQ, BKSQ = init_kkidx()
def kxk_pctoindex(c):
BLOCK_Ax = 64
ft = flip_type(c.black_piece_squares[0], c.white_piece_squares[0])
ws = c.white_piece_squares
bs = c.black_piece_squares
if (ft & 1) != 0:
ws = [flip_we(b) for b in ws]
bs = [flip_we(b) for b in bs]
if (ft & 2) != 0:
ws = [flip_ns(b) for b in ws]
bs = [flip_ns(b) for b in bs]
if (ft & 4) != 0:
ws = [flip_nw_se(b) for b in ws]
bs = [flip_nw_se(b) for b in bs]
ki = KKIDX[bs[0]][ws[0]] # KKIDX[black king][white king]
if ki == -1:
return NOINDEX
return ki * BLOCK_Ax + ws[1]
def kapkb_pctoindex(c):
BLOCK_A = 64 * 64 * 64 * 64
BLOCK_B = 64 * 64 * 64
BLOCK_C = 64 * 64
BLOCK_D = 64
pawn = c.white_piece_squares[2]
wa = c.white_piece_squares[1]
wk = c.white_piece_squares[0]
bk = c.black_piece_squares[0]
ba = c.black_piece_squares[1]
if not (chess.A2 <= pawn < chess.A8):
return NOINDEX
if (pawn & 7) > 3:
# Column is more than 3, i.e. e, f, g or h.
pawn = flip_we(pawn)
wk = flip_we(wk)
bk = flip_we(bk)
wa = flip_we(wa)
ba = flip_we(ba)
sq = pawn
sq ^= 56 # flip_ns
sq -= 8 # down one row
pslice = (sq + (sq & 3)) >> 1
return pslice * BLOCK_A + wk * BLOCK_B + bk * BLOCK_C + wa * BLOCK_D + ba
def kabpk_pctoindex(c):
BLOCK_A = 64 * 64 * 64 * 64
BLOCK_B = 64 * 64 * 64
BLOCK_C = 64 * 64
BLOCK_D = 64
wk = c.white_piece_squares[0]
wa = c.white_piece_squares[1]
wb = c.white_piece_squares[2]
pawn = c.white_piece_squares[3]
bk = c.black_piece_squares[0]
if (pawn & 7) > 3:
# Column is more than 3, i.e. e, f, g or h.
pawn = flip_we(pawn)
wk = flip_we(wk)
bk = flip_we(bk)
wa = flip_we(wa)
wb = flip_we(wb)
pslice = wsq_to_pidx24(pawn)
return pslice * BLOCK_A + wk * BLOCK_B + bk * BLOCK_C + wa * BLOCK_D + wb
def kabkp_pctoindex(c):
BLOCK_A = 64 * 64 * 64 * 64
BLOCK_B = 64 * 64 * 64
BLOCK_C = 64 * 64
BLOCK_D = 64
pawn = c.black_piece_squares[1]
wa = c.white_piece_squares[1]
wk = c.white_piece_squares[0]
bk = c.black_piece_squares[0]
wb = c.white_piece_squares[2]
if not (chess.A2 <= pawn < chess.A8):
return NOINDEX
if (pawn & 7) > 3:
# Column is more than 3, i.e. e, f, g or h.
pawn = flip_we(pawn)
wk = flip_we(wk)
bk = flip_we(bk)
wa = flip_we(wa)
wb = flip_we(wb)
sq = pawn
sq -= 8 # down one row
pslice = (sq + (sq & 3)) >> 1
return pslice * BLOCK_A + wk * BLOCK_B + bk * BLOCK_C + wa * BLOCK_D + wb
def kaapk_pctoindex(c):
BLOCK_C = MAX_AAINDEX
BLOCK_B = 64 * BLOCK_C
BLOCK_A = 64 * BLOCK_B
wk = c.white_piece_squares[0]
wa = c.white_piece_squares[1]
wa2 = c.white_piece_squares[2]
pawn = c.white_piece_squares[3]
bk = c.black_piece_squares[0]
if (pawn & 7) > 3:
# Column is more than 3, i.e. e, f, g or h.
pawn = flip_we(pawn)
wk = flip_we(wk)
bk = flip_we(bk)
wa = flip_we(wa)
wa2 = flip_we(wa2)
pslice = wsq_to_pidx24(pawn)
aa_combo = AAIDX[wa][wa2]
if idx_is_empty(aa_combo):
return NOINDEX
return pslice * BLOCK_A + wk * BLOCK_B + bk * BLOCK_C + aa_combo
def kaakp_pctoindex(c):
BLOCK_C = MAX_AAINDEX
BLOCK_B = 64 * BLOCK_C
BLOCK_A = 64 * BLOCK_B
wk = c.white_piece_squares[0]
wa = c.white_piece_squares[1]
wa2 = c.white_piece_squares[2]
bk = c.black_piece_squares[0]
pawn = c.black_piece_squares[1]
if (pawn & 7) > 3:
# Column is more than 3, i.e. e, f, g or h.
pawn = flip_we(pawn)
wk = flip_we(wk)
bk = flip_we(bk)
wa = flip_we(wa)
wa2 = flip_we(wa2)
pawn = flip_ns(pawn)
pslice = wsq_to_pidx24(pawn)
aa_combo = AAIDX[wa][wa2]
if idx_is_empty(aa_combo):
return NOINDEX
return pslice * BLOCK_A + wk * BLOCK_B + bk * BLOCK_C + aa_combo
def kapkp_pctoindex(c):
BLOCK_A = 64 * 64 * 64
BLOCK_B = 64 * 64
BLOCK_C = 64
wk = c.white_piece_squares[0]
wa = c.white_piece_squares[1]
pawn_a = c.white_piece_squares[2]
bk = c.black_piece_squares[0]
pawn_b = c.black_piece_squares[1]
anchor = pawn_a
loosen = pawn_b
if (anchor & 7) > 3:
# Column is more than 3, i.e. e, f, g or h.
anchor = flip_we(anchor)
loosen = flip_we(loosen)
wk = flip_we(wk)
bk = flip_we(bk)
wa = flip_we(wa)
m = wsq_to_pidx24(anchor)
n = loosen - 8
pp_slice = m * 48 + n
if idx_is_empty(pp_slice):
return NOINDEX
return pp_slice * BLOCK_A + wk * BLOCK_B + bk * BLOCK_C + wa
def kappk_pctoindex(c):
BLOCK_A = 64 * 64 * 64
BLOCK_B = 64 * 64
BLOCK_C = 64
wk = c.white_piece_squares[0]
wa = c.white_piece_squares[1]
pawn_a = c.white_piece_squares[2]
pawn_b = c.white_piece_squares[3]
bk = c.black_piece_squares[0]
anchor, loosen = pp_putanchorfirst(pawn_a, pawn_b)
if (anchor & 7) > 3:
# Column is more than 3, i.e. e, f, g or h.
anchor = flip_we(anchor)
loosen = flip_we(loosen)
wk = flip_we(wk)
bk = flip_we(bk)
wa = flip_we(wa)
i = wsq_to_pidx24(anchor)
j = wsq_to_pidx48(loosen)
pp_slice = PPIDX[i][j]
if idx_is_empty(pp_slice):
return NOINDEX
return pp_slice * BLOCK_A + wk * BLOCK_B + bk * BLOCK_C + wa
def kppka_pctoindex(c):
BLOCK_A = 64 * 64 * 64
BLOCK_B = 64 * 64
BLOCK_C = 64
wk = c.white_piece_squares[0]
pawn_a = c.white_piece_squares[1]
pawn_b = c.white_piece_squares[2]
bk = c.black_piece_squares[0]
ba = c.black_piece_squares[1]
anchor, loosen = pp_putanchorfirst(pawn_a, pawn_b)
if (anchor & 7) > 3:
anchor = flip_we(anchor)
loosen = flip_we(loosen)
wk = flip_we(wk)
bk = flip_we(bk)
ba = flip_we(ba)
i = wsq_to_pidx24(anchor)
j = wsq_to_pidx48(loosen)
pp_slice = PPIDX[i][j]
if idx_is_empty(pp_slice):
return NOINDEX
return pp_slice * BLOCK_A + wk * BLOCK_B + bk * BLOCK_C + ba
def kabck_pctoindex(c):
N_WHITE = 4
N_BLACK = 1
BLOCK_A = 64 * 64 * 64
BLOCK_B = 64 * 64
BLOCK_C = 64
ft = FLIPT[c.black_piece_squares[0]][c.white_piece_squares[0]]
ws = c.white_piece_squares[:N_WHITE]
bs = c.black_piece_squares[:N_BLACK]
if (ft & WE_FLAG) != 0:
ws = [flip_we(i) for i in ws]
bs = [flip_we(i) for i in bs]
if (ft & NS_FLAG) != 0:
ws = [flip_ns(i) for i in ws]
bs = [flip_ns(i) for i in bs]
if (ft & NW_SE_FLAG) != 0:
ws = [flip_nw_se(i) for i in ws]
bs = [flip_nw_se(i) for i in bs]
ki = KKIDX[bs[0]][ws[0]] # KKIDX[black king][white king]
if idx_is_empty(ki):
return NOINDEX
return ki * BLOCK_A + ws[1] * BLOCK_B + ws[2] * BLOCK_C + ws[3]
def kabbk_pctoindex(c):
N_WHITE = 4
N_BLACK = 1
BLOCK_Bx = 64
BLOCK_Ax = BLOCK_Bx * MAX_AAINDEX
ft = FLIPT[c.black_piece_squares[0]][c.white_piece_squares[0]]
ws = c.white_piece_squares[:N_WHITE]
bs = c.black_piece_squares[:N_BLACK]
if (ft & WE_FLAG) != 0:
ws = [flip_we(i) for i in ws]
bs = [flip_we(i) for i in bs]
if (ft & NS_FLAG) != 0:
ws = [flip_ns(i) for i in ws]
bs = [flip_ns(i) for i in bs]
if (ft & NW_SE_FLAG) != 0:
ws = [flip_nw_se(i) for i in ws]
bs = [flip_nw_se(i) for i in bs]
ki = KKIDX[bs[0]][ws[0]] # KKIDX[black king][white king]
ai = AAIDX[ws[2]][ws[3]]
if idx_is_empty(ki) or idx_is_empty(ai):
return NOINDEX
return ki * BLOCK_Ax + ai * BLOCK_Bx + ws[1]
def kaabk_pctoindex(c):
N_WHITE = 4
N_BLACK = 1
BLOCK_Bx = 64
BLOCK_Ax = BLOCK_Bx * MAX_AAINDEX
ft = FLIPT[c.black_piece_squares[0]][c.white_piece_squares[0]]
ws = c.white_piece_squares[:N_WHITE]
bs = c.black_piece_squares[:N_BLACK]
if ((ft & WE_FLAG) != 0):
ws = [flip_we(i) for i in ws]
bs = [flip_we(i) for i in bs]
if ((ft & NS_FLAG) != 0):
ws = [flip_ns(i) for i in ws]
bs = [flip_ns(i) for i in bs]
if ((ft & NW_SE_FLAG) != 0):
ws = [flip_nw_se(i) for i in ws]
bs = [flip_nw_se(i) for i in bs]
ki = KKIDX[bs[0]][ws[0]] # KKIDX[black king][white king]
ai = AAIDX[ws[1]][ws[2]]
if idx_is_empty(ki) or idx_is_empty(ai):
return NOINDEX
return ki * BLOCK_Ax + ai * BLOCK_Bx + ws[3]
def aaa_getsubi(x, y, z):
bse = AAA_BASE[z]
calc_idx = x + (y - 1) * y // 2 + bse
return calc_idx
def kaaak_pctoindex(c):
N_WHITE = 4
N_BLACK = 1
BLOCK_Ax = MAX_AAAINDEX
ws = c.white_piece_squares[:N_WHITE]
bs = c.black_piece_squares[:N_BLACK]
ft = FLIPT[c.black_piece_squares[0]][c.white_piece_squares[0]]
if (ft & WE_FLAG) != 0:
ws = [flip_we(i) for i in ws]
bs = [flip_we(i) for i in bs]
if (ft & NS_FLAG) != 0:
ws = [flip_ns(i) for i in ws]
bs = [flip_ns(i) for i in bs]
if (ft & NW_SE_FLAG) != 0:
ws = [flip_nw_se(i) for i in ws]
bs = [flip_nw_se(i) for i in bs]
if ws[2] < ws[1]:
tmp = ws[1]
ws[1] = ws[2]
ws[2] = tmp
if ws[3] < ws[2]:
tmp = ws[2]
ws[2] = ws[3]
ws[3] = tmp
if ws[2] < ws[1]:
tmp = ws[1]
ws[1] = ws[2]
ws[2] = tmp
ki = KKIDX[bs[0]][ws[0]]
if ws[1] == ws[2] or ws[1] == ws[3] or ws[2] == ws[3]:
return NOINDEX
ai = aaa_getsubi(ws[1], ws[2], ws[3])
if idx_is_empty(ki) or idx_is_empty(ai):
return NOINDEX
return ki * BLOCK_Ax + ai
def kppkp_pctoindex(c):
BLOCK_Ax = MAX_PP48_INDEX * 64 * 64
BLOCK_Bx = 64 * 64
BLOCK_Cx = 64
wk = c.white_piece_squares[0]
pawn_a = c.white_piece_squares[1]
pawn_b = c.white_piece_squares[2]
bk = c.black_piece_squares[0]
pawn_c = c.black_piece_squares[1]
if (pawn_c & 7) > 3:
wk = flip_we(wk)
pawn_a = flip_we(pawn_a)
pawn_b = flip_we(pawn_b)
bk = flip_we(bk)
pawn_c = flip_we(pawn_c)
i = flip_we(flip_ns(pawn_a)) - 8
j = flip_we(flip_ns(pawn_b)) - 8
# Black pawn, so low indexes mean more advanced.
k = map24_b(pawn_c)
pp48_slice = PP48_IDX[i][j]
if idx_is_empty(pp48_slice):
return NOINDEX
return k * BLOCK_Ax + pp48_slice * BLOCK_Bx + wk * BLOCK_Cx + bk
def kaakb_pctoindex(c):
N_WHITE = 3
N_BLACK = 2
BLOCK_Bx = 64
BLOCK_Ax = BLOCK_Bx * MAX_AAINDEX
ft = FLIPT[c.black_piece_squares[0]][c.white_piece_squares[0]]
ws = c.white_piece_squares[:N_WHITE]
bs = c.black_piece_squares[:N_BLACK]
if (ft & WE_FLAG) != 0:
ws = [flip_we(i) for i in ws]
bs = [flip_we(i) for i in bs]
if (ft & NS_FLAG) != 0:
ws = [flip_ns(i) for i in ws]
bs = [flip_ns(i) for i in bs]
if (ft & NW_SE_FLAG) != 0:
ws = [flip_nw_se(i) for i in ws]
bs = [flip_nw_se(i) for i in bs]
ki = KKIDX[bs[0]][ws[0]] # KKIDX[black king][white king]
ai = AAIDX[ws[1]][ws[2]]
if idx_is_empty(ki) or idx_is_empty(ai):
return NOINDEX
return ki * BLOCK_Ax + ai * BLOCK_Bx + bs[1]
def kabkc_pctoindex(c):
N_WHITE = 3
N_BLACK = 2
BLOCK_Ax = 64 * 64 * 64
BLOCK_Bx = 64 * 64
BLOCK_Cx = 64
ft = FLIPT[c.black_piece_squares[0]][c.white_piece_squares[0]]
ws = c.white_piece_squares[:N_WHITE]
bs = c.black_piece_squares[:N_BLACK]
if (ft & WE_FLAG) != 0:
ws = [flip_we(i) for i in ws]
bs = [flip_we(i) for i in bs]
if (ft & NS_FLAG) != 0:
ws = [flip_ns(i) for i in ws]
bs = [flip_ns(i) for i in bs]
if (ft & NW_SE_FLAG) != 0:
ws = [flip_nw_se(i) for i in ws]
bs = [flip_nw_se(i) for i in bs]
ki = KKIDX[bs[0]][ws[0]] # KKIDX [black king] [white king]
if idx_is_empty(ki):
return NOINDEX
return ki * BLOCK_Ax + ws[1] * BLOCK_Bx + ws[2] * BLOCK_Cx + bs[1]
def kpkp_pctoindex(c):
BLOCK_Ax = 64 * 64
BLOCK_Bx = 64
wk = c.white_piece_squares[0]
bk = c.black_piece_squares[0]
pawn_a = c.white_piece_squares[1]
pawn_b = c.black_piece_squares[1]
anchor = pawn_a
loosen = pawn_b
if (anchor & 7) > 3:
anchor = flip_we(anchor)
loosen = flip_we(loosen)
wk = flip_we(wk)
bk = flip_we(bk)
m = wsq_to_pidx24(anchor)
n = loosen - 8
pp_slice = m * 48 + n
if idx_is_empty(pp_slice):
return NOINDEX
return pp_slice * BLOCK_Ax + wk * BLOCK_Bx + bk
def kppk_pctoindex(c):
BLOCK_Ax = 64 * 64
BLOCK_Bx = 64
wk = c.white_piece_squares[0]
pawn_a = c.white_piece_squares[1]
pawn_b = c.white_piece_squares[2]
bk = c.black_piece_squares[0]
anchor, loosen = pp_putanchorfirst(pawn_a, pawn_b)
if (anchor & 7) > 3:
anchor = flip_we(anchor)
loosen = flip_we(loosen)
wk = flip_we(wk)
bk = flip_we(bk)
i = wsq_to_pidx24(anchor)
j = wsq_to_pidx48(loosen)
pp_slice = PPIDX[i][j]
if idx_is_empty(pp_slice):
return NOINDEX
return pp_slice * BLOCK_Ax + wk * BLOCK_Bx + bk
def kapk_pctoindex(c):
BLOCK_Ax = 64 * 64 * 64
BLOCK_Bx = 64 * 64
BLOCK_Cx = 64
pawn = c.white_piece_squares[2]
wa = c.white_piece_squares[1]
wk = c.white_piece_squares[0]
bk = c.black_piece_squares[0]
if not (chess.A2 <= pawn < chess.A8):
return NOINDEX
if (pawn & 7) > 3:
pawn = flip_we(pawn)
wk = flip_we(wk)
bk = flip_we(bk)
wa = flip_we(wa)
sq = pawn
sq ^= 56 # flip_ns
sq -= 8 # down one row
pslice = ((sq + (sq & 3)) >> 1)
return pslice * BLOCK_Ax + wk * BLOCK_Bx + bk * BLOCK_Cx + wa
def kabk_pctoindex(c):
BLOCK_Ax = 64 * 64
BLOCK_Bx = 64
ft = flip_type(c.black_piece_squares[0], c.white_piece_squares[0])
ws = c.white_piece_squares
bs = c.black_piece_squares
if (ft & 1) != 0:
ws = [flip_we(b) for b in ws]
bs = [flip_we(b) for b in bs]
if (ft & 2) != 0:
ws = [flip_ns(b) for b in ws]
bs = [flip_ns(b) for b in bs]
if (ft & 4) != 0:
ws = [flip_nw_se(b) for b in ws]
bs = [flip_nw_se(b) for b in bs]
ki = KKIDX[bs[0]][ws[0]] # KKIDX[black king][white king]
if idx_is_empty(ki):
return NOINDEX
return ki * BLOCK_Ax + ws[1] * BLOCK_Bx + ws[2]
def kakp_pctoindex(c):
BLOCK_Ax = 64 * 64 * 64
BLOCK_Bx = 64 * 64
BLOCK_Cx = 64
pawn = c.black_piece_squares[1]
wa = c.white_piece_squares[1]
wk = c.white_piece_squares[0]
bk = c.black_piece_squares[0]
if not (chess.A2 <= pawn < chess.A8):
return NOINDEX
if (pawn & 7) > 3:
pawn = flip_we(pawn)
wk = flip_we(wk)
bk = flip_we(bk)
wa = flip_we(wa)
sq = pawn
sq -= 8 # down one row
pslice = (sq + (sq & 3)) >> 1
return pslice * BLOCK_Ax + wk * BLOCK_Bx + bk * BLOCK_Cx + wa
def kaak_pctoindex(c):
N_WHITE = 3
N_BLACK = 1
BLOCK_Ax = MAX_AAINDEX
ft = FLIPT[c.black_piece_squares[0]][c.white_piece_squares[0]]
ws = c.white_piece_squares[:N_WHITE]
bs = c.black_piece_squares[:N_BLACK]
if (ft & WE_FLAG) != 0:
ws = [flip_we(i) for i in ws]
bs = [flip_we(i) for i in bs]
if (ft & NS_FLAG) != 0:
ws = [flip_ns(i) for i in ws]
bs = [flip_ns(i) for i in bs]
if (ft & NW_SE_FLAG) != 0:
ws = [flip_nw_se(i) for i in ws]
bs = [flip_nw_se(i) for i in bs]
ki = KKIDX[bs[0]][ws[0]] # KKIDX[black king][white king]
ai = AAIDX[ws[1]][ws[2]]
if idx_is_empty(ki) or idx_is_empty(ai):
return NOINDEX
return ki * BLOCK_Ax + ai
def kakb_pctoindex(c):
BLOCK_Ax = 64 * 64
BLOCK_Bx = 64
ft = FLIPT[c.black_piece_squares[0]][c.white_piece_squares[0]]
ws = c.white_piece_squares[:]
bs = c.black_piece_squares[:]
if (ft & 1) != 0:
ws[0] = flip_we(ws[0])
ws[1] = flip_we(ws[1])
bs[0] = flip_we(bs[0])
bs[1] = flip_we(bs[1])
if (ft & 2) != 0:
ws[0] = flip_ns(ws[0])
ws[1] = flip_ns(ws[1])
bs[0] = flip_ns(bs[0])
bs[1] = flip_ns(bs[1])
if (ft & 4) != 0:
ws[0] = flip_nw_se(ws[0])
ws[1] = flip_nw_se(ws[1])
bs[0] = flip_nw_se(bs[0])
bs[1] = flip_nw_se(bs[1])
ki = KKIDX[bs[0]][ws[0]] # KKIDX[black king][white king]
if idx_is_empty(ki):
return NOINDEX
return ki * BLOCK_Ax + ws[1] * BLOCK_Bx + bs[1]
def kpk_pctoindex(c):
BLOCK_A = 64 * 64
BLOCK_B = 64
pawn = c.white_piece_squares[1]
wk = c.white_piece_squares[0]
bk = c.black_piece_squares[0]
if not (chess.A2 <= pawn < chess.A8):
return NOINDEX
if (pawn & 7) > 3:
pawn = flip_we(pawn)
wk = flip_we(wk)
bk = flip_we(bk)
sq = pawn
sq ^= 56 # flip_ns
sq -= 8 # down one row
pslice = ((sq + (sq & 3)) >> 1)
res = pslice * BLOCK_A + wk * BLOCK_B + bk
return res
def kpppk_pctoindex(c):
BLOCK_A = 64 * 64
BLOCK_B = 64
wk = c.white_piece_squares[0]
pawn_a = c.white_piece_squares[1]
pawn_b = c.white_piece_squares[2]
pawn_c = c.white_piece_squares[3]
bk = c.black_piece_squares[0]
i = pawn_a - 8
j = pawn_b - 8
k = pawn_c - 8
ppp48_slice = PPP48_IDX[i][j][k]
if idx_is_empty(ppp48_slice):
wk = flip_we(wk)
pawn_a = flip_we(pawn_a)
pawn_b = flip_we(pawn_b)
pawn_c = flip_we(pawn_c)
bk = flip_we(bk)
i = pawn_a - 8
j = pawn_b - 8
k = pawn_c - 8
ppp48_slice = PPP48_IDX[i][j][k]
if idx_is_empty(ppp48_slice):
return NOINDEX
return ppp48_slice * BLOCK_A + wk * BLOCK_B + bk
Endgamekey = collections.namedtuple("Endgamekey", ["maxindex", "slice_n", "pctoi"])
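# maxindex is the size of the index space for a material signature, slice_n the
# number of slices that space is divided into (1 for pawnless endgames), and
# pctoi the function that maps a piece configuration (Request) to its index.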
EGKEY = {
"kqk": Endgamekey(MAX_KXK, 1, kxk_pctoindex),
"krk": Endgamekey(MAX_KXK, 1, kxk_pctoindex),
"kbk": Endgamekey(MAX_KXK, 1, kxk_pctoindex),
"knk": Endgamekey(MAX_KXK, 1, kxk_pctoindex),
"kpk": Endgamekey(MAX_kpk, 24, kpk_pctoindex),
"kqkq": Endgamekey(MAX_kakb, 1, kakb_pctoindex),
"kqkr": Endgamekey(MAX_kakb, 1, kakb_pctoindex),
"kqkb": Endgamekey(MAX_kakb, 1, kakb_pctoindex),
"kqkn": Endgamekey(MAX_kakb, 1, kakb_pctoindex),
"krkr": Endgamekey(MAX_kakb, 1, kakb_pctoindex),
"krkb": Endgamekey(MAX_kakb, 1, kakb_pctoindex),
"krkn": Endgamekey(MAX_kakb, 1, kakb_pctoindex),
"kbkb": Endgamekey(MAX_kakb, 1, kakb_pctoindex),
"kbkn": Endgamekey(MAX_kakb, 1, kakb_pctoindex),
"knkn": Endgamekey(MAX_kakb, 1, kakb_pctoindex),
"kqqk": Endgamekey(MAX_kaak, 1, kaak_pctoindex),
"kqrk": Endgamekey(MAX_kabk, 1, kabk_pctoindex),
"kqbk": Endgamekey(MAX_kabk, 1, kabk_pctoindex),
"kqnk": Endgamekey(MAX_kabk, 1, kabk_pctoindex),
"krrk": Endgamekey(MAX_kaak, 1, kaak_pctoindex),
"krbk": Endgamekey(MAX_kabk, 1, kabk_pctoindex),
"krnk": Endgamekey(MAX_kabk, 1, kabk_pctoindex),
"kbbk": Endgamekey(MAX_kaak, 1, kaak_pctoindex),
"kbnk": Endgamekey(MAX_kabk, 1, kabk_pctoindex),
"knnk": Endgamekey(MAX_kaak, 1, kaak_pctoindex),
"kqkp": Endgamekey(MAX_kakp, 24, kakp_pctoindex),
"krkp": Endgamekey(MAX_kakp, 24, kakp_pctoindex),
"kbkp": Endgamekey(MAX_kakp, 24, kakp_pctoindex),
"knkp": Endgamekey(MAX_kakp, 24, kakp_pctoindex),
"kqpk": Endgamekey(MAX_kapk, 24, kapk_pctoindex),
"krpk": Endgamekey(MAX_kapk, 24, kapk_pctoindex),
"kbpk": Endgamekey(MAX_kapk, 24, kapk_pctoindex),
"knpk": Endgamekey(MAX_kapk, 24, kapk_pctoindex),
"kppk": Endgamekey(MAX_kppk, MAX_PPINDEX, kppk_pctoindex),
"kpkp": Endgamekey(MAX_kpkp, MAX_PpINDEX, kpkp_pctoindex),
"kppkp": Endgamekey(MAX_kppkp, 24 * MAX_PP48_INDEX, kppkp_pctoindex),
"kbbkr": Endgamekey(MAX_kaakb, 1, kaakb_pctoindex),
"kbbkb": Endgamekey(MAX_kaakb, 1, kaakb_pctoindex),
"knnkb": Endgamekey(MAX_kaakb, 1, kaakb_pctoindex),
"knnkn": Endgamekey(MAX_kaakb, 1, kaakb_pctoindex),
"kqqqk": Endgamekey(MAX_kaaak, 1, kaaak_pctoindex),
"kqqrk": Endgamekey(MAX_kaabk, 1, kaabk_pctoindex),
"kqqbk": Endgamekey(MAX_kaabk, 1, kaabk_pctoindex),
"kqqnk": Endgamekey(MAX_kaabk, 1, kaabk_pctoindex),
"kqrrk": Endgamekey(MAX_kabbk, 1, kabbk_pctoindex),
"kqrbk": Endgamekey(MAX_kabck, 1, kabck_pctoindex),
"kqrnk": Endgamekey(MAX_kabck, 1, kabck_pctoindex),
"kqbbk": Endgamekey(MAX_kabbk, 1, kabbk_pctoindex),
"kqbnk": Endgamekey(MAX_kabck, 1, kabck_pctoindex),
"kqnnk": Endgamekey(MAX_kabbk, 1, kabbk_pctoindex),
"krrrk": Endgamekey(MAX_kaaak, 1, kaaak_pctoindex),
"krrbk": Endgamekey(MAX_kaabk, 1, kaabk_pctoindex),
"krrnk": Endgamekey(MAX_kaabk, 1, kaabk_pctoindex),
"krbbk": Endgamekey(MAX_kabbk, 1, kabbk_pctoindex),
"krbnk": Endgamekey(MAX_kabck, 1, kabck_pctoindex),
"krnnk": Endgamekey(MAX_kabbk, 1, kabbk_pctoindex),
"kbbbk": Endgamekey(MAX_kaaak, 1, kaaak_pctoindex),
"kbbnk": Endgamekey(MAX_kaabk, 1, kaabk_pctoindex),
"kbnnk": Endgamekey(MAX_kabbk, 1, kabbk_pctoindex),
"knnnk": Endgamekey(MAX_kaaak, 1, kaaak_pctoindex),
"kqqkq": Endgamekey(MAX_kaakb, 1, kaakb_pctoindex),
"kqqkr": Endgamekey(MAX_kaakb, 1, kaakb_pctoindex),
"kqqkb": Endgamekey(MAX_kaakb, 1, kaakb_pctoindex),
"kqqkn": Endgamekey(MAX_kaakb, 1, kaakb_pctoindex),
"kqrkq": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"kqrkr": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"kqrkb": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"kqrkn": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"kqbkq": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"kqbkr": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"kqbkb": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"kqbkn": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"kqnkq": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"kqnkr": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"kqnkb": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"kqnkn": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"krrkq": Endgamekey(MAX_kaakb, 1, kaakb_pctoindex),
"krrkr": Endgamekey(MAX_kaakb, 1, kaakb_pctoindex),
"krrkb": Endgamekey(MAX_kaakb, 1, kaakb_pctoindex),
"krrkn": Endgamekey(MAX_kaakb, 1, kaakb_pctoindex),
"krbkq": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"krbkr": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"krbkb": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"krbkn": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"krnkq": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"krnkr": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"krnkb": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"krnkn": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"kbbkq": Endgamekey(MAX_kaakb, 1, kaakb_pctoindex),
"kbbkn": Endgamekey(MAX_kaakb, 1, kaakb_pctoindex),
"kbnkq": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"kbnkr": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"kbnkb": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"kbnkn": Endgamekey(MAX_kabkc, 1, kabkc_pctoindex),
"knnkq": Endgamekey(MAX_kaakb, 1, kaakb_pctoindex),
"knnkr": Endgamekey(MAX_kaakb, 1, kaakb_pctoindex),
"kqqpk": Endgamekey(MAX_kaapk, 24, kaapk_pctoindex),
"kqrpk": Endgamekey(MAX_kabpk, 24, kabpk_pctoindex),
"kqbpk": Endgamekey(MAX_kabpk, 24, kabpk_pctoindex),
"kqnpk": Endgamekey(MAX_kabpk, 24, kabpk_pctoindex),
"krrpk": Endgamekey(MAX_kaapk, 24, kaapk_pctoindex),
"krbpk": Endgamekey(MAX_kabpk, 24, kabpk_pctoindex),
"krnpk": Endgamekey(MAX_kabpk, 24, kabpk_pctoindex),
"kbbpk": Endgamekey(MAX_kaapk, 24, kaapk_pctoindex),
"kbnpk": Endgamekey(MAX_kabpk, 24, kabpk_pctoindex),
"knnpk": Endgamekey(MAX_kaapk, 24, kaapk_pctoindex),
"kqppk": Endgamekey(MAX_kappk, MAX_PPINDEX, kappk_pctoindex),
"krppk": Endgamekey(MAX_kappk, MAX_PPINDEX, kappk_pctoindex),
"kbppk": Endgamekey(MAX_kappk, MAX_PPINDEX, kappk_pctoindex),
"knppk": Endgamekey(MAX_kappk, MAX_PPINDEX, kappk_pctoindex),
"kqpkq": Endgamekey(MAX_kapkb, 24, kapkb_pctoindex),
"kqpkr": Endgamekey(MAX_kapkb, 24, kapkb_pctoindex),
"kqpkb": Endgamekey(MAX_kapkb, 24, kapkb_pctoindex),
"kqpkn": Endgamekey(MAX_kapkb, 24, kapkb_pctoindex),
"krpkq": Endgamekey(MAX_kapkb, 24, kapkb_pctoindex),
"krpkr": Endgamekey(MAX_kapkb, 24, kapkb_pctoindex),
"krpkb": Endgamekey(MAX_kapkb, 24, kapkb_pctoindex),
"krpkn": Endgamekey(MAX_kapkb, 24, kapkb_pctoindex),
"kbpkq": Endgamekey(MAX_kapkb, 24, kapkb_pctoindex),
"kbpkr": Endgamekey(MAX_kapkb, 24, kapkb_pctoindex),
"kbpkb": Endgamekey(MAX_kapkb, 24, kapkb_pctoindex),
"kbpkn": Endgamekey(MAX_kapkb, 24, kapkb_pctoindex),
"knpkq": Endgamekey(MAX_kapkb, 24, kapkb_pctoindex),
"knpkr": Endgamekey(MAX_kapkb, 24, kapkb_pctoindex),
"knpkb": Endgamekey(MAX_kapkb, 24, kapkb_pctoindex),
"knpkn": Endgamekey(MAX_kapkb, 24, kapkb_pctoindex),
"kppkq": Endgamekey(MAX_kppka, MAX_PPINDEX, kppka_pctoindex),
"kppkr": Endgamekey(MAX_kppka, MAX_PPINDEX, kppka_pctoindex),
"kppkb": Endgamekey(MAX_kppka, MAX_PPINDEX, kppka_pctoindex),
"kppkn": Endgamekey(MAX_kppka, MAX_PPINDEX, kppka_pctoindex),
"kqqkp": Endgamekey(MAX_kaakp, 24, kaakp_pctoindex),
"kqrkp": Endgamekey(MAX_kabkp, 24, kabkp_pctoindex),
"kqbkp": Endgamekey(MAX_kabkp, 24, kabkp_pctoindex),
"kqnkp": Endgamekey(MAX_kabkp, 24, kabkp_pctoindex),
"krrkp": Endgamekey(MAX_kaakp, 24, kaakp_pctoindex),
"krbkp": Endgamekey(MAX_kabkp, 24, kabkp_pctoindex),
"krnkp": Endgamekey(MAX_kabkp, 24, kabkp_pctoindex),
"kbbkp": Endgamekey(MAX_kaakp, 24, kaakp_pctoindex),
"kbnkp": Endgamekey(MAX_kabkp, 24, kabkp_pctoindex),
"knnkp": Endgamekey(MAX_kaakp, 24, kaakp_pctoindex),
"kqpkp": Endgamekey(MAX_kapkp, MAX_PpINDEX, kapkp_pctoindex),
"krpkp": Endgamekey(MAX_kapkp, MAX_PpINDEX, kapkp_pctoindex),
"kbpkp": Endgamekey(MAX_kapkp, MAX_PpINDEX, kapkp_pctoindex),
"knpkp": Endgamekey(MAX_kapkp, MAX_PpINDEX, kapkp_pctoindex),
"kpppk": Endgamekey(MAX_kpppk, MAX_PPP48_INDEX, kpppk_pctoindex),
}
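# EGKEY maps a lowercase material signature (e.g. "krpkb") to its endgame table description.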
def sortlists(ws, wp):
z = sorted(zip(wp, ws), key=lambda x: x[0], reverse=True)
wp2, ws2 = zip(*z)
return list(ws2), list(wp2)
def egtb_block_unpack(side, n, bp):
try:
return [dtm_unpack(side, i) for i in bp[:n]]
except TypeError:
return [dtm_unpack(side, ord(i)) for i in bp[:n]]
def split_index(i):
return divmod(i, ENTRIES_PER_BLOCK)
tb_DRAW = 0
tb_WMATE = 1
tb_BMATE = 2
tb_FORBID = 3
tb_UNKNOWN = 7
iDRAW = tb_DRAW
iWMATE = tb_WMATE
iBMATE = tb_BMATE
iFORBID = tb_FORBID
iDRAWt = tb_DRAW | 4
iWMATEt = tb_WMATE | 4
iBMATEt = tb_BMATE | 4
iUNKNOWN = tb_UNKNOWN
iUNKNBIT = (1 << 2)
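# An unpacked DTM value stores the result type in its low 3 bits (INFOMASK) and
# the distance in plies in the bits above PLYSHIFT; see unpackdist() and dtm_unpack().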
def removepiece(ys, yp, j):
del ys[j]
del yp[j]
def opp(side):
return 1 if side == 0 else 0
def adjust_up(dist):
udist = dist
sw = udist & INFOMASK
if sw in [iWMATE, iWMATEt, iBMATE, iBMATEt]:
udist += (1 << PLYSHIFT)
return udist
def bestx(side, a, b):
# 0 = selectfirst
# 1 = selectlowest
# 2 = selecthighest
# 3 = selectsecond
comparison = [
# draw, wmate, bmate, forbid
[0, 3, 0, 0], # draw
[0, 1, 0, 0], # wmate
[3, 3, 2, 0], # bmate
[3, 3, 3, 0], # forbid
]
xorkey = [0, 3]
if (a == iFORBID):
return b
if (b == iFORBID):
return a
retu = [a, a, b, b]
if (b < a):
retu[1] = b
retu[2] = a
key = comparison[a & 3][b & 3] ^ xorkey[side]
return retu[key]
def unpackdist(d):
return d >> PLYSHIFT, d & INFOMASK
def dtm_unpack(stm, packed):
p = packed
if p in [iDRAW, iFORBID]:
return p
info = p & 3
store = p >> 2
if stm == 0:
if info == iWMATE:
moves = store + 1
plies = moves * 2 - 1
prefx = info
elif info == iBMATE:
moves = store
plies = moves * 2
prefx = info
elif info == iDRAW:
moves = store + 1 + 63
plies = moves * 2 - 1
prefx = iWMATE
elif info == iFORBID:
moves = store + 63
plies = moves * 2
prefx = iBMATE
else:
plies = 0
prefx = 0
ret = prefx | (plies << 3)
else:
if info == iBMATE:
moves = store + 1
plies = moves * 2 - 1
prefx = info
elif info == iWMATE:
moves = store
plies = moves * 2
prefx = info
elif info == iDRAW:
if store == 63:
# Exception: no position in the 5-man TBs needs to store 63 for
# iBMATE. It is then just used to indicate iWMATE.
store += 1
moves = store + 63
plies = moves * 2
prefx = iWMATE
else:
moves = store + 1 + 63
plies = moves * 2 - 1
prefx = iBMATE
elif info == iFORBID:
moves = store + 63
plies = moves * 2
prefx = iWMATE
else:
plies = 0
prefx = 0
ret = prefx | (plies << 3)
return ret
class TableBlock(object):
def __init__(self, egkey, side, offset, age):
self.egkey = egkey
self.side = side
self.offset = offset
self.age = age
self.pcache = None
class Request(object):
def __init__(self, white_squares, white_types, black_squares, black_types, side, epsq):
self.white_squares, self.white_types = sortlists(white_squares, white_types)
self.black_squares, self.black_types = sortlists(black_squares, black_types)
self.realside = side
self.side = side
self.epsq = epsq
self.egkey = None
self.white_piece_squares = None
self.white_piece_types = None
self.black_piece_squares = None
self.black_piece_types = None
self.is_reversed = None
self.white_piece_squares = None
Zipinfo = collections.namedtuple("Zipinfo", ["extraoffset", "totalblocks", "blockindex"])
class PythonTablebases(object):
"""Provides access to Gaviota tablebases using pure Python code."""
def __init__(self, directory, lzma):
self.lzma = lzma
self.available_tables = {}
self.streams = {}
self.zipinfo = {}
self.block_cache = {}
self.block_age = 0
if directory is not None:
self.open_directory(directory)
def open_directory(self, directory):
"""Loads *.gtb.cp4* tables from a directory."""
directory = os.path.abspath(directory)
if not os.path.isdir(directory):
raise IOError("not a tablebase directory: {0}".format(repr(directory)))
for tbfile in fnmatch.filter(os.listdir(directory), "*.gtb.cp4"):
self.available_tables[os.path.basename(tbfile).replace(".gtb.cp4", "")] = os.path.join(directory, tbfile)
def probe_dtm(self, board):
"""
Probes for depth-to-mate (DTM) information.
Returns ``None`` if the position was not found in any of the tables.
Otherwise the absolute value is the number of half-moves until
forced mate. The value is positive if the side to move is winning,
otherwise it is negative.
In the example position, White to move will get mated in 10 half-moves:
>>> with chess.gaviota.open_tablebases("data/gaviota") as tablebases:
... tablebases.probe_dtm(chess.Board("8/8/8/8/8/8/8/K2kr3 w - - 0 1"))
...
-10
"""
# Cannot probe positions with castling rights.
if board.castling_rights:
return None
# Prepare the tablebase request.
white = [(square, board.piece_type_at(square)) for square in chess.SquareSet(board.occupied_co[chess.WHITE])]
black = [(square, board.piece_type_at(square)) for square in chess.SquareSet(board.occupied_co[chess.BLACK])]
white_squares, white_types = zip(*white)
black_squares, black_types = zip(*black)
side = 0 if (board.turn == chess.WHITE) else 1
epsq = board.ep_square if board.ep_square else NOSQUARE
req = Request(white_squares, white_types, black_squares, black_types, side, epsq)
# KvK is a draw.
if len(white_squares) == 1 and len(black_squares) == 1:
return 0
# Only up to 5-men tablebases.
if len(white_squares) + len(black_squares) > 5:
return None
# Probe.
try:
dtm = self.egtb_get_dtm(req)
except IndexError:
return None
ply, res = unpackdist(dtm)
if res == iDRAW:
# Draw.
return 0
elif res == iWMATE:
# White mates in the stored position.
if req.realside == 1:
if req.is_reversed:
return ply
else:
return -ply
else:
if req.is_reversed:
return -ply
else:
return ply
elif res == iBMATE:
# Black mates in the stored position.
if req.realside == 0:
if req.is_reversed:
return ply
else:
return -ply
else:
if req.is_reversed:
return -ply
else:
return ply
def probe_wdl(self, board):
"""
Probes for win/draw/loss information.
Returns ``None`` if the position was not found in any of the tables.
Returns ``1`` if the side to move is winning, ``0`` if it is a draw,
and ``-1`` if the side to move is losing.
>>> with chess.gaviota.open_tablebases("data/gaviota") as tablebases:
... tablebases.probe_wdl(chess.Board("8/4k3/8/B7/8/8/8/4K3 w - - 0 1"))
...
0
"""
dtm = self.probe_dtm(board)
if dtm == 0:
if board.is_checkmate():
return -1
else:
return 0
elif dtm > 0:
return 1
elif dtm < 0:
return -1
else:
return None
def _setup_tablebase(self, req):
white_letters = "".join([chess.PIECE_SYMBOLS[i] for i in req.white_types])
black_letters = "".join([chess.PIECE_SYMBOLS[i] for i in req.black_types])
if (white_letters + black_letters) in self.available_tables:
req.is_reversed = False
req.egkey = white_letters + black_letters
req.white_piece_squares = req.white_squares
req.white_piece_types = req.white_types
req.black_piece_squares = req.black_squares
req.black_piece_types = req.black_types
elif (black_letters + white_letters) in self.available_tables:
req.is_reversed = True
req.egkey = black_letters + white_letters
req.white_piece_squares = [flip_ns(s) for s in req.black_squares]
req.white_piece_types = req.black_types
req.black_piece_squares = [flip_ns(s) for s in req.white_squares]
req.black_piece_types = req.white_types
req.side = opp(req.side)
if req.epsq != NOSQUARE:
req.epsq = flip_ns(req.epsq)
else:
raise IndexError("no tablebase available for: {0} {1}".format(white_letters, black_letters))
return self._open_tablebase(req)
def _open_tablebase(self, req):
stream = self.streams.get(req.egkey)
if stream is None:
path = self.available_tables[req.egkey]
stream = open(path, "rb+")
self.egtb_loadindexes(req.egkey, stream)
self.streams[req.egkey] = stream
return stream
def close(self):
"""Closes all loaded tables."""
self.available_tables.clear()
self.zipinfo.clear()
self.block_age = 0
self.block_cache.clear()
while self.streams:
_, stream = self.streams.popitem()
stream.close()
def egtb_get_dtm(self, req):
dtm = self._tb_probe(req)
if req.epsq != NOSQUARE:
capturer_a = 0
capturer_b = 0
xed = 0
# Flip for move generation.
if req.side == 0:
xs = list(req.white_piece_squares)
xp = list(req.white_piece_types)
ys = list(req.black_piece_squares)
yp = list(req.black_piece_types)
else:
xs = list(req.black_piece_squares)
xp = list(req.black_piece_types)
ys = list(req.white_piece_squares)
yp = list(req.white_piece_types)
# Captured pawn trick: derive the captured pawn's square from the en passant square.
xed = req.epsq ^ (1 << 3)
# Find captured index (j).
try:
j = ys.index(xed)
except ValueError:
j = -1
# Try first possible ep capture.
if 0 == (0x88 & (map88(xed) + 1)):
capturer_a = xed + 1
# Try second possible ep capture
if 0 == (0x88 & (map88(xed) - 1)):
capturer_b = xed - 1
if (j > -1) and (ys[j] == xed):
# Find capturers (i).
for i in range(len(xs)):
if xp[i] == PAWN and (xs[i] == capturer_a or xs[i] == capturer_b):
epscore = iFORBID
# Execute capture.
xs[i] = req.epsq
removepiece(ys, yp, j)
# Flip back.
if req.side == 1:
xs, ys = ys, xs
xp, yp = yp, xp
# Make subrequest.
subreq = Request(xs, xp, ys, yp, opp(req.side), NOSQUARE)
try:
epscore = self._tb_probe(subreq)
epscore = adjust_up(epscore)
# Choose whether to capture en passant or not.
dtm = bestx(req.side, epscore, dtm)
except IndexError:
break
return dtm
def egtb_block_getnumber(self, req, idx):
maxindex = EGKEY[req.egkey].maxindex
blocks_per_side = 1 + (maxindex - 1) // ENTRIES_PER_BLOCK
block_in_side = idx // ENTRIES_PER_BLOCK
return req.side * blocks_per_side + block_in_side
def egtb_block_getsize(self, req, idx):
blocksz = ENTRIES_PER_BLOCK
maxindex = EGKEY[req.egkey].maxindex
block = idx // blocksz
offset = block * blocksz
if (offset + blocksz) > maxindex:
return maxindex - offset # last block size
else:
return blocksz # size of a normal block
def _tb_probe(self, req):
stream = self._setup_tablebase(req)
idx = EGKEY[req.egkey].pctoi(req)
offset, remainder = split_index(idx)
t = self.block_cache.get((req.egkey, offset, req.side))
if t is None:
t = TableBlock(req.egkey, req.side, offset, self.block_age)
block = self.egtb_block_getnumber(req, idx)
n = self.egtb_block_getsize(req, idx)
z = self.egtb_block_getsize_zipped(req.egkey, block)
self.egtb_block_park(req.egkey, block, stream)
buffer_zipped = stream.read(z)
if buffer_zipped[0] == 0:
# If flag is zero, plain LZMA is following.
buffer_zipped = buffer_zipped[2:]
else:
# Else LZMA86. We have to build a fake header.
DICTIONARY_SIZE = 4096
POS_STATE_BITS = 2
NUM_LITERAL_POS_STATE_BITS = 0
NUM_LITERAL_CONTEXT_BITS = 3
properties = bytearray(13)
properties[0] = (POS_STATE_BITS * 5 + NUM_LITERAL_POS_STATE_BITS) * 9 + NUM_LITERAL_CONTEXT_BITS
for i in range(4):
properties[1 + i] = (DICTIONARY_SIZE >> (8 * i)) & 0xFF
for i in range(8):
properties[5 + i] = (n >> (8 * i)) & 0xFF
# Concatenate the fake header with the true LZMA stream.
buffer_zipped = properties + buffer_zipped[15:]
buffer_packed = self.lzma.LZMADecompressor().decompress(buffer_zipped)
t.pcache = egtb_block_unpack(req.side, n, buffer_packed)
# Update LRU block cache.
self.block_cache[(t.egkey, t.offset, t.side)] = t
if len(self.block_cache) > 128:
lru_cache_key, lru_age = None, None
for cache_key, cache_entry in self.block_cache.items():
if lru_age is None or cache_entry.age < lru_age:
lru_cache_key = cache_key
lru_age = cache_entry.age
del self.block_cache[lru_cache_key]
else:
t.age = self.block_age
self.block_age += 1
dtm = t.pcache[remainder]
return dtm
def egtb_loadindexes(self, egkey, stream):
zipinfo = self.zipinfo.get(egkey)
if zipinfo is None:
# Get reserved bytes, blocksize, offset.
stream.seek(0)
HeaderStruct = struct.Struct("<10I")
header = HeaderStruct.unpack(stream.read(HeaderStruct.size))
offset = header[8]
blocks = ((offset - 40) // 4) - 1
n_idx = blocks + 1
IndexStruct = struct.Struct("<" + "I" * n_idx)
p = IndexStruct.unpack(stream.read(IndexStruct.size))
zipinfo = Zipinfo(extraoffset=0, totalblocks=n_idx, blockindex=p)
self.zipinfo[egkey] = zipinfo
return zipinfo
def egtb_block_getsize_zipped(self, egkey, block):
i = self.zipinfo[egkey].blockindex[block]
j = self.zipinfo[egkey].blockindex[block + 1]
return j - i
def egtb_block_park(self, egkey, block, stream):
i = self.zipinfo[egkey].blockindex[block]
i += self.zipinfo[egkey].extraoffset
stream.seek(i)
return i
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
class NativeTablebases(object):
"""
Provides access to Gaviota tablebases via the shared library libgtb.
Has the same interface as :class:`~chess.gaviota.PythonTablebases`.
"""
def __init__(self, directory, libgtb):
self.libgtb = libgtb
self.libgtb.tb_init.restype = ctypes.c_char_p
self.libgtb.tb_restart.restype = ctypes.c_char_p
self.libgtb.tbpaths_getmain.restype = ctypes.c_char_p
self.libgtb.tb_probe_hard.argtypes = [
ctypes.c_uint,
ctypes.c_uint,
ctypes.c_uint,
ctypes.POINTER(ctypes.c_uint),
ctypes.POINTER(ctypes.c_uint),
ctypes.POINTER(ctypes.c_ubyte),
ctypes.POINTER(ctypes.c_ubyte),
ctypes.POINTER(ctypes.c_uint),
ctypes.POINTER(ctypes.c_uint)
]
if self.libgtb.tb_is_initialized():
raise RuntimeError("only one gaviota instance can be initialized at a time")
self.paths = []
if directory is not None:
self.open_directory(directory)
self._tbcache_restart(1024 * 1024, 50)
def open_directory(self, directory):
if not os.path.isdir(directory):
raise IOError("not a tablebase directory: {0}".format(repr(directory)))
self.paths.append(directory)
self._tb_restart()
def _tb_restart(self):
self.c_paths = (ctypes.c_char_p * len(self.paths))()
self.c_paths[:] = [path.encode("utf-8") for path in self.paths]
verbosity = ctypes.c_int(1)
compression_scheme = ctypes.c_int(4)
ret = self.libgtb.tb_restart(verbosity, compression_scheme, self.c_paths)
if ret:
LOGGER.debug(ret.decode("utf-8"))
LOGGER.debug("Main path has been set to %r", self.libgtb.tbpaths_getmain().decode("utf-8"))
av = self.libgtb.tb_availability()
if av & 1:
LOGGER.debug("Some 3 piece tablebases available")
if av & 2:
LOGGER.debug("All 3 piece tablebases complete")
if av & 4:
LOGGER.debug("Some 4 piece tablebases available")
if av & 8:
LOGGER.debug("All 4 piece tablebases complete")
if av & 16:
LOGGER.debug("Some 5 piece tablebases available")
if av & 32:
LOGGER.debug("All 5 piece tablebases complete")
def _tbcache_restart(self, cache_mem, wdl_fraction):
self.libgtb.tbcache_restart(ctypes.c_size_t(cache_mem), ctypes.c_int(wdl_fraction))
def probe_dtm(self, board):
return self._probe_hard(board)
def probe_wdl(self, board):
return self._probe_hard(board, wdl_only=True)
def _probe_hard(self, board, wdl_only=False):
if board.is_insufficient_material():
return 0
if chess.pop_count(board.occupied) > 5:
return None
if board.castling_rights:
return None
stm = ctypes.c_uint(0 if board.turn == chess.WHITE else 1)
ep_square = ctypes.c_uint(board.ep_square if board.ep_square else 64)
castling = ctypes.c_uint(0)
c_ws = (ctypes.c_uint * 17)()
c_wp = (ctypes.c_ubyte * 17)()
i = -1
for i, square in enumerate(chess.SquareSet(board.occupied_co[chess.WHITE])):
c_ws[i] = square
c_wp[i] = board.piece_type_at(square)
c_ws[i + 1] = 64
c_wp[i + 1] = 0
c_bs = (ctypes.c_uint * 17)()
c_bp = (ctypes.c_ubyte * 17)()
i = -1
for i, square in enumerate(chess.SquareSet(board.occupied_co[chess.BLACK])):
c_bs[i] = square
c_bp[i] = board.piece_type_at(square)
c_bs[i + 1] = 64
c_bp[i + 1] = 0
# Do a hard probe.
info = ctypes.c_uint()
pliestomate = ctypes.c_uint()
if not wdl_only:
ret = self.libgtb.tb_probe_hard(stm, ep_square, castling, c_ws, c_bs, c_wp, c_bp, ctypes.byref(info), ctypes.byref(pliestomate))
dtm = int(pliestomate.value)
else:
ret = self.libgtb.tb_probe_WDL_hard(stm, ep_square, castling, c_ws, c_bs, c_wp, c_bp, ctypes.byref(info))
dtm = 1
# Probe forbidden.
if info.value == 3:
LOGGER.warning("Tablebase for %s marked as forbidden", board.fen())
return None
# Probe failed or unknown.
if not ret or info.value == 7:
return None
# Draw.
if info.value == 0:
return 0
# White mates.
if info.value == 1:
return dtm if board.turn == chess.WHITE else -dtm
# Black mates.
if info.value == 2:
return dtm if board.turn == chess.BLACK else -dtm
def close(self):
self.paths = []
if self.libgtb.tb_is_initialized():
self.libgtb.tbcache_done()
self.libgtb.tb_done()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open_tablebases_native(directory, libgtb=None, LibraryLoader=ctypes.cdll):
"""
Opens a collection of tablebases for probing using libgtb.
In most cases :func:`~chess.gaviota.open_tablebases()` should be used.
Use this function only if you do not want to downgrade to pure Python
tablebase probing.
Raises :exc:`RuntimeError` or :exc:`OSError` when libgtb cannot be used.
"""
libgtb = libgtb or ctypes.util.find_library("gtb") or "libgtb.so.1.0.1"
return NativeTablebases(directory, LibraryLoader.LoadLibrary(libgtb))
def open_tablebases(directory=None, libgtb=None, LibraryLoader=ctypes.cdll):
"""
Opens a collection of tablebases for probing.
First, native access via the shared library libgtb is tried. You can
optionally provide a specific library name or a library loader.
The shared library has global state and caches, so only one instance can
be open at a time.
Second, the pure Python probing code is tried.
"""
try:
if LibraryLoader:
return open_tablebases_native(directory, libgtb, LibraryLoader)
except (OSError, RuntimeError) as err:
LOGGER.info("Falling back to pure Python tablebases: %r", err)
try:
import lzma
except ImportError:
try:
from backports import lzma
except ImportError:
raise ImportError("chess.gaviota requires backports.lzma or libgtb")
return PythonTablebases(directory, lzma)
| Bojanovski/ChessANN | Code/dependencies/chess/gaviota.py | Python | mit | 63,321 |
from django.test import TestCase
from countries import factories
class ModelsTests(TestCase):
def test_models_continent_str(self):
continent = factories.ContinentFactory()
self.assertEqual(str(continent), continent.code)
def test_models_countries_str(self):
country = factories.CountryFactory()
self.assertEqual(str(country), country.cca2)
def test_models_language_str(self):
language = factories.LanguageFactory()
self.assertEqual(str(language), language.cla3)
def test_models_country_name_str(self):
country_name = factories.CountryNameFactory()
self.assertTrue(
str(country_name).startswith(country_name.country.cca2))
def test_models_division_str(self):
division = factories.DivisionFactory()
self.assertTrue(str(division).startswith(division.country.cca2))
def test_models_currency_str(self):
currency = factories.CurrencyFactory()
self.assertEqual(str(currency), currency.code)
def test_models_locale_str(self):
locale = factories.LocaleFactory()
self.assertEqual(str(locale), locale.code)
def test_models_timezone_str(self):
timezone = factories.TimezoneFactory()
self.assertEqual(str(timezone), timezone.name)
| flavors/countries | tests/test_models.py | Python | mit | 1,302 |
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=9876)
| austintrose/easy-graphs | application.py | Python | mit | 227 |
import mbuild as mb
from mbuild.lib.moieties import CH3
class Ethane(mb.Compound):
"""An ethane molecule. """
def __init__(self):
"""Connect two methyl groups to form an ethane. """
super(Ethane, self).__init__()
self.add(CH3(), "methyl1")
self.add(CH3(), "methyl2")
mb.equivalence_transform(self['methyl1'], self['methyl1']['up'], self['methyl2']['up'])
def main():
ethane = Ethane()
return ethane
if __name__ == "__main__":
ethane = main()
atoms = list(ethane.particles())
print(ethane)
for a in ethane.particles():
print(a)
ethane.visualize(show_ports=True)
| Jonestj1/mbuild | mbuild/examples/ethane/ethane.py | Python | mit | 652 |
import os
import sys
import stat
import subprocess
import shutil
import click
import requests
import time
from configparser import ConfigParser
import getpass
APP_DIR = click.get_app_dir('juantap')
CONFIG_PATH = os.path.join(APP_DIR, 'config.ini')
click.echo(CONFIG_PATH)
CFG = ConfigParser()
def write_config():
with open(CONFIG_PATH, 'w') as cfg_file:
CFG.write(cfg_file)
def default_config():
CFG['system'] = {
'JuantapUser' : getpass.getuser(),
'RootServerDir' : os.path.expanduser('~/rootserver'),
'InstancesDir' : os.path.expanduser('~/instances'),
'NumberOfInstances' : 2,
'BaseHostname': "Juantap",
}
write_config()
if not os.path.exists(CONFIG_PATH):
os.makedirs(APP_DIR, exist_ok=True)
default_config()
CFG.read(CONFIG_PATH)
if 'system' not in CFG.sections():
click.echo("Malformed config file!")
if click.confirm("Do you want to reset the config?"):
default_config()
else:
sys.exit(1)
@click.group()
def cli():
"""
CLI for managing multiple csgo server instances and their root server.
Harnesses the power of overlayfs.
"""
pass
from . import config, instances, root
cli.add_command(config.config)
cli.add_command(instances.instances)
cli.add_command(root.root)
| mathiassmichno/juantap | juantap/__init__.py | Python | mit | 1,304 |
class Error(Exception):
def __init__(self, message):
super(Error, self).__init__(message)
self.message = message
class PathError(Error, ValueError):
pass
class NotFoundError(Error, IOError):
pass
| grow/grow | grow/storage/errors.py | Python | mit | 229 |
# -*- coding: utf-8 -*-
import re
import hashlib
import logging
from tornado.web import RequestHandler
import tornado.web
from common.settings import DEBUG, MEMCACHE_ENABLED
from common.query_settings import APIdict
if MEMCACHE_ENABLED:
from api.db_selectors import mc
_asciire = re.compile('([\x00-\x7f]+)')
def _is_unicode(x):
return isinstance(x, unicode)
_hexdig = '0123456789ABCDEFabcdef'
_hextochr = dict((a + b, chr(int(a + b, 16)))
for a in _hexdig for b in _hexdig)
def unquote(input_str):
"""unquote('abc%20def') -> 'abc def'."""
if _is_unicode(input_str):
if '%' not in input_str:
return input_str
bits = _asciire.split(input_str)
res = [bits[0]]
append = res.append
for i in range(1, len(bits), 2):
append(unquote(str(bits[i])).decode('utf-8'))
append(bits[i + 1])
return ''.join(res)
bits = input_str.split('%')
# fastpath
if len(bits) == 1:
return input_str
res = [bits[0]]
append = res.append
for item in bits[1:]:
try:
append(_hextochr[item[:2]])
append(item[2:])
except KeyError:
append('%')
append(item)
return ''.join(res)
class MainHandler(RequestHandler):
def get(self):
self.write("ClearSpending.ru API v3")
class InvalidRequestHandler(RequestHandler):
def get(self, query_str, resource_name, method_name):
self.write(u"Invalid request.")
class AllHandler(RequestHandler):
def get(self, query_str):
print query_str
self.write("api ")
self.write(query_str)
def parse_returnfields(fields_string):
'''
Converts the request parameter that lists the requested fields into a dictionary of
requested fields of the form {'field_name': 1}.
'''
fields_dict = None
try:
fields = unicode(fields_string).replace(u"[", u"").replace(u"]", u"").replace(u"{", u"").replace(u"}",
u"").strip()
if fields != "":
fields_dict = dict(map(lambda f: (f.strip(), 1), (field for field in fields.split(","))))
except:
fields_dict = None
return fields_dict
def parse_pameters(parameters):
'''
Converts the GET parameters of a request into a parameter dictionary of the form {u'name': u'value', ...}.
:param parameters: a string of the form u'name1=value1&name2=value2'
:return: a dictionary of the form {u'name1': u'value1', u'name2': u'value2'}
'''
parametersDict = dict(map(lambda p: (p.split("=", 2)[0], p.split("=", 2)[1]),
(parameter for parameter in parameters.strip().split("&"))))
returnfields = parametersDict.get("returnfields")
if returnfields <> None: parametersDict["returnfields"] = parse_returnfields(returnfields)
return parametersDict
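# Illustrative example traced from the two helpers above (hypothetical values,
# not taken from the real API):
#     parse_parameters(u'format=csv&returnfields=[name,price]')
#     -> {u'format': u'csv', u'returnfields': {u'name': 1, u'price': 1}}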
class ApiV2Handler(RequestHandler):
@tornado.web.asynchronous
def get(self, query_str, resource_name, method_name):
parameters = unquote(unicode(self.request.query))
query_str = unquote(unicode(query_str)) + parameters
if DEBUG:
self.real_get_handler(query_str, resource_name, method_name, parameters)
return
try:
self.real_get_handler(query_str, resource_name, method_name, parameters)
except Exception as e:
self.set_status(500)
self.finish(u"Invalid request.")
logging.warning('Invalid request: %s' % query_str)
logging.warning('Invalid request: %s \n resourceName: %s, methodName: %s, parameters: %s' % (
query_str, resource_name, method_name, parameters))
logging.warning('Exception: %s' % str(e))
def real_get_handler(self, query_str, resource_name, method_name, parameters):
        '''
        Main handler for requests with GET parameters.
        :param query_str: the URL path together with the GET parameters
        :param resource_name: name of the collection
        :param method_name:
        :param parameters: a string of the form u'name1=value1&name2=value2'
        '''
if MEMCACHE_ENABLED:
mc_key = hashlib.sha224(query_str.encode("UTF-8").replace(" ", "%20")).hexdigest()
data_str = mc.get(mc_key)
if data_str:
self.set_header("Content-Type", "application/json")
self.finish(data_str)
return None
        parameters_for_db_query = parse_parameters(parameters)
parameters_for_db_query["queryStr"] = query_str
parameters_for_db_query["resourceName"] = resource_name
parameters_for_db_query["methodName"] = method_name
f_data = APIdict[method_name][resource_name]["function"]
data_str = f_data(parameters_for_db_query)
if data_str:
format = parameters_for_db_query.get('format', 'json')
if 'csv' in format:
self.set_header("Content-Type", "text/csv")
elif 'xls' in format:
self.set_header("Content-Type", "application/vnd.ms-excel")
else:
self.set_header("Content-Type", "application/json")
self.finish(data_str)
# Cache record will expired after 12 hours
if MEMCACHE_ENABLED:
mc.set(mc_key, data_str, time=43200)
else:
self.set_status(404)
self.finish(u"Data not found.")
| clearspending/api.clearspending.ru | api/handler.py | Python | mit | 5,904 |
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="size", parent_name="mesh3d.colorbar.tickfont", **kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 1),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/mesh3d/colorbar/tickfont/_size.py | Python | mit | 461 |
from django.contrib.gis.db import models
class Friend(models.Model):
'''A friend'''
# Custom GeoDjango manager
objects = models.GeoManager()
first_name = models.CharField(max_length=16, blank=False)
last_name = models.CharField(max_length=16)
# Where does your friend live?
street_address = models.CharField(max_length=16)
city = models.CharField(max_length=16)
location = models.PointField(geography=True)
def __unicode__(self):
return self.first_name
| johnwoltman/geotest | geotest/models.py | Python | mit | 486 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
from django.contrib import admin
from snisi_core.admin import ReportAdmin
from snisi_cataract.models import (CATMissionR,
CATSurgeryR)
admin.site.register(CATSurgeryR, ReportAdmin)
admin.site.register(CATMissionR, ReportAdmin)
| yeleman/snisi | snisi_cataract/admin.py | Python | mit | 453 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Editable Table
import django_tables2 as tables
import json
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
from .models import Question, Choice
from .tables import QuestionTable
# Create your views here.
def index(request):
data = Question.objects.all()
data.edit_link = reverse('polls:edit')
table = QuestionTable(data)
context = {'table': table}
return render(request, 'polls/index.html', context)
def edit(request):
# Editable Table handler
    # In case of an error, the response body will be shown as the error message.
    # If the value is correct, your server should return 'ok'.
    if request.is_ajax():
# Validation
try:
updateFields = json.loads(request.POST['updateFields'])
except:
return HttpResponse("Invalid data")
try:
for id in updateFields:
record = Question.objects.get(id=id)
for field in updateFields[id]:
setattr(record, field, updateFields[id][field])
record.save()
except:
# Cannot find record or invalid input data
return HttpResponse("Database error")
return HttpResponse('ok')
def detail(request, question_id):
question = get_object_or_404(Question, pk=question_id)
return render(request, 'polls/detail.html', {'question': question})
def results(request, question_id):
question = get_object_or_404(Question, pk=question_id)
return render(request, 'polls/results.html', {'question': question})
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(request, 'polls/detail.html', {
'question': question,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
| vithd/vithd.github.io | django/mysite/polls/views.py | Python | mit | 2,771 |
# -*- coding: utf-8 -*-
import unittest
from logging import getLogger
from calmjs.parse.testing.util import build_equality_testcase
from calmjs.parse.testing.util import build_exception_testcase
from calmjs.parse.testing.util import setup_logger
from calmjs.parse.tests.parser import format_repr_program_type
def run(self):
"""
A dummy run method.
"""
class BuilderEqualityTestCase(unittest.TestCase):
def test_build_equality_testcase(self):
DummyTestCase = build_equality_testcase('DummyTestCase', int, [
('str_to_int_pass', '1', 1),
('str_to_int_fail', '2', 1),
('str_to_int_exception', 'z', 1),
])
DummyTestCase.runTest = run
testcase = DummyTestCase()
testcase.test_str_to_int_pass()
with self.assertRaises(AssertionError):
testcase.test_str_to_int_fail()
with self.assertRaises(ValueError):
testcase.test_str_to_int_exception()
def test_build_equality_testcase_flag_dupe_labels(self):
with self.assertRaises(ValueError):
build_equality_testcase('DummyTestCase', int, [
('str_to_int_dupe', '1', 1),
('str_to_int_dupe', '2', 2),
])
class BuilderExceptionTestCase(unittest.TestCase):
def test_build_exception_testcase(self):
def demo(arg):
if not arg.isdigit():
raise ValueError(arg + ' not a number')
FailTestCase = build_exception_testcase(
'FailTestCase', demo, [
('str_to_int_fail1', 'hello', 'hello not a number'),
('str_to_int_fail2', 'goodbye', 'hello not a number'),
('str_to_int_fail3', '1', '1 not a number'),
('str_to_int_no_msg', 'hello', None),
],
ValueError,
)
FailTestCase.runTest = run
testcase = FailTestCase()
# ValueError should have been caught.
testcase.test_str_to_int_fail1()
# no message check done.
testcase.test_str_to_int_no_msg()
# wrong exception message
with self.assertRaises(AssertionError):
testcase.test_str_to_int_fail2()
# Naturally, the final test will not raise it.
with self.assertRaises(AssertionError):
testcase.test_str_to_int_fail3()
class SetupLoggerTestCase(unittest.TestCase):
def test_build_exception_testcase(self):
class DemoTestCase(unittest.TestCase):
def runTest(self):
"""Dummy run method for PY2"""
testcase = DemoTestCase()
logger = getLogger('demo_test_case')
original_level = logger.level
original_handlers = len(logger.handlers)
setup_logger(testcase, logger)
self.assertNotEqual(original_level, logger.level)
self.assertNotEqual(original_handlers, len(logger.handlers))
testcase.doCleanups()
self.assertEqual(original_level, logger.level)
self.assertEqual(original_handlers, len(logger.handlers))
class ParserTestSetupTestCase(unittest.TestCase):
def test_match(self):
result = format_repr_program_type('foo', '<Program>', 'ES4Program')
self.assertEqual(result, '<ES4Program>')
def test_fail(self):
with self.assertRaises(ValueError) as e:
format_repr_program_type('foo', '<ES4Program>', 'ES4Program')
self.assertEqual(
e.exception.args[0], "repr test result for 'foo' did not start "
"with generic '<Program', got: <ES4Program>"
)
with self.assertRaises(ValueError) as e:
format_repr_program_type('foo', '<ES5Program>', 'ES4Program')
self.assertEqual(
e.exception.args[0], "repr test result for 'foo' did not start "
"with generic '<Program', got: <ES5Program>"
)
| calmjs/calmjs.parse | src/calmjs/parse/tests/test_testing.py | Python | mit | 3,879 |
import requests_unixsocket
import json
from utils.utils import Utils
u = Utils()
#https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/
class Events():
def __init__(self):
self.base = "http+unix://%2Fvar%2Frun%2Fdocker.sock"
self.url = "/events"
self.session = requests_unixsocket.Session()
try:
self.resp = self.session.get( self.base + self.url, stream= True)
except Exception as ex:
template = "An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
def events(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
return resp.iter_lines()
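# Minimal usage sketch (my own illustration, not part of the module; it assumes
# a local Docker daemon listening on /var/run/docker.sock):
#
#     for event in Events().events():
#         print(event)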
| eon01/DoMonit | domonit/events.py | Python | mit | 816 |
import os
import subprocess
from flask import Flask, url_for, redirect, request, jsonify
cmd_folder = os.path.dirname(os.path.abspath(__file__))
f2f_path = os.path.join(cmd_folder, 'lib', 'f2f.pl')
app = Flask(__name__, static_url_path='')
#2. Define needed routes here
@app.route('/')
def index():
return redirect(url_for('static', filename='index.html'))
@app.route('/api', methods=['GET', 'POST'])
def f2fb():
if (request.method == 'POST' and request.is_xhr):
cmd = ['perl', f2f_path,
'--tab', request.form['tab']]
base_indent = request.form['base-indent']
if base_indent != u'-1':
cmd.extend(['--base-indent', base_indent])
dp = request.form['dp']
if dp == u'8':
cmd.extend(['--dp-to-star-kind', '8'])
elif dp == u'dp':
cmd.extend(['--dp-to-star-kind', 'dp'])
p = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, error = p.communicate(input=request.form['source'])
if request.form['lowercase'] == 'on':
output = output.lower()
return jsonify({'output': output, 'error': error})
else:
redirect('/')
#4. Main method for local development
if __name__ == "__main__":
app.run(debug=True) | mgaitan/f2f_online | app.py | Python | mit | 1,408 |
import engine
import pytest
VALID_COORDS = [(x, y) for x in xrange(97, 105) for y in xrange(49, 57)]
INVALID_COORDS = [
(0, 0), (-1, -1),
(96, 49), (96, 48),
(105, 49), (104, 48),
(96, 56), (97, 57),
(105, 56), (104, 57)
]
VALID_A1 = [chr(x) + chr(y) for x in xrange(97, 105) for y in xrange(49, 57)]
INVALID_A1 = ['a0', 'a9', 'h0', 'h9', 'z1', 'z8']
EMPTY_BOARD = dict([((x, y), None) for x in xrange(97, 105) for y in xrange(49, 57)])
def test_coord_to_a1():
u"""Assert the coord-to-a1 dictionary has only the desired keypairs."""
for coord in VALID_COORDS:
assert engine._coord_to_a1.get(coord, False) is not False
for coord in INVALID_COORDS:
assert engine._coord_to_a1.get(coord, False) is False
def test_a1_to_coord():
u"""Assert the a1-to-coord dictionary has only the desired keypairs."""
for a1 in VALID_A1:
assert engine._a1_to_coord.get(a1, False) is not False
for a1 in INVALID_A1:
assert engine._a1_to_coord.get(a1, False) is False
def test_is_coord_on_board():
u"""Assert that only valid coords are considered in the board."""
for coord in VALID_COORDS:
assert engine._is_coord_on_board(coord) is True
for coord in INVALID_COORDS:
assert engine._is_coord_on_board(coord) is False
def test_instantiate_Piece():
u"""Assert an instance of Piece has the expected attributes."""
for coord in VALID_COORDS:
p = engine.Piece(coord)
assert isinstance(p, engine.Piece)
assert p.x, p.y == coord
def test_instantiate_SimpleUnit():
u"""Assert an instance of SimpleUnit has the expected attributes."""
for color in ['white', 'black']:
for coord in VALID_COORDS:
p = engine.SimpleUnit(coord, color)
assert isinstance(p, engine.SimpleUnit)
assert p.x, p.y == coord
assert p.color == color
def test_SimpleUnit_possible_moves_empty_space():
u"""Assert only valid moves are returned."""
p = engine.SimpleUnit(VALID_COORDS[0], 'white')
board = EMPTY_BOARD.copy()
board[(p.x, p.y)] = p
assert p.possible_moves(board) == [(p.x, p.y + 1)]
def test_SimpleUnit_possible_moves_into_ally():
u"""Assert moves into allied units are not in returned move list."""
p = engine.SimpleUnit(VALID_COORDS[0], 'white')
q = engine.SimpleUnit(VALID_COORDS[1], 'white')
board = EMPTY_BOARD.copy()
board[(p.x, p.y)] = p
board[(q.x, q.y)] = q
assert p.possible_moves(board) == []
def test_SimpleUnit_possible_moves_off_board():
u"""Assert that moves off the board are not in returned move list."""
p = engine.SimpleUnit(VALID_COORDS[-1], 'white')
board = EMPTY_BOARD.copy()
board[(p.x, p.y)] = p
assert p.possible_moves(board) == []
def test_SimpleUnit_possible_moves_into_enemy():
u"""Assert that moves into an enemy unit is in the returned move list."""
p = engine.SimpleUnit(VALID_COORDS[0], 'white')
q = engine.SimpleUnit(VALID_COORDS[1], 'black')
board = EMPTY_BOARD.copy()
board[(p.x, p.y)] = p
board[(q.x, q.y)] = q
assert p.possible_moves(board) == [(p.x, p.y + 1)]
def test_assert_SimpleUnit_moving_into_empty_space():
u"""Assert moving into empty space returns a board with unit moved."""
p = engine.SimpleUnit(VALID_COORDS[0], 'white')
board = EMPTY_BOARD.copy()
new_board = p.move(VALID_COORDS[1], board)
assert new_board[VALID_COORDS[0]] is None
assert new_board[VALID_COORDS[1]] is p
def test_SimpleUnit_moving_into_ally():
u"""Assert moving into allied units do not return a board."""
p = engine.SimpleUnit(VALID_COORDS[0], 'white')
q = engine.SimpleUnit(VALID_COORDS[1], 'white')
board = EMPTY_BOARD.copy()
board[(p.x, p.y)] = p
board[(q.x, q.y)] = q
new_board = p.move(VALID_COORDS[1], board)
assert new_board == board
def test_SimpleUnit_moving_off_board():
u"""Assert moving off the board do not return a board."""
p = engine.SimpleUnit(VALID_COORDS[-1], 'white')
board = EMPTY_BOARD.copy()
board[(p.x, p.y)] = p
new_board = p.move((105, 56), board)
assert new_board == board
def test_SimpleUnit_moving_into_enemy():
u"""Assert moving into an enemy unit returns a board with unit moved."""
p = engine.SimpleUnit(VALID_COORDS[0], 'white')
q = engine.SimpleUnit(VALID_COORDS[1], 'black')
board = EMPTY_BOARD.copy()
board[(p.x, p.y)] = p
board[(q.x, q.y)] = q
new_board = p.move(VALID_COORDS[1], board)
assert new_board[VALID_COORDS[0]] is None
assert new_board[VALID_COORDS[1]] is p
def test_instantiate_Match():
u"""Assert an instance of Match has the expected attributes."""
m = engine.Match()
assert isinstance(m, engine.Match)
assert m.board == dict([((x, y), None) for x in xrange(97, 105) for y in xrange(49, 57)])
def test_add_simple_units_to_Match():
u"""Assert simple units are added to only the top and bottom two rows."""
m = engine.Match()
m._add_simple_units()
for coord in [(x, y) for x in xrange(97, 105) for y in xrange(55, 57)]:
assert isinstance(m.board[coord], engine.SimpleUnit)
assert m.board[coord].color == 'black'
for coord in [(x, y) for x in xrange(97, 105) for y in xrange(49, 51)]:
assert isinstance(m.board[coord], engine.SimpleUnit)
assert m.board[coord].color == 'white'
for coord in [(x, y) for x in xrange(97, 105) for y in xrange(51, 55)]:
assert m.board[coord] is None
def test_move_unit_in_Match_to_valid_a1():
u"""Assert moving via Match to a valid pos maintains proper board state."""
m = engine.Match()
m._add_simple_units()
p = m.board[VALID_COORDS[1]]
m.move('a2', 'a3')
assert m.board[VALID_COORDS[1]] is None
assert m.board[VALID_COORDS[2]] is p
def test_move_unit_in_Match_no_unit_at_start_a1():
u"""Assert moving a unit from an empty location raises LookupError."""
m = engine.Match()
m._add_simple_units()
with pytest.raises(LookupError):
m.move('a4', 'a5')
def test_move_unit_in_Match_to_enemy_a1():
u"""Assert moving to an enemy unit maintains proper board state."""
m = engine.Match()
m._add_simple_units()
q = engine.SimpleUnit((97, 51), 'black')
m.board[(97, 51)] = q
p = m.board[VALID_COORDS[1]]
assert q in m.board.values()
m.move('a2', 'a3')
assert m.board[VALID_COORDS[1]] is None
assert m.board[VALID_COORDS[2]] is p
assert q not in m.board.values()
def test_move_unit_via_Match_into_ally_a1():
u"""Assert moving via Match to an ally pos does not update board state."""
m = engine.Match()
m._add_simple_units()
p = m.board[VALID_COORDS[0]]
q = m.board[VALID_COORDS[1]]
m.move('a1', 'a2')
assert m.board[VALID_COORDS[0]] is p
assert m.board[VALID_COORDS[1]] is q
| EyuelAbebe/gamer | test_engine.py | Python | mit | 6,871 |
OUTPUT = 'ko{}-{}ntti'.format
VOWELS = frozenset('aeiouyAEIOUY')
def kontti(s):
result = []
for word in s.split():
dex = next((i for i, a in enumerate(word) if a in VOWELS), -1) + 1
result.append(OUTPUT(word[dex:], word[:dex]) if dex else word)
return ' '.join(result)
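# Illustrative checks traced from the implementation above (not quoted from the
# kata description):
#     kontti('hello world') -> 'kollo-hentti korld-wontti'
#     kontti('bcd') -> 'bcd'   (no vowel, so the word is left unchanged)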
| the-zebulan/CodeWars | katas/beta/kontti_language.py | Python | mit | 299 |
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="colorsrc", parent_name="histogram.marker", **kwargs
):
super(ColorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/histogram/marker/_colorsrc.py | Python | mit | 419 |
#! /bin/sh
""":"
exec python $0 ${1+"$@"}
"""
import fileinput
import os
import platform
import re
import sys
import argparse
all_files = {
# Proxy files for Ubuntu.
'ubuntu': {
'/etc/environment': [
'http_proxy=":proxy.http:"',
'https_proxy=":proxy.https:"',
'ftp_proxy=":proxy.ftp:"',
'socks_proxy=":proxy.socks:"'
],
'/etc/wgetrc': [
'http_proxy=":proxy.http:"',
'https_proxy=":proxy.https:"',
'ftp_proxy=":proxy.ftp:"',
'socks_proxy=":proxy.socks:"'
],
'/etc/apt/apt.conf': [
'Acquire::http::proxy ":proxy.http:";',
'Acquire::https::proxy ":proxy.https:";',
'Acquire::ftp::proxy ":proxy.ftp:";',
'Acquire::socks::proxy ":proxy.socks:";'
],
'/etc/bash.bashrc': [
'export http_proxy=":proxy.http:"',
'export https_proxy=":proxy.https:"',
'export ftp_proxy=":proxy.ftp:"',
'export socks_proxy=":proxy.socks:"'
],
'~/.bashrc': [
'export http_proxy=":proxy.http:"',
'export https_proxy=":proxy.https:"',
'export ftp_proxy=":proxy.ftp:"',
'export socks_proxy=":proxy.socks:"'
]
},
'linuxmint': {
'/etc/environment': [
'http_proxy=":proxy.http:"',
'https_proxy=":proxy.https:"',
'ftp_proxy=":proxy.ftp:"',
'socks_proxy=":proxy.socks:"'
],
'/etc/wgetrc': [
'http_proxy=":proxy.http:"',
'https_proxy=":proxy.https:"',
'ftp_proxy=":proxy.ftp:"',
'socks_proxy=":proxy.socks:"'
],
'/etc/apt/apt.conf': [
'Acquire::http::proxy ":proxy.http:";',
'Acquire::https::proxy ":proxy.https:";',
'Acquire::ftp::proxy ":proxy.ftp:";',
'Acquire::socks::proxy ":proxy.socks:";'
],
'/etc/bash.bashrc': [
'export http_proxy=":proxy.http:"',
'export https_proxy=":proxy.https:"',
'export ftp_proxy=":proxy.ftp:"',
'export socks_proxy=":proxy.socks:"'
],
'~/.bashrc': [
'export http_proxy=":proxy.http:"',
'export https_proxy=":proxy.https:"',
'export ftp_proxy=":proxy.ftp:"',
'export socks_proxy=":proxy.socks:"'
]
}
}
SUPPRESS = '==SUPPRESS=='
def die(error, message=None):
if message:
print message
print error
exit(1)
class ProxyType(object):
def __init__(self, https=True, http=True, ftp=True, socks=True):
self.http = http
self.https = https
self.ftp = ftp
self.socks = socks
class Proxy(object):
enabled = ProxyType()
def __init__(self, s, pn, u=None, p=None):
if u and p:
proxy_string = "%s:%s@%s:%d" % (u, p, s, pn)
else:
proxy_string = "%s:%d" % (s, pn)
self.http = "http://" + proxy_string
self.https = "https://" + proxy_string
self.ftp = "ftp://" + proxy_string
self.socks = "socks://" + proxy_string
def set_enabled_proxies(self, enabled):
"""
:type enabled: ProxyType
:param enabled: Enabled proxy types
"""
self.enabled = enabled
def process(self, filename, patterns, clear=False):
"""
:type filename: str
:param filename: Proxy configuration file
:type patterns: list
:param patterns: Proxy configuration patterns
"""
proxy_pattern_string = r'(?:[^:]+:[^@]+@)?' \
r'((?:(?:[0-9]{1,3}\.){3}[0-9]{1,3})|' \
r'(?:([\da-z\.-]+)\.([a-z\.]{2,6}))):[\d]{1,5}'
http_proxy_pattern = ':proxy.http:'
https_proxy_pattern = ':proxy.https:'
ftp_proxy_pattern = ':proxy.ftp:'
socks_proxy_pattern = ':proxy.socks:'
if not os.path.isfile(filename):
print "Creating file: %s..." % filename
# noinspection PyBroadException
try:
f = open(filename, 'w+') # Trying to create a new file or open one
f.close()
except:
die('Something went wrong! Can\'t create file: %s?' % filename)
self.clear_proxies(filename, clear)
f = open(filename, 'a')
print "Setting proxy in file: %s..." % filename
f.write("\n")
for pattern in patterns:
if self.enabled.http and pattern.find(http_proxy_pattern) >= 0:
f.write(pattern.replace(http_proxy_pattern, self.http) + '\n')
else:
if self.enabled.https and pattern.find(https_proxy_pattern) >= 0:
f.write(pattern.replace(https_proxy_pattern, self.https) + '\n')
else:
if self.enabled.ftp and pattern.find(ftp_proxy_pattern) >= 0:
f.write(pattern.replace(ftp_proxy_pattern, self.ftp) + '\n')
else:
if self.enabled.socks and pattern.find(socks_proxy_pattern) >= 0:
f.write(pattern.replace(socks_proxy_pattern, self.socks) + '\n')
f.close()
@staticmethod
def clear_proxies(filename, clear, write=False):
"""
:type filename: str
:param filename: Proxy configuration file
:type proxy_pattern_string: str
:param proxy_pattern_string: Proxy regular expression
"""
i = 1
print "\nClearing old proxy from file: %s..." % filename
for line in fileinput.input(filename, inplace=True):
# if re.match(proxy_pattern_string, line, re.I):
proxy_type = line.find('http') >= 0 or line.find('https') >= 0 or line.find('ftp') >= 0 or line.find(
'socks') >= 0
if line.find('proxy') >= 0 and proxy_type:
                if line.strip()[0] == '#':
if not clear:
sys.stdout.write(line)
else:
if not clear:
if write:
sys.stdout.write(line)
else:
sys.stdout.write('# %s' % line)
sys.stderr.write("line #%d: %s" % (i, line))
else:
sys.stdout.write(line)
i += 1
def get_files():
if os.getuid() != 0:
die("Error: run it with sudo")
dist = platform.linux_distribution()
files = None
if len(dist[0]):
dist_name = dist[0].lower()
print "Checking configuration for %s..." % dist_name
if dist_name not in all_files:
die('Error: No proxy configurations for %s.' % dist_name)
files = all_files[dist_name]
else:
die('Cannot detect operation system.')
return files
class _TestAction(argparse.Action):
def __init__(self, option_strings, dest=SUPPRESS, default=SUPPRESS, help=None):
super(_TestAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
files = get_files()
for filename in files:
Proxy.clear_proxies(os.path.expanduser(filename), False, True)
parser.exit()
class _DeleteAction(argparse.Action):
def __init__(self, option_strings, dest=SUPPRESS, default=SUPPRESS, help=None):
super(_DeleteAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
files = get_files()
for filename in files:
Proxy.clear_proxies(os.path.expanduser(filename), True, False)
parser.exit()
def run():
parser = argparse.ArgumentParser(prog='setproxy', description="Easy proxy configuration")
parser.register('action', 'test', _TestAction)
parser.register('action', 'delete', _DeleteAction)
parser.add_argument('server', type=str, help='Proxy server address. ex: 202.141.80.19 or tamdil.iitg.ernet.in')
parser.add_argument('port', type=int, default=3128, help='Proxy port. commonly used ports are 3128, 8080 and 1080.',
nargs='?')
parser.add_argument('-u', '--user', type=str, default=None, help='Username for proxy authentication.')
parser.add_argument('-p', '--password', type=str, default=None, help='Password for proxy authentication.')
parser.add_argument('--http', dest='http', action='store_true', help='Enable http proxy setting (Default: Enabled)')
parser.add_argument('--https', dest='https', action='store_true',
help='Enable https proxy setting (Default: Enabled)')
parser.add_argument('--ftp', dest='ftp', action='store_true', help='Enable ftp proxy setting')
parser.add_argument('--socks', dest='socks', action='store_true', help='Enable socks proxy setting')
parser.add_argument('--no-http', dest='http', action='store_false', help='Disable http proxy setting')
parser.add_argument('--no-https', dest='https', action='store_false', help='Disable https proxy setting')
parser.add_argument('--no-ftp', dest='ftp', action='store_false',
help='Disable ftp proxy setting (Default: Disabled)')
parser.add_argument('--no-socks', dest='socks', action='store_false',
help='Disable socks proxy setting (Default: Disabled)')
parser.add_argument('--clear', dest='clear', action='store_true',
help='Delete old proxy settings, while creating new.')
parser.add_argument('-t', '--test', action='test', default=SUPPRESS, help='Display old proxy settings')
parser.add_argument('-d', '--delete', action='delete', default=SUPPRESS, help='Delete old proxy settings')
parser.set_defaults(http=True)
parser.set_defaults(https=True)
parser.set_defaults(ftp=False)
parser.set_defaults(socks=False)
parser.set_defaults(clear=False)
args = parser.parse_args()
proxy = Proxy(args.server, args.port, args.user, args.password)
proxy_type = ProxyType(args.https, args.http, args.ftp, args.socks)
proxy.set_enabled_proxies(proxy_type)
clear = args.clear
files = get_files()
if clear:
print "Deleting old proxy settings..."
for filename, patterns in files.iteritems():
print ''
proxy.process(os.path.expanduser(filename), patterns, clear=clear)
pass
run()
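# Example invocation (illustrative only; any proxy host and port will do):
#     sudo python setproxy.py proxy.example.com 3128 --no-https --clear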
| znck/setproxy | setproxy.py | Python | mit | 10,757 |
from player_class import Player, randint
"""
Dealer Class
"""
class Dealer(Player):
house_rule_names =[
"Dealer stand on all 17",
"Dealer hit on soft 17"
]
def __init__(self, n, c, t, h):
Player.__init__(self, n, c, t, 0, 0)
self._house = h
self._house_rule_name = Dealer.house_rule_names[self.house-1]
@property
def house(self):
return self._house
@house.setter
def house(self, h):
self._house = h
@property
def house_rule_name(self):
return self._house_rule_name
def greeting(self):
""" Print Dealer greeting to console."""
star ='*'
print(star*50)
print("Hello I'm your dealer {name} \nLet's Play Blackjack!"\
.format(name = self.name))
print(star*50)
def show_card(self):
""" Output dealer's 'up' card to the console."""
print"Dealer: {h}, hidden card".format(h = self.hand[0].display)
def highest(self, players):# This is not used! Not how blackjack is played
""" Check if dealer has higher score than all players."""
for player in players:
if player.get_score() > self.get_score():
return False
return True
def stand_on_all_17(self):
""" Dealer stands on any hand that is 17."""
if self.score < 17:
return True
else:
return False
def hit_on_soft_17(self):
""" Dealer hits on hands that are 17 if has an Ace in hand."""
if self.score < 17:
return True
elif self.score == 17:
for card in self.hand:
if card.value == 11:
return True
else:
return False
def check_hit(self):
""" Decide to hit or stay based on house rules. """
self.get_score()
if self.house == 1:
return self.stand_on_all_17()
if self.house == 2:
return self.hit_on_soft_17()
| pasquantonio/blackjack_terminal | src/dealer_class.py | Python | mit | 1,678 |
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
from ..orders.models import Order
@python_2_unicode_compatible
class PaymentForm(models.Model):
name = models.CharField(verbose_name=_("Name"), max_length=25)
def __str__(self):
return self.name
class Meta:
verbose_name = _("Form of payments")
verbose_name_plural = _("Forms of payments")
class PaymentQuerySet(models.QuerySet):
pass
@python_2_unicode_compatible
class Payment(models.Model):
order = models.OneToOneField(Order, verbose_name=_("Order"))
cashier = models.ForeignKey(settings.AUTH_USER_MODEL)
form = models.ForeignKey(PaymentForm, verbose_name=_("Form of payment"))
created = models.DateTimeField(verbose_name=_("Creation date"), auto_now_add=True)
objects = PaymentQuerySet.as_manager()
class Meta:
verbose_name = _("Payment")
verbose_name_plural = _("Payments")
ordering = ['created', ]
def __str__(self):
return _("Payment #%d") % (self.pk or 0, )
def get_update_url(self):
return reverse('payments:update', kwargs={'pk': str(self.pk)})
def get_delete_url(self):
return reverse('payments:delete', kwargs={'pk': str(self.pk)})
| ad-m/taravel | taravel/payments/models.py | Python | mit | 1,444 |
"""Abstract base class for programs.
"""
import os
from . import limit
import resource
import signal
import logging
from .errors import ProgramError
class Program(object):
"""Abstract base class for programs.
"""
runtime = 0
def run(self, infile='/dev/null', outfile='/dev/null', errfile='/dev/null',
args=None, timelim=1000, memlim=1024):
"""Run the program.
Args:
infile (str): name of file to pass on stdin
outfile (str): name of file to send stdout to
errfile (str): name of file to send stderr ro
args (list of str): additional command-line arguments to
pass to the program
timelim (int): CPU time limit in seconds
memlim (int): memory limit in MB
Returns:
pair (status, runtime):
status (int): exit status of the process
runtime (float): user+sys runtime of the process, in seconds
"""
runcmd = self.get_runcmd(memlim=memlim)
if runcmd == []:
raise ProgramError('Could not figure out how to run %s' % self)
if args is None:
args = []
if self.should_skip_memory_rlimit():
memlim = None
status, runtime = self.__run_wait(runcmd + args,
infile, outfile, errfile,
timelim, memlim)
self.runtime = max(self.runtime, runtime)
return status, runtime
def code_size(self):
"""Subclasses should override this method with the total size of the
source code."""
return 0
def should_skip_memory_rlimit(self):
"""Ugly workaround to accommodate Java -- the JVM will crash and burn
if there is a memory rlimit applied and this will probably not
change anytime soon [time of writing this: 2017-02-05], see
e.g.: https://bugs.openjdk.java.net/browse/JDK-8071445
Subclasses of Program where the associated program is (or may
be) a Java program need to override this method and return
True (which will cause the memory rlimit to not be applied).
2019-02-22: Turns out sbcl for Common Lisp also wants to roam
free and becomes sad when reined in by a memory rlimit.
"""
return False
@staticmethod
def __run_wait(argv, infile, outfile, errfile, timelim, memlim):
logging.debug('run "%s < %s > %s 2> %s"',
' '.join(argv), infile, outfile, errfile)
pid = os.fork()
if pid == 0: # child
try:
# The Python interpreter internally sets some signal dispositions
# to SIG_IGN (notably SIGPIPE), and unless we reset them manually
# this leaks through to the program we exec. That can has some
# funny side effects, like programs not crashing as expected when
# trying to write to an interactive validator that has terminated
# and closed the read end of a pipe.
#
# This *shouldn't* cause any verdict changes given the setup for
# interactive problems, but reset them anyway, for sanity.
if hasattr(signal, "SIGPIPE"):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
if hasattr(signal, "SIGXFZ"):
signal.signal(signal.SIGXFZ, signal.SIG_DFL)
if hasattr(signal, "SIGXFSZ"):
signal.signal(signal.SIGXFSZ, signal.SIG_DFL)
if timelim is not None:
limit.try_limit(resource.RLIMIT_CPU, timelim, timelim + 1)
if memlim is not None:
limit.try_limit(resource.RLIMIT_AS, memlim * (1024**2), resource.RLIM_INFINITY)
limit.try_limit(resource.RLIMIT_STACK,
resource.RLIM_INFINITY, resource.RLIM_INFINITY)
Program.__setfd(0, infile, os.O_RDONLY)
Program.__setfd(1, outfile,
os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
Program.__setfd(2, errfile,
os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.execvp(argv[0], argv)
except Exception as exc:
print("Oops. Fatal error in child process:")
print(exc)
os.kill(os.getpid(), signal.SIGTERM)
# Unreachable
logging.error("Unreachable part of run_wait reached")
os.kill(os.getpid(), signal.SIGTERM)
(pid, status, rusage) = os.wait4(pid, 0)
return status, rusage.ru_utime + rusage.ru_stime
@staticmethod
def __setfd(fd, filename, flag):
tmpfd = os.open(filename, flag)
os.dup2(tmpfd, fd)
os.close(tmpfd)
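# Minimal subclass sketch (my own illustration; the real program classes live in
# other modules of problemtools):
#
#     class EchoProgram(Program):
#         def get_runcmd(self, **kwargs):
#             return ['/bin/echo', 'hello']
#
#     status, runtime = EchoProgram().run(outfile='echo.out', timelim=1)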
| Kattis/problemtools | problemtools/run/program.py | Python | mit | 4,904 |
"""
__init__ file
"""
from .status import Status
from .atcab import *
| PhillyNJ/SAMD21 | cryptoauthlib/python/cryptoauthlib/__init__.py | Python | mit | 71 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, absolute_import
import frappe
import json
from email.utils import formataddr, parseaddr
from frappe.utils import get_url, get_formatted_email, cstr
from frappe.utils.file_manager import get_file
import frappe.email.smtp
from frappe import _
from frappe.model.document import Document
class Communication(Document):
no_feed_on_delete = True
"""Communication represents an external communication like Email."""
def get_parent_doc(self):
"""Returns document of `reference_doctype`, `reference_doctype`"""
if not hasattr(self, "parent_doc"):
if self.reference_doctype and self.reference_name:
self.parent_doc = frappe.get_doc(self.reference_doctype, self.reference_name)
else:
self.parent_doc = None
return self.parent_doc
def on_update(self):
"""Update parent status as `Open` or `Replied`."""
self.update_parent()
def update_parent(self):
"""Update status of parent document based on who is replying."""
parent = self.get_parent_doc()
if not parent:
return
status_field = parent.meta.get_field("status")
if status_field and "Open" in (status_field.options or "").split("\n"):
to_status = "Open" if self.sent_or_received=="Received" else "Replied"
if to_status in status_field.options.splitlines():
frappe.db.set_value(parent.doctype, parent.name, "status", to_status)
def send(self, print_html=None, print_format=None, attachments=None,
send_me_a_copy=False):
"""Send communication via Email.
:param print_html: Send given value as HTML attachment.
:param print_format: Attach print format of parent document."""
self.send_me_a_copy = send_me_a_copy
self.notify(print_html, print_format, attachments)
def set_incoming_outgoing_accounts(self):
self.incoming_email_account = self.outgoing_email_account = None
if self.reference_doctype:
self.incoming_email_account = frappe.db.get_value("Email Account",
{"append_to": self.reference_doctype, "enable_incoming": 1}, "email_id")
self.outgoing_email_account = frappe.db.get_value("Email Account",
{"append_to": self.reference_doctype, "enable_outgoing": 1}, "email_id")
if not self.incoming_email_account:
self.incoming_email_account = frappe.db.get_value("Email Account", {"default_incoming": 1}, "email_id")
if not self.outgoing_email_account:
self.outgoing_email_account = frappe.db.get_value("Email Account", {"default_outgoing": 1}, "email_id")
def notify(self, print_html=None, print_format=None, attachments=None, except_recipient=False):
self.prepare_to_notify(print_html, print_format, attachments)
recipients = self.get_recipients(except_recipient=except_recipient)
frappe.sendmail(
recipients=recipients,
sender=self.sender,
reply_to=self.incoming_email_account,
subject=self.subject,
content=self.content,
reference_doctype=self.reference_doctype,
reference_name=self.reference_name,
attachments=self.attachments,
message_id=self.name,
unsubscribe_message=_("Leave this conversation"),
bulk=True
)
def prepare_to_notify(self, print_html=None, print_format=None, attachments=None):
"""Prepare to make multipart MIME Email
:param print_html: Send given value as HTML attachment.
:param print_format: Attach print format of parent document."""
if print_format:
self.content += self.get_attach_link(print_format)
self.set_incoming_outgoing_accounts()
if not self.sender:
self.sender = formataddr([frappe.session.data.full_name or "Notification", self.outgoing_email_account])
self.attachments = []
if print_html or print_format:
self.attachments.append(frappe.attach_print(self.reference_doctype, self.reference_name,
print_format=print_format, html=print_html))
if attachments:
if isinstance(attachments, basestring):
attachments = json.loads(attachments)
for a in attachments:
if isinstance(a, basestring):
# is it a filename?
try:
file = get_file(a)
self.attachments.append({"fname": file[0], "fcontent": file[1]})
except IOError:
frappe.throw(_("Unable to find attachment {0}").format(a))
else:
self.attachments.append(a)
def get_recipients(self, except_recipient=False):
"""Build a list of users to which this email should go to"""
# [EDGE CASE] self.recipients can be None when an email is sent as BCC
original_recipients = [s.strip() for s in cstr(self.recipients).split(",")]
recipients = original_recipients[:]
if self.reference_doctype and self.reference_name:
recipients += self.get_earlier_participants()
recipients += self.get_commentors()
recipients += self.get_assignees()
recipients += self.get_starrers()
# remove unsubscribed recipients
unsubscribed = [d[0] for d in frappe.db.get_all("User", ["name"], {"thread_notify": 0}, as_list=True)]
email_accounts = [d[0] for d in frappe.db.get_all("Email Account", ["email_id"], {"enable_incoming": 1}, as_list=True)]
sender = parseaddr(self.sender)[1]
filtered = []
for e in list(set(recipients)):
if (e=="Administrator") or ((e==self.sender) and (e not in original_recipients)) or \
(e in unsubscribed) or (e in email_accounts):
continue
email_id = parseaddr(e)[1]
if email_id==sender or email_id in unsubscribed or email_id in email_accounts:
continue
if except_recipient and (e==self.recipients or email_id==self.recipients):
# while pulling email, don't send email to current recipient
continue
if e not in filtered and email_id not in filtered:
filtered.append(e)
if getattr(self, "send_me_a_copy", False):
filtered.append(self.sender)
return filtered
def get_starrers(self):
"""Return list of users who have starred this document."""
if self.reference_doctype and self.reference_name:
return self.get_parent_doc().get_starred_by()
else:
return []
def get_earlier_participants(self):
return frappe.db.sql_list("""
select distinct sender
from tabCommunication where
reference_doctype=%s and reference_name=%s""",
(self.reference_doctype, self.reference_name))
def get_commentors(self):
return frappe.db.sql_list("""
select distinct comment_by
from tabComment where
comment_doctype=%s and comment_docname=%s and
ifnull(unsubscribed, 0)=0 and comment_by!='Administrator'""",
(self.reference_doctype, self.reference_name))
def get_assignees(self):
return [d.owner for d in frappe.db.get_all("ToDo", filters={"reference_type": self.reference_doctype,
"reference_name": self.reference_name, "status": "Open"}, fields=["owner"])]
def get_attach_link(self, print_format):
"""Returns public link for the attachment via `templates/emails/print_link.html`."""
return frappe.get_template("templates/emails/print_link.html").render({
"url": get_url(),
"doctype": self.reference_doctype,
"name": self.reference_name,
"print_format": print_format,
"key": self.get_parent_doc().get_signature()
})
def on_doctype_update():
"""Add index in `tabCommunication` for `(reference_doctype, reference_name)`"""
frappe.db.add_index("Communication", ["reference_doctype", "reference_name"])
@frappe.whitelist()
def make(doctype=None, name=None, content=None, subject=None, sent_or_received = "Sent",
sender=None, recipients=None, communication_medium="Email", send_email=False,
print_html=None, print_format=None, attachments='[]', ignore_doctype_permissions=False,
send_me_a_copy=False):
"""Make a new communication.
:param doctype: Reference DocType.
:param name: Reference Document name.
:param content: Communication body.
:param subject: Communication subject.
:param sent_or_received: Sent or Received (default **Sent**).
	:param sender: Communication sender (default current user).
	:param recipients: Communication recipients as list.
	:param communication_medium: Medium of communication (default **Email**).
	:param send_email: Send via email (default **False**).
:param print_html: HTML Print format to be sent as attachment.
:param print_format: Print Format name of parent document to be sent as attachment.
:param attachments: List of attachments as list of files or JSON string.
:param send_me_a_copy: Send a copy to the sender (default **False**).
"""
is_error_report = (doctype=="User" and name==frappe.session.user and subject=="Error Report")
if doctype and name and not is_error_report and not frappe.has_permission(doctype, "email", name) and not ignore_doctype_permissions:
raise frappe.PermissionError("You are not allowed to send emails related to: {doctype} {name}".format(
doctype=doctype, name=name))
if not sender and frappe.session.user != "Administrator":
sender = get_formatted_email(frappe.session.user)
comm = frappe.get_doc({
"doctype":"Communication",
"subject": subject,
"content": content,
"sender": sender,
"recipients": recipients,
"communication_medium": "Email",
"sent_or_received": sent_or_received,
"reference_doctype": doctype,
"reference_name": name
})
comm.insert(ignore_permissions=True)
if send_email:
comm.send(print_html, print_format, attachments, send_me_a_copy=send_me_a_copy)
return comm.name
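# Minimal usage sketch for the whitelisted `make` function above (illustrative
# values only):
#     make(doctype="ToDo", name="TD-0001", content="<p>Ping</p>",
#          subject="Ping", recipients="user@example.com", send_email=True)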
@frappe.whitelist()
def get_convert_to():
return frappe.get_hooks("communication_convert_to")
| gangadharkadam/vlinkfrappe | frappe/core/doctype/communication/communication.py | Python | mit | 9,355 |
'''Trains a stacked what-where autoencoder built on residual blocks on the
FASHION MNIST dataset. It exemplifies two influential methods that have been developed
in the past few years.
The first is the idea of properly 'unpooling.' During any max pool, the
exact location (the 'where') of the maximal value in a pooled receptive field
is lost; however, it can be very useful in the overall reconstruction of an
input image. Therefore, if the 'where' is handed from the encoder
to the corresponding decoder layer, features being decoded can be 'placed' in
the right location, allowing for reconstructions of much higher fidelity.
References:
[1]
'Visualizing and Understanding Convolutional Networks'
Matthew D Zeiler, Rob Fergus
https://arxiv.org/abs/1311.2901v3
[2]
'Stacked What-Where Auto-encoders'
Junbo Zhao, Michael Mathieu, Ross Goroshin, Yann LeCun
https://arxiv.org/abs/1506.02351v8
The second idea exploited here is that of residual learning. Residual blocks
ease the training process by allowing skip connections that give the network
the ability to be as linear (or non-linear) as the data sees fit. This allows
for much deeper networks to be easily trained. The residual element seems to
be advantageous in the context of this example as it allows a nice symmetry
between the encoder and decoder. Normally, in the decoder, the final
projection to the space where the image is reconstructed is linear, however
this does not have to be the case for a residual block as the degree to which
its output is linear or non-linear is determined by the data it is fed.
However, in order to cap the reconstruction in this example, a hard sigmoid is
applied as a bias because we know the Fashion MNIST images are mapped to [0,1].
References:
[3]
'Deep Residual Learning for Image Recognition'
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
https://arxiv.org/abs/1512.03385v1
[4]
'Identity Mappings in Deep Residual Networks'
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
https://arxiv.org/abs/1603.05027v3
'''
from __future__ import print_function
import numpy as np
import fashion_mnist
from keras.models import Model
from keras.layers import Activation
from keras.layers import UpSampling2D, Conv2D, MaxPooling2D
from keras.layers import Input, BatchNormalization, ELU
import matplotlib.pyplot as plt
import keras.backend as K
from keras import layers
def convresblock(x, nfeats=8, ksize=3, nskipped=2, elu=True):
"""The proposed residual block from [4].
Running with elu=True will use ELU nonlinearity and running with
elu=False will use BatchNorm + RELU nonlinearity. While ELU's are fast
due to the fact they do not suffer from BatchNorm overhead, they may
overfit because they do not offer the stochastic element of the batch
formation process of BatchNorm, which acts as a good regularizer.
# Arguments
x: 4D tensor, the tensor to feed through the block
nfeats: Integer, number of feature maps for conv layers.
ksize: Integer, width and height of conv kernels in first convolution.
nskipped: Integer, number of conv layers for the residual function.
elu: Boolean, whether to use ELU or BN+RELU.
# Input shape
4D tensor with shape:
`(batch, channels, rows, cols)`
# Output shape
4D tensor with shape:
`(batch, filters, rows, cols)`
"""
y0 = Conv2D(nfeats, ksize, padding='same')(x)
y = y0
for i in range(nskipped):
if elu:
y = ELU()(y)
else:
y = BatchNormalization(axis=1)(y)
y = Activation('relu')(y)
y = Conv2D(nfeats, 1, padding='same')(y)
return layers.add([y0, y])
def getwhere(x):
''' Calculate the 'where' mask that contains switches indicating which
index contained the max value when MaxPool2D was applied. Using the
gradient of the sum is a nice trick to keep everything high level.'''
y_prepool, y_postpool = x
return K.gradients(K.sum(y_postpool), y_prepool)
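# Illustrative note (not part of the original script): for
#     y_postpool = MaxPooling2D(pool_size)(y_prepool)
# the gradient of K.sum(y_postpool) with respect to y_prepool is 1 exactly at
# the argmax position of each pooling window and 0 everywhere else, so the
# returned tensor serves as the binary 'where' mask consumed by the decoder.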
if K.backend() == 'tensorflow':
raise RuntimeError('This example can only run with the '
'Theano backend for the time being, '
'because it requires taking the gradient '
'of a gradient, which isn\'t '
'supported for all TF ops.')
# This example assume 'channels_first' data format.
K.set_image_data_format('channels_first')
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, _), (x_test, _) = fashion_mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# The size of the kernel used for the MaxPooling2D
pool_size = 2
# The total number of feature maps at each layer
nfeats = [8, 16, 32, 64, 128]
# The sizes of the pooling kernel at each layer
pool_sizes = np.array([1, 1, 1, 1, 1]) * pool_size
# The convolution kernel size
ksize = 3
# Number of epochs to train for
epochs = 5
# Batch size during training
batch_size = 128
if pool_size == 2:
# if using a 5 layer net of pool_size = 2
x_train = np.pad(x_train, [[0, 0], [0, 0], [2, 2], [2, 2]],
mode='constant')
x_test = np.pad(x_test, [[0, 0], [0, 0], [2, 2], [2, 2]], mode='constant')
nlayers = 5
elif pool_size == 3:
# if using a 3 layer net of pool_size = 3
x_train = x_train[:, :, :-1, :-1]
x_test = x_test[:, :, :-1, :-1]
nlayers = 3
else:
import sys
sys.exit('Script supports pool_size of 2 and 3.')
# Shape of input to train on (note that model is fully convolutional however)
input_shape = x_train.shape[1:]
# The final list of the size of axis=1 for all layers, including input
nfeats_all = [input_shape[0]] + nfeats
# First build the encoder, all the while keeping track of the 'where' masks
img_input = Input(shape=input_shape)
# We push the 'where' masks to the following list
wheres = [None] * nlayers
y = img_input
for i in range(nlayers):
y_prepool = convresblock(y, nfeats=nfeats_all[i + 1], ksize=ksize)
y = MaxPooling2D(pool_size=(pool_sizes[i], pool_sizes[i]))(y_prepool)
wheres[i] = layers.Lambda(
getwhere, output_shape=lambda x: x[0])([y_prepool, y])
# Now build the decoder, and use the stored 'where' masks to place the features
for i in range(nlayers):
ind = nlayers - 1 - i
y = UpSampling2D(size=(pool_sizes[ind], pool_sizes[ind]))(y)
y = layers.multiply([y, wheres[ind]])
y = convresblock(y, nfeats=nfeats_all[ind], ksize=ksize)
# Use hard_sigmoid to clip the range of the reconstruction
y = Activation('hard_sigmoid')(y)
# Define the model and its mean squared error loss, and compile it with Adam
model = Model(img_input, y)
model.compile('adam', 'mse')
# Fit the model
model.fit(x_train, x_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, x_test))
# Plot
x_recon = model.predict(x_test[:25])
x_plot = np.concatenate((x_test[:25], x_recon), axis=1)
x_plot = x_plot.reshape((5, 10, input_shape[-2], input_shape[-1]))
x_plot = np.vstack([np.hstack(x) for x in x_plot])
plt.figure()
plt.axis('off')
plt.title('Test Samples: Originals/Reconstructions')
plt.imshow(x_plot, interpolation='none', cmap='gray')
plt.savefig('reconstructions.png')
| asaleh/Fashion-MNIST-Keras | fashion_mnist_swwae.py | Python | mit | 7,773 |
import unittest
from clickatell import Transport
import json
class TransportTest(unittest.TestCase):
def test_parseLegacyFailure(self):
response = {'body': 'ERR: Some exception'}
transport = Transport()
self.assertRaises(ClickatellError, lambda: transport.parseLegacy(response))
def test_parseLegacyMultiFailure(self):
response = {'body': 'ERR: 301, Some Failure\nOK:12345'}
transport = Transport()
result = transport.parseLegacy(response)
self.assertTrue(len(result) == 2)
self.assertTrue(result[0]['code'] == '301')
self.assertTrue(result[1]['OK'] == '12345')
def test_parseLegacy(self):
response = {'body': 'OK: 1234 Test: 12345'}
transport = Transport()
result = transport.parseLegacy(response)
self.assertTrue(result['OK'] == '1234')
self.assertTrue(result['Test'] == '12345')
def test_parseRestFailure(self):
response = {'body': json.dumps({'error':{'description':'Error','code':'301'}})}
transport = Transport()
self.assertRaises(ClickatellError, lambda: transport.parseRest(response))
def test_parseRest(self):
response = {'body': json.dumps({'data':True})}
transport = Transport()
self.assertTrue(transport.parseRest(response))
def test_merge(self):
transport = Transport()
dict1 = {'test': 1}
dict2 = {'test': 2, 'test2': 3}
dict3 = {'test1': 1, 'test2': 2}
merge = transport.merge(dict1, dict2, dict3)
self.assertTrue(merge['test'] == 2)
self.assertTrue(merge['test2'] == 2)
self.assertTrue(merge['test1'] == 1) | clickatell/clickatell-python | test/test_transport.py | Python | mit | 1,677 |
from setuptools import setup, find_packages
import os
version = '1.2~dev'
base = os.path.dirname(__file__)
readme = open(os.path.join(base, 'README.rst')).read()
changelog = open(os.path.join(base, 'CHANGELOG.rst')).read()
setup(name='restdns',
version=version,
description='Rest API for DNS',
long_description=readme + '\n' + changelog,
classifiers=[],
keywords='django dns rest',
author='Antoine Millet',
author_email='[email protected]',
url='https://github.com/NaPs/Restdns',
license='MIT',
data_files=(
('/etc/', ('etc/restdns.conf',)),
),
scripts=['restdnsadm'],
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=True,
install_requires=['django'])
| NaPs/restdns | setup.py | Python | mit | 817 |
import copy
import json
import re
from urllib.parse import urlparse
from svtplay_dl.error import ServiceError
from svtplay_dl.fetcher.hds import hdsparse
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.fetcher.http import HTTP
from svtplay_dl.service import OpenGraphThumbMixin
from svtplay_dl.service import Service
class Vg(Service, OpenGraphThumbMixin):
supported_domains = ["vg.no", "vgtv.no"]
def get(self):
data = self.get_urldata()
match = re.search(r'data-videoid="([^"]+)"', data)
if not match:
parse = urlparse(self.url)
match = re.search(r"video/(\d+)/", parse.fragment)
if not match:
yield ServiceError("Can't find video file for: {}".format(self.url))
return
videoid = match.group(1)
data = self.http.request("get", "http://svp.vg.no/svp/api/v1/vgtv/assets/{}?appName=vgtv-website".format(videoid)).text
jsondata = json.loads(data)
self.output["title"] = jsondata["title"]
if "hds" in jsondata["streamUrls"]:
streams = hdsparse(
self.config,
self.http.request("get", jsondata["streamUrls"]["hds"], params={"hdcore": "3.7.0"}),
jsondata["streamUrls"]["hds"],
output=self.output,
)
for n in list(streams.keys()):
yield streams[n]
if "hls" in jsondata["streamUrls"]:
streams = hlsparse(
self.config,
self.http.request("get", jsondata["streamUrls"]["hls"]),
jsondata["streamUrls"]["hls"],
output=self.output,
)
for n in list(streams.keys()):
yield streams[n]
if "mp4" in jsondata["streamUrls"]:
yield HTTP(copy.copy(self.config), jsondata["streamUrls"]["mp4"], output=self.output)
| olof/debian-svtplay-dl | lib/svtplay_dl/service/vg.py | Python | mit | 1,907 |
"""
CS256 ISA Assembler
Author: Mark Liffiton
"""
import collections
import configparser
import re
import sys
from pathlib import PurePath
class AssemblerException(Exception):
def __init__(self, msg, data=None, lineno=None, instruction=None):
self.msg = msg
self.data = data
self.lineno = lineno
self.inst = instruction
def __str__(self):
ret = self.msg
if self.data:
ret += ": {}".format(self.data)
if self.inst:
ret += "\n In line {}: {}".format(self.lineno, self.inst)
return ret
class Assembler:
"""Assembles CS256 assembly code into machine code following definitions
given in the specified config file."""
def __init__(self, configfile, info_callback=None):
# manipulate configfile and samplefile as PurePath objects
self.configfile = PurePath(configfile)
        config = configparser.ConfigParser()
config.read(self.configfile)
self.name = config.get('general', 'name')
self.inst_size = config.getint('general', 'inst_size')
self.max_reg = config.getint('general', 'max_reg')
self.reg_prefix = config.get('general', 'reg_prefix')
# Samplefile should be in same directory as config file
self.samplefile = self.configfile.parent / config.get('general', 'samplefile')
self.special_regs = \
{x: int(y) for x, y in config.items('special_regs')}
self.field_sizes = \
{x: int(y) for x, y in config.items('field_sizes')}
self.instructions = collections.defaultdict(dict)
for inst, opcode in config.items('instruction_opcodes'):
self.instructions[inst]['opcode'] = int(opcode)
for inst, parts in config.items('instruction_parts'):
self.instructions[inst]['parts'] = parts
for inst, tweak in config.items('instruction_tweaks'):
self.instructions[inst]['tweak'] = tweak
if 'instruction_funccodes' in config.sections():
for inst, funccode in config.items('instruction_funccodes'):
self.instructions[inst]['funccode'] = int(funccode)
self.report_commas = True
self.palette = [
'#6D993B',
'#A37238',
'#AC4548',
'#6048A3',
'#449599',
]
# create sizes and arg counts for each instruction
# modifies instruction dictionaries within self.instructions
for inst_info in self.instructions.values():
# figure out number of required arguments to instruction
# (number of parts, not counting opcode(s) or function code(s) or 'x' space)
parts = inst_info['parts']
inst_info['args'] = len(parts) - parts.count('o') - parts.count('f') - parts.count('x')
# figure out sizes (for shift amounts)
sizes = []
rem = self.inst_size # remaining bits
for c in inst_info['parts']:
if c in self.field_sizes:
sizes.append(self.field_sizes[c])
rem -= self.field_sizes[c]
if rem:
sizes.append(rem) # immediate (or extra) gets all remaining bits
inst_info['sizes'] = sizes
# Used internally
self.inst_regex = r"({})\s".format("|".join(self.instructions))
self.labels = {}
self.cur_inst = None # used for error reporting
self.cur_lineno = None # used for error reporting
self.info_callback = info_callback
def register_info_callback(self, info_callback):
self.info_callback = info_callback
def assemble_instruction(self, inst, lineno, pc):
"""Produce the binary encoding of one instruction."""
assert re.match(self.inst_regex, inst)
self.cur_inst = inst
self.cur_lineno = lineno
if "," in inst:
if self.report_commas:
self.report_inf("Invalid comma found (stripping all commas)", inst)
self.report_commas = False
inst = inst.replace(',',' ')
# split instruction into parts
inst_parts = inst.split()
op = inst_parts[0]
args = inst_parts[1:]
inst_info = self.instructions[op]
        if inst_info.get('tweak') == "flip_regs":  # .get(): not every instruction has a tweak entry
# Swap args[0] and args[1]
# e.g., for a Store instruction w/ dest address written first but it needs to be 2nd reg.
args[0], args[1] = args[1], args[0]
# # !!! For sb: given "sb r1 r2 imm", we want r1 in s1 (place 3) and r2 in s0 (place 1)"
# if op == 'sb':
# data_r = parts[2]
# addr_r = parts[1]
# # !!! insert empty 'part' for the unused portion of the instruction
# parts.insert(2, 0)
# parts[1] = addr_r
# parts[3] = data_r
#
# # !!! For lb: given "lb r1 r2 imm", we want r1 in dest (place 2) and r2 in s0 (place 1)"
# if op == 'lb':
# data_r = parts[2]
# addr_r = parts[1]
# # !!! insert empty 'part' for the unused portion of the instruction
# parts.insert(3, 0)
# parts[1] = addr_r
# parts[2] = data_r
# check for the correct number of arguments
if inst_info['args'] != len(args):
self.report_err(
"Incorrect number of arguments in instruction (expected {}, got {})".format(inst_info['args'], len(args)),
inst
)
sys.exit(2)
# parse each part (get a numerical value for it)
# and shift appropriate amount, summing each
instruction = 0
parts = inst_info['parts']
sizes = inst_info['sizes']
for i in range(len(parts)):
c = parts[i]
size = sizes[i]
shamt = sum(sizes[i+1:])
# r, l, j, and i have arguments, opcode and funccode do not
if c in ['r', 'l', 'j', 'i']:
arg = args.pop(0)
val = self.parse_part(c, inst_info, pc, arg)
else:
val = self.parse_part(c, inst_info, pc)
# check immediate or branch size
if c in ['l', 'j', 'i']:
if val >= 2**(size-1) or val < -2**(size-1):
self.report_err(
"Immediate/Label out of range",
"{}-bit space, but |{}| > 2^{}".format(size, val, size-1)
)
sys.exit(2)
# fit negative values into given # of bits
val = val % 2**size
# print "Shifting: {} << {}".format(val, shamt)
instruction += val << shamt
return instruction
def parse_part(self, type, inst_info, pc, arg=None):
"""Parse one argument of an instruction (opcode, register,
immediate, or label).
"""
if type == 'o':
return inst_info['opcode']
elif type == 'f':
return inst_info['funccode']
elif type == 'r' and arg in self.special_regs:
return self.special_regs[arg]
elif type == 'r' and re.match(r"^{}\d+$".format(re.escape(self.reg_prefix)), arg):
            regindex = int(arg[len(self.reg_prefix):])
if regindex > self.max_reg:
self.report_err("Register out of range", regindex)
sys.exit(2)
return regindex
elif type == 'i' and re.match(r"^-?\d+$|^-?0x[a-fA-F0-9]+$|^-?0b[01]+$", arg):
try:
return int(arg,0)
except ValueError as e:
self.report_err(str(e))
elif type == 'j' and arg in self.labels:
return self.labels[arg]
elif type == 'l' and arg in self.labels:
# offset from pc, so store instruction count - pc
return self.labels[arg] - pc
elif type == 'x': # unused - fill w/ zero bits
return 0
else:
self.report_err("Invalid instruction argument", arg)
sys.exit(2)
def assemble_instructions(self, instructions):
"""Assemble a list of instructions."""
return [self.assemble_instruction(inst[0], inst[1], i) for i, inst in enumerate(instructions)]
def first_pass(self, lines):
"""Take a first pass through the code, cleaning, stripping, and
determining label addresses."""
# clear the labels (in case this object is reused)
self.labels = {}
instructions = []
for lineno, line in enumerate(lines):
# one-based counting for lines
lineno += 1
# strip comments
line = line.partition("#")[0]
# clean up
line = line.lower().strip()
if not line:
# it's a comment or blank!
continue
if re.match(self.inst_regex, line):
# it's an instruction!
instructions.append((line, lineno))
elif re.match("^[a-z][a-z0-9]*:$", line):
# store the label (strip the colon)
self.labels[line[:-1]] = len(instructions)
else:
# Uh oh...
self.report_inf("Invalid line (ignoring)", "{}: {}".format(lineno, line))
return instructions
def assemble_lines(self, lines):
"""Fully assemble a list of lines of assembly code.
Returns a list of binary-encoded instructions.
"""
instructions = self.first_pass(lines)
instructions_bin = self.assemble_instructions(instructions)
return (instructions, instructions_bin)
def prettyprint_assembly(self, instructions, instructions_bin, colorize=False):
"""Return a pretty-printed string of the instructions and their
assembled machine code to stdout.
"""
# setup linelabels to map line numbers to labels
linelabels = {line: label for (label, line) in self.labels.items()}
if instructions:
max_inst_width = max(len(inst[0]) for inst in instructions)
max_inst_width = max(max_inst_width, 12) # always at *least* 12 chars
else:
max_inst_width = 15
header = " #: {0:<{1}} {2:<20} {3}\n".format("Instruction", max_inst_width, "Binary", "Hex")
header += "-" * len(header) + "\n"
ret = header
for i in range(len(instructions)):
inststr = instructions[i][0]
instparts = inststr.split()
op = instparts[0]
if colorize:
for j in range(len(instparts)):
instparts[j] = "<span style='color: {}'>{}</span>".format(self.palette[j], instparts[j])
            # Add spaces to pad to the instruction column width (max_inst_width).
            # (Can't use ljust because of added <span> chars.)
inststr = " ".join(instparts) + (" " * (max_inst_width - len(inststr)))
# rjust() adds leading 0s if needed.
instbin = bin(instructions_bin[i])[2:].rjust(16, '0')
instbinparts = []
j = 0
sizes = self.instructions[op]['sizes']
for size in sizes:
part = instbin[j:j+size]
instbinparts.append(part)
j += size
if colorize:
for j in range(len(sizes)):
instbinparts[j] = "<span style='color: {}'>{}</span>".format(self.palette[j], instbinparts[j])
# Add spaces between and after all parts and padding to 20 chars,
# accounting for bits of the instruction and spaces between parts.
# (Can't use ljust because of added <span> chars.)
instbinstr = " ".join(instbinparts) + (" " * (20 - self.inst_size - (len(sizes)-1)))
insthex = "{:04x}".format(instructions_bin[i])
if i in linelabels:
ret += linelabels[i] + ":\n"
# (Can't use format string justification because of added <span> chars.)
ret += "{:3}: {} {} {}\n".format(i, inststr, instbinstr, insthex)
return ret
def output_bin(self, filename, bytes):
"""Create a binary image file for the given bytes."""
with open(filename, 'wb') as f:
f.write(bytes)
def output_logisim_img(self, filename, bytes):
"""Create a Logisim memory image file for the given bytes."""
file_header = "v2.0 raw\n" # header required by Logisim to read memory image files
with open(filename, 'w') as f:
f.write(file_header)
f.write(" ".join("{:02x}".format(byte) for byte in bytes))
def output_sim_bin(self, filename, words):
"""Create a 256sim memory image file for the given bytes."""
with open(filename, 'w') as f:
f.write(" ".join("{:04x}".format(word) for word in words))
f.write("\n")
def assemble_file(self, filename, format, outfiles):
"""Fully assemble a Logisim memory image file containing CS256 ISA assembly code."""
self.report_inf("Assembling", filename)
with open(filename) as f:
lines = f.readlines()
(instructions, instructions_bin) = self.assemble_lines(lines)
print(self.prettyprint_assembly(instructions, instructions_bin))
bytes_low = bytes(word % 256 for word in instructions_bin)
bytes_high = bytes(word // 256 for word in instructions_bin)
if format == "bin":
self.output_bin(outfiles[0], bytes_low)
self.output_bin(outfiles[1], bytes_high)
elif format == "256sim":
self.output_sim_bin(outfiles[0], instructions_bin)
elif format == "logisim":
self.output_logisim_img(outfiles[0], bytes_low)
self.output_logisim_img(outfiles[1], bytes_high)
self.report_inf("Generated", ", ".join(outfiles))
def report_err(self, msg, data=""):
raise AssemblerException(msg, data, self.cur_lineno, self.cur_inst)
def report_inf(self, msg, data=""):
self.info_callback( (msg, data) )
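# --- Hedged usage sketch (not part of the original module) ---
# 'isa_config.ini' and 'program.asm' are placeholder file names; any config
# providing the sections read in __init__ ('general', 'special_regs',
# 'field_sizes', 'instruction_opcodes', ...) should work the same way.
if __name__ == '__main__':
    def _print_info(msg_data):
        msg, data = msg_data
        print("{}: {}".format(msg, data))
    asm = Assembler('isa_config.ini', info_callback=_print_info)
    asm.assemble_file('program.asm', 'logisim', ['program_0.img', 'program_1.img'])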
| liffiton/256asm | assembler.py | Python | mit | 14,120 |
# -*- coding: utf-8 -*-
"""Module for constructing <head> tag."""
from __future__ import absolute_import
from ...templates.html.tags import head
class Head(object):
"""Class for constructing <head> tag.
Args:
text (str): Specifies the head text. (As in <head>{text}</head>)
.. versionadded:: 0.3.0
"""
def __init__(self, text=None):
# TODO: Add the ability to validate which inner tags can go into the
# <head> tag.
self.tag = 'head'
self.values = {'text': text}
def construct(self):
"""Returns the constructed tag <head>."""
return head.render(self.values)
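# Hedged usage sketch (not part of the original module); assumes the korona
# package is importable (adjust the import path if Head is not re-exported):
#
#     from korona.html.tags.head import Head
#     markup = Head(text='My Page').construct()   # e.g. '<head>My Page</head>'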
| bharadwajyarlagadda/korona | korona/html/tags/head.py | Python | mit | 645 |
"""
EventWall
Development Server Script
"""
from os import environ
from eventwall import app
if __name__ == '__main__':
HOST = environ.get('SERVER_HOST', 'localhost')
try:
PORT = int(environ.get('SERVER_PORT', '5555'))
except ValueError:
PORT = 5555
app.run(HOST, PORT)
| stevenmirabito/eventwall | runserver.py | Python | mit | 304 |
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
image_size = 28
num_labels = 10
def reformat(dataset, labels):
dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
# Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
train_subset = 10000
batch_size = 128
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
num_steps = 801
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
| dashmoment/moxa_ai_training | tutorial/01_DNN/dnn_SGD.py | Python | mit | 4,015 |
from . import app
from flask import Markup
import mistune as md
@app.template_filter()
def markdown(text):
return Markup(md.markdown(text,escape=True))
@app.template_filter()
def dateformat(date, format):
if not date:
return None
return date.strftime(format).lstrip('0')
@app.template_filter()
def datetimeformat(value, format='%M:%S / %m-%s'):
return value.strftime(format)
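# Hedged usage sketch (not part of the original module): once this module is
# imported and the filters are registered on `app`, Jinja2 templates can use
# them, e.g. (variable names below are illustrative only):
#
#     {{ clue.text|markdown }}
#     {{ puzzle.date|dateformat('%m/%d/%Y') }}
#     {{ entry.created|datetimeformat }}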
| j10sanders/crossword | crossword/filters.py | Python | mit | 406 |
class ValidWordAbbr(object):
def __init__(self, dictionary):
"""
initialize your data structure here.
:type dictionary: List[str]
"""
self.word_dict={}
for word in dictionary:
key=self.create_key(word)
self.word_dict[key]=self.word_dict.get(key,[])+[word]
def isUnique(self, word):
"""
check if a word is unique.
:type word: str
:rtype: bool
"""
key=self.create_key(word)
# no abbrivation
if len(key)< 3:
return True
if key not in self.word_dict:
return True
else:
            # if the word list has only one item and it is this word
            if len(self.word_dict[key]) == 1 and word in self.word_dict[key]:
return True
return False
def create_key(self,word):
if not word:
return ""
if len(word)< 3:
return word
return word[0]+str(len(word)-2)+word[-1]
# Your ValidWordAbbr object will be instantiated and called as such:
# vwa = ValidWordAbbr(dictionary)
# vwa.isUnique("word")
# vwa.isUnique("anotherWord") | Tanych/CodeTracking | 288-Unique-Word-Abbreviation/solution.py | Python | mit | 1,176 |
import logging
import numpy
from OpenGL.GL import *
from PIL import Image
from mgl2d.math.vector2 import Vector2
logger = logging.getLogger(__name__)
class Texture(object):
@classmethod
def load_from_file(cls, filename, mode=GL_RGBA):
image = Image.open(filename)
logger.debug(f'Loading \'{filename}\' mode:{image.mode}')
if mode == GL_RGBA and image.mode != 'RGBA':
image_new = image.convert('RGBA')
image.close()
image = image_new
texture = Texture()
texture._size.x = image.size[0]
texture._size.y = image.size[1]
# pixels = numpy.array([component for pixel in image.getdata() for component in pixel], dtype=numpy.uint8)
# mode_to_num_bytes = {'P': 1, 'RGB': 3, 'RGBA': 4}
# numpy.array(image.getdata(), numpy.uint8).reshape(image.size[1], image.size[0], mode_to_num_bytes[image.mode])
pixels = image.tobytes("raw", "RGBA", 0, 1)
texture.texture_id = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, texture.texture_id)
glTexParameter(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
glTexParameter(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
glTexParameter(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameter(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexImage2D(GL_TEXTURE_2D, 0, mode, texture.width, texture.height, 0, mode, GL_UNSIGNED_BYTE, pixels)
glBindTexture(GL_TEXTURE_2D, 0)
image.close()
return texture
@classmethod
def create_with_size(cls, width, height, mode=GL_RGBA):
        # PIL expects a string mode ('RGBA'/'RGB'), not the OpenGL constant held in `mode`.
        pil_mode = 'RGBA' if mode == GL_RGBA else 'RGB'
        image = Image.new(mode=pil_mode, size=(width, height))
pixels = numpy.array([component for pixel in image.getdata() for component in pixel], dtype=numpy.uint8)
texture = Texture()
texture._size.x = width
texture._size.y = height
texture.texture_id = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, texture.texture_id)
glTexParameter(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
glTexParameter(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
glTexParameter(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameter(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexImage2D(GL_TEXTURE_2D, 0, mode, width, height, 0, mode, GL_UNSIGNED_BYTE, pixels)
glBindTexture(GL_TEXTURE_2D, 0)
image.close()
return texture
@classmethod
def create_with_data(cls, width, height, texture_id):
texture = Texture()
texture._size.x = width
texture._size.y = height
texture.texture_id = texture_id
return texture
def __init__(self):
self._size = Vector2()
self.texture_id = 0
def bind(self):
glBindTexture(GL_TEXTURE_2D, self.texture_id)
def unbind(self):
glBindTexture(GL_TEXTURE_2D, 0)
@property
def width(self):
return self._size.x
@property
def height(self):
return self._size.y
@property
def size(self):
return self._size
def _next_power_of_two(self, n):
return 2 ** (n - 1).bit_length()
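# Hedged usage sketch (not part of the original module). A current OpenGL
# context is required, so this is illustrative only; 'sprite.png' is a
# placeholder path:
#
#     tex = Texture.load_from_file('sprite.png')
#     tex.bind()
#     # ... issue textured draw calls here ...
#     tex.unbind()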
| maxfish/mgl2d | mgl2d/graphics/texture.py | Python | mit | 3,207 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import xlsxwriter
from mongodb import db, conn
def export_to_xlsx(skip_num, limit_num):
    # create a workbook (the name means "Bailingwei (J&K) data collection")
    book_name = u'百灵威数据采集_' + str(skip_num//limit_num+1) + '.xlsx'
    workbook = xlsxwriter.Workbook(book_name)
    # create a worksheet and export data
    worksheet = workbook.add_worksheet(u'百灵威')
    worksheet.write(0, 0, u'英文名称')    # English name
    worksheet.write(0, 1, u'中文名称')    # Chinese name
    worksheet.write(0, 2, u'纯度')        # purity
    worksheet.write(0, 3, u'CAS')
    worksheet.write(0, 4, u'MDL')
    worksheet.write(0, 5, u'产品编号')    # product / catalog number
    worksheet.write(0, 6, u'分子式')      # molecular formula
    worksheet.write(0, 7, u'规格')        # specification / package size
    worksheet.write(0, 8, u'单价')        # unit price
    worksheet.write(0, 9, u'预计发货期')  # estimated delivery time
row = 1
col = 0
# remember to close the connection
products = db.blw_product_detail.find(timeout=False).skip(skip_num).limit(limit_num)
for item in products:
worksheet.write(row, col, item.get('en_name', ''))
worksheet.write(row, col + 1, item.get('name', ''))
worksheet.write(row, col + 2, item.get('pure', ''))
worksheet.write(row, col + 3, item.get('cas', ''))
worksheet.write(row, col + 4, item.get('mdl', ''))
worksheet.write(row, col + 5, item.get('item_num', '').replace(' ', ''))
worksheet.write(row, col + 6, item.get('formula', ''))
worksheet.write(row, col + 7, item.get('spec', ''))
worksheet.write(row, col + 8, item.get('price', ''))
worksheet.write(row, col + 9, item.get('deliver_time', ''))
row += 1
# close the connection because has set timeout to False
conn.close()
workbook.close()
if __name__ == '__main__':
# amount = 238979
skip_num = 0
limit_num = 5000
while skip_num <= 240000:
export_to_xlsx(skip_num=skip_num, limit_num=limit_num)
skip_num += 5000 | mutoulbj/chem_spider | chem_spider/blw_export_xlsx.py | Python | mit | 1,905 |
from django.contrib import admin
from bootcamp.articles.models import Article
@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
list_display = ("title", "user", "status")
list_filter = ("user", "status", "timestamp")
| vitorfs/bootcamp | bootcamp/articles/admin.py | Python | mit | 240 |
#!/usr/bin/env python3
from copy import deepcopy
from datetime import datetime
import urllib.request
import urllib.error
import urllib.parse
from urllib.parse import urljoin
import json
import re
from html.parser import HTMLParser
class GenericParser(HTMLParser):
"""Basic tools to collect information from a single webpage (→ self._url)"""
def __init__(self, url):
super().__init__()
self._url = url
self.__template_item_info = {
"title": "",
"link": None,
"description": "",
"source": self._url,
"pubDate": None,
}
self._list_url_info = []
self._act_info = deepcopy(self.__template_item_info)
def _attrs_to_dict(self, attrs_list):
"""Converts HTMLParser's attrs list to an dict. Thus, a check,
whether a attribute exists, is simplified via has_key()"""
attrs_dict = {}
for key, value in attrs_list:
attrs_dict[key] = value
return attrs_dict
def _download_page(self):
request = urllib.request.Request(
self._url, headers={"User-Agent": "Mozilla/5.0", "Accept-Language": "en"}
)
try:
response = urllib.request.urlopen(request).read()
except (urllib.error.HTTPError, urllib.error.URLError) as error:
print(error, "on", self._url)
return ""
else:
return str(response, "utf-8")
def _parse_URLs(self):
content = self._download_page()
if not content:
return
self.feed(content)
def _next_url_info(self):
self._list_url_info.append(deepcopy(self._act_info))
self._act_info = deepcopy(self.__template_item_info)
def rm_whitespace(self, string_whitespace):
return " ".join(string_whitespace.split())
def getData(self):
return self._list_url_info
def handle_starttag(self, tag, attrs):
pass
def handle_data(self, data):
pass
def handle_endtag(self, tag):
pass
class DescriptionParser:
"""
Downloads url, all content of <main> can be retrieved with `getData`.
    Helps to get a description of a feed entry.
"""
def __init__(self, url):
self.page = GenericParser(url)._download_page()
re_flags = re.DOTALL | re.IGNORECASE
        matches = re.search(r"<main>.+</main>", self.page, re_flags)
        # Fall back to the full page if no <main> element is present.
        if matches:
            self.page = matches.group(0)
# Try to remove some scripts. Not secure at all.
self.page = re.sub(r"<script.*?</script>", "", self.page, flags=re_flags)
def getData(self):
return self.page
class SoundcloudDescriptionParser(GenericParser):
def __init__(self, url):
super().__init__(url)
self._inside_article = False
self._description_text = ""
self._parse_URLs()
def getData(self):
return self._description_text
def handle_starttag(self, tag, attrs):
if tag == "article":
self._inside_article = True
return
if tag == "meta" and self._inside_article:
attrs = self._attrs_to_dict(attrs)
if (
"itemprop" in attrs
and attrs["itemprop"] == "description"
and "content" in attrs
):
self._description_text = attrs["content"]
def handle_endtag(self, tag):
if tag == "article" and self._inside_article:
self._inside_article = False
class SoundcloudParser(GenericParser):
def __init__(self, url):
super().__init__(url)
self._found_track = False
self._collect_pubdate = False
self._pubdate_string = ""
self._collect_title = False
self._title_string = ""
self._parse_URLs()
for elem in self._list_url_info:
parser = SoundcloudDescriptionParser(elem["link"])
elem["description"] = parser.getData()
def __str__(self):
return "Soundcloud"
def _next_url_info(self):
GenericParser._next_url_info(self)
self._pubdate_string = ""
self._title_string = ""
def handle_starttag(self, tag, attrs):
attrs = self._attrs_to_dict(attrs)
if tag == "article" and "class" in attrs and attrs["class"] == "audible":
self._found_track = True
if self._found_track:
if tag == "a" and "itemprop" in attrs and attrs["itemprop"] == "url":
self._act_info["link"] = urljoin(self._url, attrs["href"])
self._collect_title = True
if tag == "time" and "pubdate" in attrs:
self._collect_pubdate = True
def handle_data(self, data):
if self._collect_pubdate:
self._pubdate_string += data
if self._collect_title:
self._title_string += data
def handle_endtag(self, tag):
if tag == "article" and self._found_track:
self._found_track = False
self._next_url_info()
if tag == "a" and self._collect_title:
self._act_info["title"] = self.rm_whitespace(self._title_string)
self._collect_title = False
if tag == "time" and self._collect_pubdate:
self._collect_pubdate = False
try:
self._act_info["pubDate"] = datetime.fromisoformat(
# strip last Z
self._pubdate_string[:-1]
)
except ValueError as e:
self._act_info["pubDate"] = datetime.strptime(
self._pubdate_string, "%Y/%m/%d %H:%M:%S%z"
)
class IdParser(GenericParser):
def __init__(self, url):
super().__init__(url)
self._id_found = False
self._tag = "a"
self._id = "link_archive"
self._parse_URLs()
def __str__(self):
return "ID"
def handle_starttag(self, tag, attrs):
if tag == self._tag:
attrs = self._attrs_to_dict(attrs)
if attrs.get("id") == self._id:
self._id_found = True
link = urljoin(self._url, attrs["href"])
self._act_info["link"] = link
self._act_info["pubDate"] = datetime.now()
elif tag == "img" and self._id_found:
attrs = self._attrs_to_dict(attrs)
src = urljoin(self._url, attrs["src"])
self._act_info["description"] = f'<img src="{ src }" />'
def handle_endtag(self, tag):
if tag == self._tag and self._id_found:
self._id_found = False
self._next_url_info()
class SzParser(GenericParser):
def __init__(self, url):
super().__init__(url)
self.__found_entry = False
self._parse_URLs()
for elem in self._list_url_info:
parser = DescriptionParser(elem["link"])
elem["description"] = parser.getData()
def __str__(self):
return "SZ"
def handle_starttag(self, tag, attrs):
if tag == "a":
attrs = self._attrs_to_dict(attrs)
if attrs.get("class") == "sz-teaser":
self.__found_entry = True
self._act_info["link"] = attrs["href"]
self._act_info["pubDate"] = datetime.now()
def handle_data(self, data):
if self.__found_entry:
self._act_info["title"] += data
def handle_endtag(self, tag):
if tag == "a" and self.__found_entry:
self.__found_entry = False
self._act_info["title"] = self.rm_whitespace(self._act_info["title"])
self._next_url_info()
class FunkParser(GenericParser):
def __init__(self, channel_id):
self.channel_id = channel_id
url = f"https://www.funk.net/data/videos/byChannelAlias/{channel_id}?page=0&size=10"
super().__init__(url)
self.json_response = self._download_page()
self.handle_json()
def __str__(self):
return "Funk"
def handle_json(self):
try:
python_struct = json.loads(self.json_response)
except json.decoder.JSONDecodeError as error:
print(error, "on", self._url)
return
for element in python_struct["list"]:
self._act_info["title"] = element["title"]
self._act_info["description"] = element["shortDescription"]
video_alias = element["alias"]
link = f"https://www.funk.net/channel/{self.channel_id}/{video_alias}"
self._act_info["link"] = link
# f.e. 2020-01-13T19:09:30.000+0000
pubdate = datetime.strptime(
element["publicationDate"], "%Y-%m-%dT%H:%M:%S.000%z"
)
self._act_info["pubDate"] = pubdate
self._next_url_info()
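# Hedged usage sketch (not part of the original module). Parsers fetch their
# source on construction, so network access is needed; the channel alias below
# is a placeholder.
if __name__ == "__main__":
    parser = FunkParser("some-channel-alias")
    for item in parser.getData():
        print(item["pubDate"], item["title"], item["link"])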
| chris34/HTML2RSS | lib/Parser.py | Python | mit | 8,841 |
from intent.alignment.Alignment import Alignment
import json
def aln_to_json(aln, reverse=False):
"""
:type aln: Alignment
"""
ret_dir = {}
if not reverse:
for tgt in aln.all_tgt():
ret_dir[tgt] = aln.tgt_to_src(tgt)
else:
for src in aln.all_src():
ret_dir[src] = aln.src_to_tgt(src)
return ret_dir | xigt/yggdrasil | yggdrasil/utils.py | Python | mit | 367 |
import collections.abc
from pymongo import MongoClient, ReturnDocument
from pymongo.errors import ConfigurationError
from thingy import classproperty, DatabaseThingy, registry
from mongo_thingy.cursor import Cursor
class Thingy(DatabaseThingy):
"""Represents a document in a collection"""
_client = None
_collection = None
_collection_name = None
_cursor_cls = Cursor
@classproperty
def _table(cls):
return cls._collection
@classproperty
def _table_name(cls):
return cls._collection_name
@classproperty
def table_name(cls):
return cls.collection_name
@classproperty
def collection(cls):
return cls.get_collection()
@classproperty
def collection_name(cls):
return cls.get_table_name()
@classproperty
def client(cls):
return cls.get_client()
@classmethod
def _get_client(cls, database):
return database.client
@classmethod
def _get_database(cls, collection, name):
if collection:
return collection.database
if cls._client and name:
return cls._client[name]
raise AttributeError("Undefined database.")
@classmethod
def _get_table(cls, database, table_name):
return database[table_name]
@classmethod
def _get_database_name(cls, database):
return database.name
@classmethod
def _get_table_name(cls, table):
return table.name
@classmethod
def get_client(cls):
if cls._client:
return cls._client
return cls._get_client(cls.database)
@classmethod
def get_collection(cls):
return cls.get_table()
@classmethod
def add_index(cls, keys, **kwargs):
kwargs.setdefault("background", True)
if not hasattr(cls, "_indexes"):
cls._indexes = []
cls._indexes.append((keys, kwargs))
@classmethod
def count(cls, filter=None, *args, **kwargs):
if filter is None:
filter = {}
return cls.collection.count_documents(filter, *args, **kwargs)
@classmethod
def connect(cls, *args, **kwargs):
cls._client = MongoClient(*args, **kwargs)
try:
cls._database = cls._client.get_database()
except ConfigurationError:
pass
@classmethod
def create_index(cls, keys, **kwargs):
cls.add_index(keys, **kwargs)
cls.collection.create_index(keys, **kwargs)
@classmethod
def create_indexes(cls):
if hasattr(cls, "_indexes"):
for keys, kwargs in cls._indexes:
cls.collection.create_index(keys, **kwargs)
@classmethod
def disconnect(cls, *args, **kwargs):
cls.client.close()
cls._client = None
cls._database = None
@classmethod
def distinct(cls, *args, **kwargs):
return cls.collection.distinct(*args, **kwargs)
@classmethod
def find(cls, *args, **kwargs):
return cls._cursor_cls(cls.collection, thingy_cls=cls, *args, **kwargs)
@classmethod
def find_one(cls, filter=None, *args, **kwargs):
        if filter is not None and not isinstance(filter, collections.abc.Mapping):
filter = {"_id": filter}
cursor = cls.find(filter, *args, **kwargs)
return cursor.first()
@classmethod
def find_one_and_replace(cls, *args, **kwargs):
kwargs.setdefault("return_document", ReturnDocument.AFTER)
result = cls.collection.find_one_and_replace(*args, **kwargs)
if result is not None:
return cls(result)
@property
def id(self):
return self.__dict__.get("id") or self._id
@id.setter
def id(self, value):
if "id" in self.__dict__:
self.__dict__["id"] = value
else:
self._id = value
def save(self, force_insert=False):
data = self.__dict__
if self.id is not None and not force_insert:
filter = {"_id": self.id}
self.get_collection().replace_one(filter, data, upsert=True)
else:
self.get_collection().insert_one(data)
return self
def delete(self):
return self.get_collection().delete_one({"_id": self.id})
connect = Thingy.connect
disconnect = Thingy.disconnect
def create_indexes():
"""Create indexes registered on all :class:`Thingy`"""
for cls in registry:
if issubclass(cls, Thingy):
cls.create_indexes()
__all__ = ["Thingy", "connect", "create_indexes"]
| numberly/mongo-thingy | mongo_thingy/__init__.py | Python | mit | 4,532 |
'''
@author: Jeremy Bayley
@email : [email protected]
@websight : TheEnvironmentGuy.com
Friday March 25 2016
Blender 2.77
-*- coding: utf-8 -*-
'''
import bpy
import ImperialPrimitives as ip
inch = 0.0254
foot = 0.3048
stud_length = foot*8
ceiling_height = foot*8
wall_length = foot*6
drywall_size = [foot*4, inch/2, foot*8]
woodstock_size = [inch*2.5, foot*8, inch*1.5]
stud_distance = inch*16
def Main():
bpy.ops.mesh.primitive_cube_add()
ip.SetupObject(size=[11,2,3])
if __name__ == '__main__':
Main()
| TheEnvironmentGuy/teg-inhouse | blenderWallBuilder/WallBuilder.py | Python | mit | 537 |
import random
import maya.cmds as m
FILENAMES = [
'',
'%FXPT_LOCATION%/src/dirA/testTex_exit.png',
'%INVALID_ENV_VAR%/fxpt/fx_texture_manager/icons/copy.png',
'//BLACK/C$/__testTextureManager__/src/dirB/dirB1/retarget.png',
'C:/__testTextureManagerExternal__/AAA/testTex_exit.png',
'C:/__testTextureManager__/src/dirB/dirB1/copy.png',
'C:/__testTextureManager__/src/dirB/dirB1/retarget.png',
'some/path/tex.png',
'sourceimages/testTex_exit.png',
]
CUBE_SPACING = 2
def createMayaNetwork(filename):
lambert = m.shadingNode('lambert', asShader=True)
sg = m.sets(renderable=True, noSurfaceShader=True, empty=True, name=lambert + 'SG')
m.connectAttr(lambert + '.outColor', sg + '.surfaceShader', force=True)
fileNode = m.shadingNode('file', asTexture=True)
placement = m.shadingNode('place2dTexture', asUtility=True)
m.connectAttr(placement + '.coverage', fileNode + '.coverage', force=True)
m.connectAttr(placement + '.translateFrame', fileNode + '.translateFrame', force=True)
m.connectAttr(placement + '.rotateFrame', fileNode + '.rotateFrame', force=True)
m.connectAttr(placement + '.mirrorU', fileNode + '.mirrorU', force=True)
m.connectAttr(placement + '.mirrorV', fileNode + '.mirrorV', force=True)
m.connectAttr(placement + '.stagger', fileNode + '.stagger', force=True)
m.connectAttr(placement + '.wrapU', fileNode + '.wrapU', force=True)
m.connectAttr(placement + '.wrapV', fileNode + '.wrapV', force=True)
m.connectAttr(placement + '.repeatUV', fileNode + '.repeatUV', force=True)
m.connectAttr(placement + '.offset', fileNode + '.offset', force=True)
m.connectAttr(placement + '.rotateUV', fileNode + '.rotateUV', force=True)
m.connectAttr(placement + '.noiseUV', fileNode + '.noiseUV', force=True)
m.connectAttr(placement + '.vertexUvOne', fileNode + '.vertexUvOne', force=True)
m.connectAttr(placement + '.vertexUvTwo', fileNode + '.vertexUvTwo', force=True)
m.connectAttr(placement + '.vertexUvThree', fileNode + '.vertexUvThree', force=True)
m.connectAttr(placement + '.vertexCameraOne', fileNode + '.vertexCameraOne', force=True)
m.connectAttr(placement + '.outUV', fileNode + '.uv', force=True)
m.connectAttr(placement + '.outUvFilterSize', fileNode + '.uvFilterSize', force=True)
m.connectAttr(fileNode + '.outColor', lambert + '.color', force=True)
m.setAttr(fileNode + '.fileTextureName', filename, typ='string')
return sg
def createMRNetwork(filename):
miaMaterial = m.shadingNode('mia_material_x', asShader=True)
sg = m.sets(renderable=True, noSurfaceShader=True, empty=True, name=miaMaterial + 'SG')
m.connectAttr(miaMaterial + '.message', sg + '.miMaterialShader', force=True)
m.connectAttr(miaMaterial + '.message', sg + '.miPhotonShader', force=True)
m.connectAttr(miaMaterial + '.message', sg + '.miShadowShader', force=True)
texLookup = m.shadingNode('mib_texture_lookup', asTexture=True)
mrTex = m.shadingNode('mentalrayTexture', asTexture=True)
m.connectAttr(mrTex + '.message', texLookup + '.tex', force=True)
m.connectAttr(texLookup + '.outValue', miaMaterial + '.diffuse', force=True)
m.connectAttr(texLookup + '.outValueA', miaMaterial + '.diffuseA', force=True)
m.setAttr(mrTex + '.fileTextureName', filename, typ='string')
return sg
shadingNetworkConstructors = [
createMayaNetwork,
createMRNetwork
]
def createCube(x, z):
cube = m.polyCube(ch=0)[0]
m.move(x, 0, z, cube, absolute=True)
return cube
def assignRandomShadingNetwork(faces):
filename = random.choice(FILENAMES)
networkConstructor = random.choice(shadingNetworkConstructors)
sg = networkConstructor(filename)
m.sets(faces, e=True, forceElement=sg)
def run(xCount, zCount):
for x in range(xCount):
for z in range(zCount):
cube = createCube(x * CUBE_SPACING, z * CUBE_SPACING)
assignRandomShadingNetwork(cube + '.f[0:2]')
assignRandomShadingNetwork(cube + '.f[3:5]')
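# Hedged usage sketch (not part of the original script): inside a Maya session,
# the call below would build a 3x3 grid of cubes with random shading networks.
#
#     run(3, 3)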
| theetcher/fxpt | fxpt/fx_texture_manager/tests/create_test_scene.py | Python | mit | 4,062 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EntitiesBatchResultItem(Model):
"""EntitiesBatchResultItem.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Unique, non-empty document identifier.
:type id: str
:ivar entities: Recognized entities in the document.
:vartype entities:
list[~azure.cognitiveservices.language.textanalytics.models.EntityRecord]
:param statistics: (Optional) if showStats=true was specified in the
request this field will contain information about the document payload.
:type statistics:
~azure.cognitiveservices.language.textanalytics.models.DocumentStatistics
"""
_validation = {
'entities': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'entities': {'key': 'entities', 'type': '[EntityRecord]'},
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
}
def __init__(self, *, id: str=None, statistics=None, **kwargs) -> None:
super(EntitiesBatchResultItem, self).__init__(**kwargs)
self.id = id
self.entities = None
self.statistics = statistics
| Azure/azure-sdk-for-python | sdk/cognitiveservices/azure-cognitiveservices-language-textanalytics/azure/cognitiveservices/language/textanalytics/models/entities_batch_result_item_py3.py | Python | mit | 1,684 |
import random
from numpy import zeros, sign
from math import exp, log
from collections import defaultdict
import argparse
kSEED = 1701
kBIAS = "BIAS_CONSTANT"
random.seed(kSEED)
def sigmoid(score, threshold=20.0):
"""
Note: Prevents overflow of exp by capping activation at 20.
:param score: A real valued number to convert into a number between 0 and 1
"""
if abs(score) > threshold:
score = threshold * sign(score)
activation = exp(score)
return activation / (1.0 + activation)
class Example:
"""
Class to represent a logistic regression example
"""
def __init__(self, label, words, vocab, df):
"""
Create a new example
:param label: The label (0 / 1) of the example
:param words: The words in a list of "word:count" format
:param vocab: The vocabulary to use as features (list)
"""
self.nonzero = {}
self.y = label
self.x = zeros(len(vocab))
for word, count in [x.split(":") for x in words]:
if word in vocab:
assert word != kBIAS, "Bias can't actually appear in document"
self.x[vocab.index(word)] += float(count)
self.nonzero[vocab.index(word)] = word
self.x[0] = 1
class LogReg:
def __init__(self, num_features, learning_rate=0.05):
"""
Create a logistic regression classifier
:param num_features: The number of features (including bias)
:param learning_rate: How big of a SG step we take
"""
self.beta = zeros(num_features)
self.learning_rate = learning_rate
def progress(self, examples):
"""
Given a set of examples, compute the probability and accuracy
:param examples: The dataset to score
:return: A tuple of (log probability, accuracy)
"""
logprob = 0.0
num_right = 0
for ii in examples:
p = sigmoid(self.beta.dot(ii.x))
if ii.y == 1:
logprob += log(p)
else:
logprob += log(1.0 - p)
# Get accuracy
if abs(ii.y - p) < 0.5:
num_right += 1
return logprob, float(num_right) / float(len(examples))
def sg_update(self, train_example):
"""
Compute a stochastic gradient update to improve the log likelihood.
:param train_example: The example to take the gradient with respect to
:return: The current vector of parameters
"""
# Your code here
return self.beta
def read_dataset(positive, negative, vocab, test_proportion=.1):
"""
Reads in a text dataset with a given vocabulary
:param positive: Positive examples
:param negative: Negative examples
:param vocab: A list of vocabulary words
    :param test_proportion: How much of the data should be reserved for test
"""
# You should not need to modify this function
df = [float(x.split("\t")[1]) for x in open(vocab, 'r') if '\t' in x]
vocab = [x.split("\t")[0] for x in open(vocab, 'r') if '\t' in x]
assert vocab[0] == kBIAS, \
"First vocab word must be bias term (was %s)" % vocab[0]
train = []
test = []
for label, input in [(1, positive), (0, negative)]:
for line in open(input):
ex = Example(label, line.split(), vocab, df)
if random.random() <= test_proportion:
test.append(ex)
else:
train.append(ex)
# Shuffle the data so that we don't have order effects
random.shuffle(train)
random.shuffle(test)
return train, test, vocab
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument("--step", help="Initial SG step size",
type=float, default=0.1, required=False)
argparser.add_argument("--positive", help="Positive class",
type=str, default="positive", required=False)
argparser.add_argument("--negative", help="Negative class",
type=str, default="negative", required=False)
argparser.add_argument("--vocab", help="Vocabulary that can be features",
type=str, default="vocab", required=False)
argparser.add_argument("--passes", help="Number of passes through train",
type=int, default=1, required=False)
args = argparser.parse_args()
train, test, vocab = read_dataset(args.positive, args.negative, args.vocab)
print("Read in %i train and %i test" % (len(train), len(test)))
# Initialize model
lr = LogReg(len(vocab), args.step)
# Iterations
update_number = 0
for pp in range(args.passes):
for ii in train:
update_number += 1
lr.sg_update(ii)
if update_number % 5 == 1:
train_lp, train_acc = lr.progress(train)
ho_lp, ho_acc = lr.progress(test)
print("Update %i\tTP %f\tHP %f\tTA %f\tHA %f" %
(update_number, train_lp, ho_lp, train_acc, ho_acc))
| Pinafore/ds-hw | logreg/logreg.py | Python | mit | 5,137 |
from fnmatch import fnmatch
from os import listdir
from unittest import TestCase
from pylegos.core import FileUtils
class TestFileUtils(TestCase):
Sut = FileUtils()
def test_pathUtils(self):
pd = self.Sut.getParentDir(filePath=__file__)
self.assertEqual('/Users/gchristiansen/projects/pyLegos/tests/velexio/pylegos/core', pd)
pd = self.Sut.getAppBase()
self.assertEqual('/Users/gchristiansen/projects/pyLegos/tests/velexio/pylegos',pd)
self.assertTrue(self.Sut.dirExists('/Users/gchristiansen/projects/pyLegos'),'Method dirExists determined existing dir does not exist')
self.assertFalse(self.Sut.dirExists('/Users/gchristiansen/projects/pyLegos/xxxpylegos'),'Method dirExists returned True on a non-existent directory')
self.assertFalse(self.Sut.dirExists('/Users/gchristiansen/projects/pyLegos/pylegos/tests/test_FileUtils.py'),'Method dirExists returned True on a check against a file')
self.assertTrue(self.Sut.fileExists(__file__), 'Method fileExists returned false file that DOES exist')
self.assertFalse(self.Sut.fileExists('/Users/gchristiansen/projects/pyLegos/tests'),'Method fileExists returned true on dir')
self.assertFalse(self.Sut.fileExists('/Users/gchristiansen/projects/pyLegos/tests/xxxx.py'),'Method fileExists returned true file that DOES NOT exist')
# Create some tmp dirs
self.Sut.removeDirMatch(dirPath='/tmp', pattern='conf*')
self.Sut.createDirectory('/tmp/conf')
self.Sut.createDirectory('/tmp/config')
self.assertEqual(len(self.Sut.getDirMatches(baseDir='/tmp',pattern='conf*')),2,'Method getDirMatches returns more than expected')
self.assertEqual(self.Sut.getDirMatches(baseDir='/tmp',pattern='conf')[0],'conf','Method getDirMatches does not return full path')
def test_DeleteFiles(self):
testFiles = ['/tmp/app_test1', '/tmp/app_test2']
for tf in testFiles:
self.Sut.touchFile(tf)
self.Sut.deleteFiles(baseDir='/tmp', pattern='app*')
for file in listdir('/tmp'):
if fnmatch(file, 'app*'):
self.fail()
def test_GetFileMatches(self):
testFiles = ['/tmp/app_test1', '/tmp/app_test2', '/tmp/vapp_test1']
for tf in testFiles:
self.Sut.touchFile(tf)
fileList = self.Sut.getFileMatches(baseDir='/tmp', pattern='app*')
self.assertEqual(len(fileList), 2)
| velexio/pyLegos | tests/test_FileUtils.py | Python | mit | 2,450 |
# -*- coding: utf-8 -*-
import numpy
import time
import os
import magdynlab.instruments
import magdynlab.controllers
import magdynlab.data_types
import threading_decorators as ThD
import matplotlib.pyplot as plt
@ThD.gui_safe
def Plot_ColorMap(Data):
f = plt.figure('ZxH', (5, 4))
extent = numpy.array([Data['h'].min(),
Data['h'].max(),
Data['f'].min()/1E3,
Data['f'].max()/1E3])
if not(f.axes):
plt.subplot()
ax = f.axes[0]
ax.clear()
ax.imshow(Data['ColorMap'].T,
aspect='auto',
origin='lower',
extent=extent)
ax.set_xlabel('Field (Oe)')
ax.set_ylabel('Freq (kHz)')
f.tight_layout()
f.canvas.draw()
@ThD.gui_safe
def Plot_ColorMapTime(Data):
f = plt.figure('Zxt', (5, 4))
extent = numpy.array([Data['t'].min(),
Data['t'].max(),
Data['f'].min()/1E3,
Data['f'].max()/1E3])
if not(f.axes):
plt.subplot()
ax = f.axes[0]
ax.clear()
ax.imshow(Data['ColorMap'].T,
aspect='auto',
origin='lower',
extent=extent)
ax.set_xlabel('Time (s)')
ax.set_ylabel('Freq (kHz)')
f.tight_layout()
f.canvas.draw()
@ThD.gui_safe
def Plot_ResFreq(Data):
f = plt.figure('ResFreq', (5, 4))
if not(f.axes):
plt.subplot()
ax = f.axes[0]
ymax = numpy.nanmax(Data['ResFreq'])/1E3
ymin = numpy.nanmin(Data['ResFreq'])/1E3
dy = numpy.max([ymax - ymin, 1E-6])
if not(ax.lines):
ax.plot([],[],'b.-')
ax.set_xlim([Data['t'].min(), Data['t'].max()])
ax.set_ylim([ymax+dy, ymin-dy])
line = ax.lines[-1]
line.set_data(Data['t'], Data['ResFreq']/1E3)
ax.set_xlabel('Time (s)')
ax.set_ylabel('ResFreq (kHz)')
ax.grid(True)
#check Y scale
yc = (ymax + ymin)/2
ymin, ymax = ax.get_ylim()
ymax = numpy.max([yc + dy*1.1/2, ymax])
ymin = numpy.min([yc - dy*1.1/2, ymin])
ax.set_ylim([ymin, ymax])
f.tight_layout()
f.canvas.draw()
class ZxH(object):
def __init__(self, ResouceNames={}):
logFile = os.path.expanduser('~/MagDynLab.log')
defaultRN = dict(RN_Kepco = 'TCPIP0::192.168.13.7::KepcoBOP2020::INSTR',
RN_IA = 'TCPIP::192.168.13.3::INSTR')
defaultRN.update(ResouceNames)
RN_Kepco = defaultRN['RN_Kepco']
RN_IA = defaultRN['RN_IA']
PowerSource = magdynlab.instruments.KEPCO_BOP(ResourceName=RN_Kepco,
logFile=logFile)
IA = magdynlab.instruments.KEYSIGHT_E4990A(ResourceName=RN_IA,
logFile=logFile)
self.IAC = magdynlab.controllers.IA_Controller(IA)
self.FC = magdynlab.controllers.FieldController(PowerSource)
self.FC.Kepco.Voltage = 5
#Experimental/plot data
self.Data = magdynlab.data_types.DataContainer()
self.Data.file_id = '.ZxH_Raw' #Z vs hs vs fs
self.DataTime = magdynlab.data_types.DataContainer()
self.DataTime.file_id = '.Zxt_Raw' #Z vs ts vs fs
self.ColorMapData = magdynlab.data_types.DataContainer()
self.ColorMapData.file_id = '.ZxH_ColorMap' #|Z| vs hs vs fs
self.SaveFormat = 'npy'
self.Info = ''
self.PlotFunct = numpy.abs
def PlotColorMap(self, i=None):
Z_ref = self.PlotFunct(self.Data['Ref'])
if i is not None:
# Update up to i column
for j in range(i+1):
Z = self.PlotFunct(self.Data['Z'][j])
if self.Data['h'][0] > self.Data['h'][-1]:
j = -1 - j
self.ColorMapData['ColorMap'][j] = Z - Z_ref
else:
            Z = self.PlotFunct(self.Data['Z'])
            self.ColorMapData['ColorMap'] = Z - Z_ref[None, :]
            if self.Data['h'][0] > self.Data['h'][-1]:
                # keep the reference subtraction when flipping the field axis
                self.ColorMapData['ColorMap'] = (Z - Z_ref[None, :])[::-1]
Plot_ColorMap(self.ColorMapData)
def PlotColorMapTime(self, i=None):
Z_ref = self.PlotFunct(self.Data['Ref'])
if i is not None:
# Update up to i column
for j in range(i+1):
Z = self.PlotFunct(self.DataTime['Z'][j])
self.ColorMapData['ColorMap'][j] = Z - Z_ref
else:
Z = self.PlotFunct(self.DataTime['Z'])
self.ColorMapData['ColorMap'] = Z - Z_ref[None,:]
dt = self.DataTime['t'][1] - self.DataTime['t'][0]
if dt < 0:
dt = 1
self.ColorMapData['t'] = numpy.arange(0, len(self.DataTime['t'])) * dt
Plot_ColorMapTime(self.ColorMapData)
if i is not None:
# Update up to i column
for j in range(i+1):
posPeak = self.ColorMapData['ColorMap'][j].argmax()
self.ColorMapData['ResFreq'][j] = self.DataTime['f'][posPeak]
if i >= 1:
Plot_ResFreq(self.ColorMapData)
def MeasureRef(self):
self.Data['Ref'] = self.IAC.getRData(True)
@ThD.as_thread
def Measure(self, fields, file_name, hold_time=0.0):
self.Data['h'] = fields
self.Data['f'] = self.IAC.frequencies
data_shape = (len(self.Data['h']), len(self.Data['f']))
self.Data['Z'] = numpy.zeros(data_shape, dtype=complex)
self.Data.info = self.Info
self.ColorMapData['h'] = self.Data['h']
self.ColorMapData['f'] = self.Data['f']
self.ColorMapData['ColorMap'] = numpy.zeros(data_shape, dtype=float)
self.ColorMapData['ColorMap'] += numpy.nan
self.ColorMapData.info = self.Info
# Loop for each field
for i, h in enumerate(fields):
self.FC.setField(h)
time.sleep(hold_time)
self.Data['Z'][i] = self.IAC.getRData(True)
self.PlotColorMap(i)
ThD.check_stop()
if file_name is not None:
self.Data.save(file_name)
self.FC.TurnOff()
self.FC.Kepco.BEEP()
@ThD.as_thread
def MeasureVsTime(self, field, time_step, n_steps, file_name):
self.DataTime['t'] = numpy.zeros((n_steps))
self.DataTime['f'] = self.IAC.frequencies
data_shape = (len(self.DataTime['t']), len(self.DataTime['f']))
self.DataTime['Z'] = numpy.zeros(data_shape, dtype=complex)
self.ColorMapData['t'] = numpy.arange(0, n_steps)
self.ColorMapData['ResFreq'] = numpy.arange(0, n_steps) + numpy.nan
self.ColorMapData['f'] = self.DataTime['f']
self.ColorMapData['ColorMap'] = numpy.zeros(data_shape, dtype=float)
self.ColorMapData['ColorMap'] += numpy.nan
self.ColorMapData.info = self.Info
self.FC.setField(field)
# Loop for each field
for i in range(n_steps):
time.sleep(time_step)
self.DataTime['t'][i] = time.time()
self.DataTime['Z'][i] = self.IAC.getRData(True)
self.PlotColorMapTime(i)
ThD.check_stop()
self.DataTime.info = self.Info
if file_name is not None:
self.DataTime.save(file_name)
def Stop(self, TurnOff=True):
print('Stoping...')
self.FC.BEEP()
if self.Measure.thread is not None:
self.Measure.stop()
self.Measure.thread.join()
if self.MeasureVsTime.thread is not None:
self.MeasureVsTime.stop()
self.MeasureVsTime.thread.join()
time.sleep(1)
self.FC.BEEP()
time.sleep(0.1)
self.FC.BEEP()
print('DONE')
if TurnOff:
print('Turning field OFF')
self.FC.TurnOff()
print('DONE')
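# Hedged usage sketch (not part of the original module). It requires the Kepco
# power source and the impedance analyzer to be reachable at the resource names
# above; field values and the file name are placeholders.
#
#     experiment = ZxH()
#     experiment.MeasureRef()
#     fields = numpy.linspace(-200, 200, 101)
#     experiment.Measure(fields, 'ZxH_sample01', hold_time=0.5)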
| Vrekrer/magdynlab | experiments/ZxH.py | Python | mit | 7,857 |
# Try to find vacancy defects: detects vacant places of host atoms
# Author: Evgeny Blokhin
from math import gcd
from functools import reduce
# hierarchy API: __order__ to apply classifier
__order__ = 20
def classify(tilde_obj):
if len(tilde_obj.info['elements']) < 2: return tilde_obj
elif tilde_obj.structures[-1].periodicity in [0, 1, 2]: return tilde_obj
tilde_obj.info['expanded'] = reduce(gcd, tilde_obj.info['contents'])
if sum(tilde_obj.info['contents']) / tilde_obj.info['expanded'] < 15: return tilde_obj # check for >= 15-atoms
gcds = []
for i in range(1, 3): # max 2 missing atoms of THE SAME type
for index in range(len(tilde_obj.info['contents'])):
chk_content = []
chk_content.extend(tilde_obj.info['contents'])
if tilde_obj.info['lack']: try_index = tilde_obj.info['elements'].index(tilde_obj.info['lack'])
else: try_index = index
chk_content[try_index] += i
gcds.append([try_index, i, reduce(gcd, chk_content)])
if tilde_obj.info['lack']: break
m_red = max(gcds, key = lambda a: a[2]) # WARNING: only one of several possible reducing configurations is taken!
# this structure probably contains defects
if m_red[2] > tilde_obj.info['expanded']:
# check reasonable defect concentration (more than 25% is not a defect anymore!)
c = float(m_red[1]*100) / m_red[2]
if c > 25: return tilde_obj
tilde_obj.info['expanded'] = m_red[2]
tilde_obj.info['contents'][ m_red[0] ] += m_red[1]
    # integer division: contents are exact multiples of the reduced formula unit
    for n, i in enumerate(map(lambda x: x // tilde_obj.info['expanded'], tilde_obj.info['contents'])):
if i>1: tilde_obj.info['standard'] += tilde_obj.info['elements'][n] + str(i)
else: tilde_obj.info['standard'] += tilde_obj.info['elements'][n]
if n == m_red[0]:
if i==1: tilde_obj.info['standard'] += '1-d'
else: tilde_obj.info['standard'] += '-d'
tilde_obj.info['vac'] = round(c, 2)
tilde_obj.info['tags'].append(0x2)
return tilde_obj
| tilde-lab/tilde | tilde/classifiers/defects.py | Python | mit | 2,112 |
# flake8: noqa
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # Django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
USER_MODEL = {
'orm_label': '%s.%s' % (User._meta.app_label, User._meta.object_name),
'model_label': '%s.%s' % (User._meta.app_label, User._meta.module_name),
'object_name': User.__name__,
}
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field long_description on 'DocumentTitle'
db.create_table('document_library_documenttitle_long_description', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('documenttitle', models.ForeignKey(orm['document_library.documenttitle'], null=False)),
('placeholder', models.ForeignKey(orm['cms.placeholder'], null=False))
))
db.create_unique('document_library_documenttitle_long_description', ['documenttitle_id', 'placeholder_id'])
def backwards(self, orm):
# Removing M2M table for field long_description on 'DocumentTitle'
db.delete_table('document_library_documenttitle_long_description')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
USER_MODEL['model_label']: {
'Meta': {'object_name': USER_MODEL['object_name']},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'document_library.document': {
'Meta': {'ordering': "('position', '-creation_date')", 'object_name': 'Document'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['document_library.DocumentCategory']", 'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'download_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_on_front_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % USER_MODEL['orm_label'], 'null': 'True', 'blank': 'True'})
},
'document_library.documentcategory': {
'Meta': {'object_name': 'DocumentCategory'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32'})
},
'document_library.documentcategorytitle': {
'Meta': {'object_name': 'DocumentCategoryTitle'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['document_library.DocumentCategory']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'document_library.documenttitle': {
'Meta': {'object_name': 'DocumentTitle'},
'copyright_notice': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['document_library.Document']"}),
'filer_file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.File']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'long_description': ('djangocms_utils.fields.M2MPlaceholderField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['%s']" % USER_MODEL['orm_label']}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['%s']" % USER_MODEL['orm_label']}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
}
}
complete_apps = ['document_library']
| bitmazk/django-document-library | document_library/south_migrations/0008_auto.py | Python | mit | 11,252 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('cms_pages', '0009_auto_20151025_0116'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='credits',
field=wagtail.wagtailcore.fields.RichTextField(default='', verbose_name='[UA] \u0422\u0435\u043a\u0441\u0442 \u043f\u0456\u0434 \u0441\u0442\u0430\u0442\u0438\u0441\u0442\u0438\u043a\u043e\u044e'),
),
migrations.AddField(
model_name='homepage',
name='credits_en',
field=wagtail.wagtailcore.fields.RichTextField(default='', verbose_name='[EN] \u0422\u0435\u043a\u0441\u0442 \u043f\u0456\u0434 \u0441\u0442\u0430\u0442\u0438\u0441\u0442\u0438\u043a\u043e\u044e'),
),
migrations.AddField(
model_name='homepage',
name='subtitle',
field=wagtail.wagtailcore.fields.RichTextField(default='', verbose_name='[UA] \u0422\u0435\u043a\u0441\u0442 \u043d\u0430\u0434 \u043f\u043e\u0448\u0443\u043a\u043e\u043c'),
),
migrations.AddField(
model_name='homepage',
name='subtitle_en',
field=wagtail.wagtailcore.fields.RichTextField(default='', verbose_name='[EN] \u0422\u0435\u043a\u0441\u0442 \u043d\u0430\u0434 \u043f\u043e\u0448\u0443\u043a\u043e\u043c'),
),
migrations.AlterField(
model_name='homepage',
name='body',
field=wagtail.wagtailcore.fields.RichTextField(default='', verbose_name='[UA] \u0422\u0435\u043a\u0441\u0442 \u043f\u0456\u0434 \u043f\u043e\u0448\u0443\u043a\u043e\u043c'),
),
migrations.AlterField(
model_name='homepage',
name='body_en',
field=wagtail.wagtailcore.fields.RichTextField(default='', verbose_name='[EN] \u0422\u0435\u043a\u0441\u0442 \u043f\u0456\u0434 \u043f\u043e\u0448\u0443\u043a\u043e\u043c'),
),
migrations.AlterField(
model_name='homepage',
name='footer',
field=wagtail.wagtailcore.fields.RichTextField(default='', verbose_name='[UA] \u0422\u0435\u043a\u0441\u0442 \u0432\u043d\u0438\u0437\u0443 \u043a\u043e\u0436\u043d\u043e\u0457 \u0441\u0442\u043e\u0440\u0456\u043d\u043a\u0438'),
),
migrations.AlterField(
model_name='homepage',
name='footer_en',
field=wagtail.wagtailcore.fields.RichTextField(default='', verbose_name='[EN] \u0422\u0435\u043a\u0441\u0442 \u0432\u043d\u0438\u0437\u0443 \u043a\u043e\u0436\u043d\u043e\u0457 \u0441\u0442\u043e\u0440\u0456\u043d\u043a\u0438'),
),
]
| dchaplinsky/pep.org.ua | pepdb/cms_pages/migrations/0010_auto_20160223_0142.py | Python | mit | 2,769 |
from django import forms
from django.contrib.auth.models import User
from captcha.fields import CaptchaField
from models import UserProfile
class UserForm(forms.ModelForm):
password = forms.CharField(required=True, widget=forms.PasswordInput(
attrs={'class':'form-control',
'placeholder' :'Password',
}))
captcha = CaptchaField()
username = forms.CharField(required=True, widget=forms.TextInput(
attrs={'class':'form-control',
'placeholder' :'User Name',
}))
email = forms.EmailField(required=True,widget=forms.TextInput(
attrs={'class':'form-control',
'placeholder' :'[email protected]',
}))
class Meta:
model = User
fields = ('username', 'email', 'password')
class UserProfileForm(forms.ModelForm):
phone = forms.CharField(required=True,widget=forms.TextInput(
attrs={'class':'form-control',
'placeholder' :'(416)-111-1234',
}))
class Meta:
model = UserProfile
fields = ('phone',) | vollov/sm | django/account/forms.py | Python | mit | 1,248 |
def loopArray(l, r):
global nr, value
dif = (r - l) + 1
for x in range(l, r + 1):
nr.append(x)
for x in range(0, dif):
for i in range(0, dif):
if nr[x] == nr[i]:
value += "0"
else:
value += "1"
return nr, value
value = ""
nr = list()
print(loopArray(10, 15))
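# Expected result of the call above (each 6-character row of `value` has a single
# "0" at its diagonal position, "1" everywhere else):
# ([10, 11, 12, 13, 14, 15], '011111101111110111111011111101111110')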
| alphazilla/HR_challenges | python/basic-data-types/loop-array.py | Python | mit | 357 |
#!/usr/bin/env python3
"""
The relabeler reads data files and outputs new data files with different information.
The input files must contain one word per line, with sentences separated by blank lines. Each word is annotated with the
following tab-separated fields:
1. token offset within sentence
2. word
3. lowercase lemma
4. part of speech
5. multi-word expression (MWE) tag
6. offset from parent token (if inside an MWE; blank otherwise)
7. strength level
8. supersense label (if applicable)
9. sentence ID
The input data uses the following six tags for MWE labeling:
O - not part of or inside any MWE
o - not part of an MWE, but inside one
B - first token of an MWE, not inside another MWE
b - first token of an MWE occurring inside another MWE
I - token continuing an MWE, but not inside another MWE
i - token continuing an MWE which occurs inside another MWE
This script will output the same number of lines, with one word per line, but with the following tab-separated fields:
1. token offset within sentence
2. word
3. lowercase lemma
4. part of speech
5. MWE tag (revised)
6. offset from parent token (if applicable)
7. sentence ID
The revised MWE tags are:
O - not part of an MWE
B - first token of an MWE
I - token continuing an MWE
In this annotation scheme, there are only top-level MWEs.
"""
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('input_file', help='data file to read')
parser.add_argument('output_file', help='location to output revised data to')
args = parser.parse_args()
print(f"Relabeling input data from '{args.input_file}' to '{args.output_file}'...")
with open(args.output_file, 'w') as outfile:
with open(args.input_file) as infile:
for line in infile:
if line and not line.isspace():
try:
off, word, lowlem, pos, mwe, paroff, strength, sup_and_id = line.strip(' \n').split('\t', 7)
if len(sup_and_id.split('\t')) == 1:
supsen = sup_and_id
sentid = ''
else:
supsen, sentid = sup_and_id.split('\t')
except ValueError:
print(f"Error with line: {repr(line)}")
raise
if mwe.islower():
# Anything which occurs inside an MWE is considered part of that MWE.
mwe = 'I'
outfile.write('\t'.join([off, word, lowlem, pos, mwe, paroff, sentid]))
outfile.write('\n')
print("Relabeling complete.")
| pdarragh/MinSem | relabel.py | Python | mit | 2,712 |
"""Files pipeline back-ported to python 2.6"""
# Copy pasted from
# https://github.com/scrapy/scrapy/blob/master/scrapy/contrib/pipeline/files.py
import hashlib
import os
import os.path
import rfc822
import time
import urlparse
from collections import defaultdict
from cStringIO import StringIO
from twisted.internet import defer, threads
from scrapy import log
from scrapy.contrib.pipeline.media import MediaPipeline
from scrapy.exceptions import NotConfigured, IgnoreRequest
from scrapy.http import Request
from scrapy.utils.misc import md5sum
class FileException(Exception):
"""General media error exception"""
class FSFilesStore(object):
def __init__(self, basedir):
if '://' in basedir:
basedir = basedir.split('://', 1)[1]
self.basedir = basedir
self._mkdir(self.basedir)
self.created_directories = defaultdict(set)
def persist_file(self, key, buf, info, meta=None, headers=None):
absolute_path = self._get_filesystem_path(key)
self._mkdir(os.path.dirname(absolute_path), info)
with open(absolute_path, 'wb') as f:
f.write(buf.getvalue())
def stat_file(self, key, info):
absolute_path = self._get_filesystem_path(key)
        try:
            last_modified = os.path.getmtime(absolute_path)
        except OSError:  # file does not exist yet or cannot be read
return {}
with open(absolute_path, 'rb') as f:
checksum = md5sum(f)
return {'last_modified': last_modified, 'checksum': checksum}
def _get_filesystem_path(self, key):
path_comps = key.split('/')
return os.path.join(self.basedir, *path_comps)
def _mkdir(self, dirname, domain=None):
seen = self.created_directories[domain] if domain else set()
if dirname not in seen:
if not os.path.exists(dirname):
os.makedirs(dirname)
seen.add(dirname)
class S3FilesStore(object):
AWS_ACCESS_KEY_ID = None
AWS_SECRET_ACCESS_KEY = None
POLICY = 'public-read'
HEADERS = {
'Cache-Control': 'max-age=172800',
}
def __init__(self, uri):
assert uri.startswith('s3://')
self.bucket, self.prefix = uri[5:].split('/', 1)
def stat_file(self, key, info):
def _onsuccess(boto_key):
checksum = boto_key.etag.strip('"')
last_modified = boto_key.last_modified
modified_tuple = rfc822.parsedate_tz(last_modified)
modified_stamp = int(rfc822.mktime_tz(modified_tuple))
return {'checksum': checksum, 'last_modified': modified_stamp}
return self._get_boto_key(key).addCallback(_onsuccess)
def _get_boto_bucket(self):
from boto.s3.connection import S3Connection
# disable ssl (is_secure=False) because of this python bug:
# http://bugs.python.org/issue5103
c = S3Connection(self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY, is_secure=False)
return c.get_bucket(self.bucket, validate=False)
def _get_boto_key(self, key):
b = self._get_boto_bucket()
key_name = '%s%s' % (self.prefix, key)
return threads.deferToThread(b.get_key, key_name)
def persist_file(self, key, buf, info, meta=None, headers=None):
"""Upload file to S3 storage"""
b = self._get_boto_bucket()
key_name = '%s%s' % (self.prefix, key)
k = b.new_key(key_name)
if meta:
for metakey, metavalue in meta.iteritems():
k.set_metadata(metakey, str(metavalue))
h = self.HEADERS.copy()
if headers:
h.update(headers)
buf.seek(0)
return threads.deferToThread(k.set_contents_from_string, buf.getvalue(),
headers=h, policy=self.POLICY)
class FilesPipeline(MediaPipeline):
"""Abstract pipeline that implement the file downloading
This pipeline tries to minimize network transfers and file processing,
doing stat of the files and determining if file is new, uptodate or
expired.
`new` files are those that pipeline never processed and needs to be
downloaded from supplier site the first time.
`uptodate` files are the ones that the pipeline processed and are still
valid files.
`expired` files are those that pipeline already processed but the last
modification was made long time ago, so a reprocessing is recommended to
refresh it in case of change.
"""
MEDIA_NAME = "file"
EXPIRES = 90
STORE_SCHEMES = {
'': FSFilesStore,
'file': FSFilesStore,
's3': S3FilesStore,
}
def __init__(self, store_uri, download_func=None):
if not store_uri:
raise NotConfigured
self.store = self._get_store(store_uri)
super(FilesPipeline, self).__init__(download_func=download_func)
@classmethod
def from_settings(cls, settings):
s3store = cls.STORE_SCHEMES['s3']
s3store.AWS_ACCESS_KEY_ID = settings['AWS_ACCESS_KEY_ID']
s3store.AWS_SECRET_ACCESS_KEY = settings['AWS_SECRET_ACCESS_KEY']
cls.EXPIRES = settings.getint('FILES_EXPIRES', 90)
store_uri = settings['FILES_STORE']
return cls(store_uri)
def _get_store(self, uri):
if os.path.isabs(uri): # to support win32 paths like: C:\\some\dir
scheme = 'file'
else:
scheme = urlparse.urlparse(uri).scheme
store_cls = self.STORE_SCHEMES[scheme]
return store_cls(uri)
def media_to_download(self, request, info):
def _onsuccess(result):
if not result:
return # returning None force download
last_modified = result.get('last_modified', None)
if not last_modified:
return # returning None force download
age_seconds = time.time() - last_modified
age_days = age_seconds / 60 / 60 / 24
if age_days > self.EXPIRES:
return # returning None force download
referer = request.headers.get('Referer')
log.msg(format='File (uptodate): Downloaded %(medianame)s from %(request)s referred in <%(referer)s>',
level=log.DEBUG, spider=info.spider,
medianame=self.MEDIA_NAME, request=request, referer=referer)
self.inc_stats(info.spider, 'uptodate')
checksum = result.get('checksum', None)
return {'url': request.url, 'path': key, 'checksum': checksum}
key = self.file_key(request.url)
dfd = defer.maybeDeferred(self.store.stat_file, key, info)
dfd.addCallbacks(_onsuccess, lambda _: None)
dfd.addErrback(log.err, self.__class__.__name__ + '.store.stat_file')
return dfd
def media_failed(self, failure, request, info):
if not isinstance(failure.value, IgnoreRequest):
referer = request.headers.get('Referer')
log.msg(format='File (unknown-error): Error downloading '
'%(medianame)s from %(request)s referred in '
'<%(referer)s>: %(exception)s',
level=log.WARNING, spider=info.spider, exception=failure.value,
medianame=self.MEDIA_NAME, request=request, referer=referer)
raise FileException
def media_downloaded(self, response, request, info):
referer = request.headers.get('Referer')
if response.status != 200:
            log.msg(format='File (code: %(status)s): Error downloading file from %(request)s referred in <%(referer)s>',
level=log.WARNING, spider=info.spider,
status=response.status, request=request, referer=referer)
raise FileException('download-error')
if not response.body:
            log.msg(format='File (empty-content): Empty file from %(request)s referred in <%(referer)s>: no-content',
level=log.WARNING, spider=info.spider,
request=request, referer=referer)
raise FileException('empty-content')
status = 'cached' if 'cached' in response.flags else 'downloaded'
        log.msg(format='File (%(status)s): Downloaded file from %(request)s referred in <%(referer)s>',
level=log.DEBUG, spider=info.spider,
status=status, request=request, referer=referer)
self.inc_stats(info.spider, status)
try:
key = self.file_key(request.url)
checksum = self.file_downloaded(response, request, info)
except FileException as exc:
            whyfmt = 'File (error): Error processing file from %(request)s referred in <%(referer)s>: %(errormsg)s'
log.msg(format=whyfmt, level=log.WARNING, spider=info.spider,
request=request, referer=referer, errormsg=str(exc))
raise
except Exception as exc:
            whyfmt = 'File (unknown-error): Error processing file from %(request)s referred in <%(referer)s>'
log.err(None, whyfmt % {'request': request, 'referer': referer}, spider=info.spider)
raise FileException(str(exc))
return {'url': request.url, 'path': key, 'checksum': checksum}
def inc_stats(self, spider, status):
spider.crawler.stats.inc_value('file_count', spider=spider)
spider.crawler.stats.inc_value('file_status_count/%s' % status, spider=spider)
### Overridable Interface
def get_media_requests(self, item, info):
return [Request(x) for x in item.get('file_urls', [])]
def file_key(self, url):
media_guid = hashlib.sha1(url).hexdigest()
media_ext = os.path.splitext(url)[1]
return 'full/%s%s' % (media_guid, media_ext)
def file_downloaded(self, response, request, info):
key = self.file_key(request.url)
buf = StringIO(response.body)
self.store.persist_file(key, buf, info)
checksum = md5sum(buf)
return checksum
def item_completed(self, results, item, info):
if 'files' in item.fields:
item['files'] = [x for ok, x in results if ok]
return item
| BlogForever/crawler | bibcrawl/pipelines/files.py | Python | mit | 10,181 |
# -*- coding: utf-8 -*-
__version__ = '0.0.1'
def version():
return __version__
# adapted from:
# http://stackoverflow.com/questions/7204805/dictionaries-of-dictionaries-merge/7205107#7205107
def dict_merge(a, b, path=None):
"""merges b into a"""
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
dict_merge(a[key], b[key], path + [str(key)])
elif a[key] != b[key]:
# update the value
a[key] = b[key]
else:
a[key] = b[key]
return a
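# Example of dict_merge (derived from the implementation above): nested dicts are
# merged recursively, and conflicting scalar values are resolved in favour of `b`.
#   dict_merge({'a': {'b': 1}, 'x': 1}, {'a': {'c': 2}, 'x': 2})
#   -> {'a': {'b': 1, 'c': 2}, 'x': 2}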
| finklabs/aws-deploy | botodeploy/utils.py | Python | mit | 627 |
# parse and plot output from LibPaxos3 and LibFPaxos
import csv
from math import floor
import numpy as np
import matplotlib.pyplot as plt
# parse csv output
def read(filename):
latency = []
throughput = []
with open(filename, newline='') as csvfile:
for row in csv.reader(csvfile):
if len(row) == 3:
thr = row[0].rsplit(" ")
bw = row[1].rsplit(" ")
lat = row[2].rsplit(" ")
if len(thr) == 2 and len(bw) == 3 and len(lat) == 11 and lat[9].isdigit() and thr[0].isdigit():
latency.append(int(lat[9])/1000)
throughput.append(int(thr[0]))
return (latency, throughput)
# throughput-weighted average of the per-interval latency values
def average(arr1, arr2):
    total = 0
    items = 0
    for x, y in zip(arr1, arr2):
        total += x*y
        items += y
if items == 0:
return 0
else:
return (total/items)
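# Example (with the corrected accumulation above):
#   average([10.0, 20.0], [1, 3]) == (10*1 + 20*3) / (1 + 3) == 17.5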
# get phase data from LibPaxos3
paxos_data = {}
paxos_throughput = {}
# Read LibPaxos3 results for every replica count plotted below (3..14).
for n in range(3, 15, 1):
    maj = int(floor(n/2)+1)
    paxos_data[n], paxos_throughput[n] = read(
        "paxos/client-config_r"+str(n)+"_q"+str(maj)+"_g"+str(n)+".log")
replicas = list(range(3,15,1))
avg_latency = []
avg_throughput = []
for n in replicas:
if len(paxos_data[n]) > 0 and len(paxos_throughput[n]) > 0:
thr_array = paxos_throughput[n][20:100]
lat_array = paxos_data[n][20:100]
avg_latency.append(average(lat_array,thr_array))
avg_throughput.append(np.mean(thr_array))
# figure of latency/throughput of LibPaxos3
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
axes.set_xlim([2,15])
axes.set_xlabel('Number of replicas')
axes.set_ylabel('Latency (ms)')
axes.set_title('Throughput and Latency for LibPaxos3')
l = axes.plot(replicas, avg_latency,"bx-", label='latency')
axes.set_ylim([0,80])
axes2 = axes.twinx()
axes2.set_ylabel('Throughput (reqs/sec)')
t = axes2.plot(replicas, avg_throughput,"ro-", label='throughput')
axes2.set_ylim([0,400])
axes2.set_xlim([2.5,14.5])
axes2.set_xticks(np.arange(3, 15, 1.0))
lns = l+t
labs = [l.get_label() for l in lns]
axes2.legend(lns, labs, loc=0,frameon=False)
fig.savefig('paxos.pdf', bbox_inches='tight')
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
axes.set_xlabel("Time")
axes.set_ylabel("latency")
axes.set_ylim([0,200])
axes.set_xlim([0,100])
lines = list(range(3,15,1))
for n in lines:
axes.plot(paxos_data[n])
axes.legend(lines,loc=1,frameon=False)
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
axes.set_xlabel("Time")
axes.set_ylabel("Throughput (reqs/sec)")
axes.set_ylim([0,400])
axes.set_xlim([0,100])
for n in lines:
axes.plot(paxos_throughput[n])
axes.legend(lines,loc=1,frameon=False)
# Now for FPaxos
fpaxos_data = {}
fpaxos_throughput = {}
replicas = list(range(3,11,1))
for n in replicas:
maj = int(floor(n/2)+1)
fpaxos_data[n] = {}
fpaxos_throughput[n] = {}
for q in range(1,maj+1):
fpaxos_data[n][q], fpaxos_throughput[n][q] = read("fpaxos-results/client-config_r"+str(n)+"_q"+str(q)+"_g"+str(q)+".log")
for n in replicas:
maj = int(floor(n/2)+1)
lines = list(range(1,maj+1))
n_latency = []
n_throughput = []
labels = []
for q in lines:
if len(fpaxos_throughput[n][q]) > 0 and len(fpaxos_data[n][q]) > 0:
thr_array = fpaxos_throughput[n][q][20:100]
lat_array = fpaxos_data[n][q][20:100]
print(n,q,average(lat_array,thr_array),np.mean(thr_array))
n_latency.append(average(lat_array,thr_array))
n_throughput.append(np.mean(thr_array))
labels.append('FPaxos '+str(q))
    # avg_* are positional lists built above (first entry is 3 replicas), so index by n - 3.
    n_throughput.append(avg_throughput[n - 3])
    n_latency.append(avg_latency[n - 3])
labels.append('Paxos')
ind = np.arange(maj+1) # the x locations for the groups
width = 0.15
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
axes.set_xlim([-1,maj])
axes.set_ylim([0,50])
axes.set_ylabel('Latency (ms)')
axes.set_title('FPaxos and LibPaxos3 for '+str(n)+' replicas')
l = axes.bar(ind, n_latency, width, color="blue",label="latency")
axes2 = axes.twinx()
t = axes2.bar(width + ind, n_throughput, width, color="red",label="throughput")
axes2.set_ylabel('Throughput (reqs/sec)')
axes.set_xticks(ind + width)
axes.set_xticklabels(labels)
axes2.set_ylim([0,500])
lns = [l[1],t[1]]
labs = ['latency','throughput']
axes2.legend(lns, labs, loc=0,frameon=False,ncol=2)
fig.savefig('fpaxos_'+str(n)+'.pdf', bbox_inches='tight')
for n in replicas:
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
axes.set_xlabel("Time")
axes.set_ylabel("latency")
axes.set_ylim([0,400])
axes.set_xlim([20,100])
maj = int(floor(n/2)+1)
lines = list(range(1,maj+1))
for q in lines:
axes.plot(fpaxos_data[n][q])
axes.legend(lines,loc=1,frameon=False)
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
axes.set_xlabel("Time")
axes.set_ylabel("Throughput (reqs/sec)")
axes.set_ylim([0,200])
axes.set_xlim([20,100])
for q in lines:
axes.plot(fpaxos_throughput[n][q])
axes.legend(lines,loc=1,frameon=False)
| fpaxos/fpaxos-test | plotter.py | Python | mit | 5,226 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from setuptools import setup
try:
    from jupyterpip import cmdclass
except ImportError:
    import importlib
    import pip
    pip.main(['install', 'jupyter-pip'])
    cmdclass = importlib.import_module('jupyterpip').cmdclass
setup(
name='d3networkx_psctb',
version='0.2',
description='Visualize networkx graphs using D3.js in the IPython notebook.',
author='Jonathan Frederic',
author_email='[email protected]',
license='MIT License',
url='https://github.com/jdfreder/ipython-d3networkx',
keywords='python ipython javascript d3 networkx d3networkx widget',
classifiers=['Development Status :: 4 - Beta',
'Programming Language :: Python',
'License :: OSI Approved :: MIT License'],
packages=['d3networkx_psctb'],
include_package_data=True,
install_requires=["jupyter-pip","networkx"],
cmdclass=cmdclass('d3networkx_psctb'),
)
| exe0cdc/ipython-d3networkx | setup.py | Python | mit | 956 |
#!/usr/bin/python
import traceback
from scipy.io import wavfile
import tensorflow as tf
import numpy as np
import random
import json
import itertools
import math
import time
import pyaudio
import matplotlib.pyplot as plt
import scipy.io as sio
import params
import model
import train
import export_to_octave
import operations
parameters = params.parameters
print parameters
## Using a 440 Hz sine wave as a seed
#t = np.asarray(range(parameters['sample_length'])) / 48000.0 * 2.0 * np.pi * 440
#signal = np.sin(t)*0.3
## Using zeros as a seed
#t = np.zeros(parameters['sample_length'])
## Using an arbitrary wave file as a seed:
#(_, a1) = sio.wavfile.read("seeds/a1.wav")
#signal = a1 / (2.**15)
# We will use the last input given to the training as the seed.
# Keep the sample rate as well: it is needed when opening the audio stream below.
(sampleFreq, i) = sio.wavfile.read("input.wav")
signal = i / (2.**15)
output_signal = np.copy(np.asarray(signal))
signal = np.asarray(signal[len(signal) - parameters['sample_length'] : len(signal)])
quantization_channels = parameters['quantization_channels']
mu_law_input = operations.one_hot(operations.mu_law(signal, float(quantization_channels - 1)),
quantization_channels)
p = pyaudio.PyAudio()
generative_model = None
with tf.device("/cpu:0"):
generative_model = model.create_generative_model(parameters)
init = tf.initialize_all_variables()
saver = tf.train.Saver(tf.all_variables())
gpu_options = tf.GPUOptions()
config = tf.ConfigProto(gpu_options=gpu_options)
image = []
with tf.Session(config=config) as sess:
saver.restore(sess, 'sound-model-best')
# Creating a 100 second sample
for i in range(48000 * 100):
print "Step: ", i
[probabilities] = sess.run([generative_model['generated_output']], feed_dict = {
generative_model['mu_law_input']: mu_law_input
})
# image.append(probabilities)
def choose_value(sample):
sample = np.asarray(sample)
temperature = parameters['temperature']
cooled = np.log(sample) / temperature
sample = np.exp(cooled - np.max(cooled, axis=-1))
sample /= np.sum(sample, axis=-1)
sampled = np.random.choice(np.arange(parameters['quantization_channels']), p=sample)
probability_selected = sample[sampled]
new_value = operations.de_mu_law(sampled, float(parameters['quantization_channels'] - 1))
print "Sampled, new_value, probability_selected: ", sampled, new_value, probability_selected
return (new_value, sampled)
(next_val, next_val_raw) = choose_value(probabilities)
mu_law_input = (np.concatenate((mu_law_input, operations.one_hot(next_val_raw, quantization_channels)),
axis=0))[1:, :]
output_signal = np.append(output_signal, next_val)
# export_to_octave.save('image.mat', 'i', image)
wav = np.asarray(map(int, output_signal * (2.**15)), dtype=np.int16)
wav2 = np.asarray(map(int, signal * (2.**15)), dtype=np.int16)
export_to_octave.save('sound.mat', 's', wav)
export_to_octave.save('sound2.mat', 's', wav2)
stream = p.open(format=p.get_format_from_width(2),
channels=1,
rate=sampleFreq,
output=True)
for chunk in np.array_split(wav, 100):
stream.write(chunk, np.size(chunk, 0))
stream.stop_stream()
stream.close()
p.terminate()
| keskival/wavenet_synth | generate.py | Python | mit | 3,480 |
"""
Utility functions.
"""
import asyncio
import collections
import functools
import inspect
import io
import logging
import os
from typing import Set # noqa
import libnacl
import logbook
import logbook.compat
import logbook.more
# noinspection PyPackageRequirements
import lru
import wrapt
from .key import Key
__all__ = (
'enable_logging',
'disable_logging',
'get_logger',
'read_key_or_key_file',
'raise_server_error',
'randint',
'ViewIOReader',
'ViewIOWriter',
'async_lru_cache',
'aio_run',
'aio_run_decorator',
'aio_run_proxy_decorator',
'AioRunMixin',
)
_logger_group = logbook.LoggerGroup()
_logger_group.disabled = True
_logger_redirect_handler = logbook.compat.RedirectLoggingHandler()
_logger_convert_level_handler = logbook.compat.LoggingHandler()
def _convert_level(logging_level):
return _logger_convert_level_handler.convert_level(logging_level)
def enable_logging(level=logbook.WARNING, asyncio_level=None, aiohttp_level=None):
# Determine levels
level = logbook.lookup_level(level)
converted_level = _convert_level(level)
if asyncio_level is None:
asyncio_level = converted_level
else:
asyncio_level = _convert_level(asyncio_level)
if aiohttp_level is None:
aiohttp_level = converted_level
else:
aiohttp_level = _convert_level(aiohttp_level)
# Enable logger group
_logger_group.disabled = False
# Enable asyncio debug logging
os.environ['PYTHONASYNCIODEBUG'] = '1'
# Redirect asyncio logger
logger = logging.getLogger('asyncio')
logger.setLevel(asyncio_level)
logger.addHandler(_logger_redirect_handler)
# Redirect aiohttp logger
logger = logging.getLogger('aiohttp')
logger.setLevel(aiohttp_level)
logger.addHandler(_logger_redirect_handler)
def disable_logging():
# Reset aiohttp logger
logger = logging.getLogger('aiohttp')
logger.removeHandler(_logger_redirect_handler)
logger.setLevel(logging.NOTSET)
# Reset asyncio logger
logger = logging.getLogger('asyncio')
logger.removeHandler(_logger_redirect_handler)
logger.setLevel(logging.NOTSET)
# Disable asyncio debug logging
del os.environ['PYTHONASYNCIODEBUG']
# Disable logger group
_logger_group.disabled = True
def get_logger(name=None, level=logbook.NOTSET):
"""
Return a :class:`logbook.Logger`.
Arguments:
- `name`: The name of a specific sub-logger.
"""
base_name = 'threema.gateway'
name = base_name if name is None else '.'.join((base_name, name))
# Create new logger and add to group
logger = logbook.Logger(name=name, level=level)
_logger_group.add_logger(logger)
return logger
# TODO: Raises
def read_key_or_key_file(key, expected_type):
"""
Decode a hex-encoded key or read it from a file.
Arguments:
- `key`: A hex-encoded key or the name of a file which contains
a key.
- `expected_type`: One of the types of :class:`Key.Type`.
Return a:class:`libnacl.public.SecretKey` or
:class:`libnacl.public.PublicKey` instance.
"""
# Read key file (if any)
try:
with open(key) as file:
key = file.readline().strip()
except IOError:
pass
# Convert to key instance
return Key.decode(key, expected_type)
@asyncio.coroutine
def raise_server_error(response, error):
"""
Raise a :class:`GatewayServerError` exception from a
HTTP response. Releases the response before raising.
Arguments:
- `response`: A :class:`aiohttp.ClientResponse` instance.
- `error`: The :class:`GatewayServerError`. to instantiate.
Always raises :class:`GatewayServerError`.
"""
status = response.status
yield from response.release()
raise error(status)
def randint(a, b):
"""
Return a cryptographically secure random integer N such that
``a <= N <= b``.
"""
    n = libnacl.randombytes_uniform(b - a + 1) + a  # uniform over [a, b], as documented
assert a <= n <= b
return n
# TODO: Document properly
class ViewIOReader(io.RawIOBase):
def __init__(self, bytes_or_view):
super().__init__()
if isinstance(bytes_or_view, bytes):
bytes_or_view = memoryview(bytes_or_view)
self._view = bytes_or_view
self._offset = 0
self._length = len(self._view)
# IOBase methods
def fileno(self):
raise OSError('No file descriptors used')
def isatty(self):
return False
def readable(self):
return True
def readline(self, size=-1):
raise NotImplementedError
def readlines(self, hint=-1):
raise NotImplementedError
def seek(self, offset, whence=os.SEEK_SET):
if whence == os.SEEK_SET:
pass
elif whence == os.SEEK_CUR:
offset += self._offset
elif whence == os.SEEK_END:
offset = self._length - offset
else:
raise ValueError('Invalid whence value')
if not 0 < offset <= self._length:
raise ValueError('Offset is greater than view length')
self._offset = offset
return offset
def seekable(self):
return True
def tell(self):
return self._offset
def writable(self):
return False
# RawIOBase methods
def read(self, size=-1):
if size == -1:
return self.readall()
elif size < 0:
raise ValueError('Negative size')
start, end = self._offset, min(self._offset + size, self._length)
self._offset = end
return self._view[start:end]
def readall(self):
return self.read(self._length - self._offset)
def readinto(self, b):
data = self.readall()
b.extend(data)
return len(data)
# Custom methods
def __len__(self):
return self._length - self._offset
def readexactly(self, size):
data = self.read(size)
if len(data) < size:
raise asyncio.IncompleteReadError(data, size)
else:
return data
# TODO: Document properly
class ViewIOWriter(io.RawIOBase):
def __init__(self, bytes_or_views=None):
super().__init__()
self._views = []
self._length = 0
if bytes_or_views is not None:
for bytes_or_view in bytes_or_views:
self.writeexactly(bytes_or_view)
# IOBase methods
def fileno(self):
raise OSError('No file descriptors used')
def isatty(self):
return False
def readable(self):
return False
def seekable(self):
return False
def writable(self):
return True
# RawIOBase methods
def write(self, bytes_or_view):
# Convert to memoryview if necessary
if isinstance(bytes_or_view, bytes):
bytes_or_view = memoryview(bytes_or_view)
# Append
length = len(bytes_or_view)
self._length += length
self._views.append(bytes_or_view)
return length
def writelines(self, lines):
raise NotImplementedError
# Custom methods
def __radd__(self, other):
self.extend(other)
return self
def __len__(self):
return self._length
def getvalue(self):
return b''.join(self._views)
# noinspection PyProtectedMember
def extend(self, other):
self._views += other._views
self._length += other._length
def writeexactly(self, bytes_or_view):
return self.write(bytes_or_view)
class _HashedSeq(list):
"""
This class guarantees that hash() will be called no more than once
per element. This is important because the lru_cache() will hash
the key multiple times on a cache miss.
"""
__slots__ = 'hash_value'
# noinspection PyMissingConstructor
def __init__(self, tuple_):
self[:] = tuple_
self.hash_value = hash(tuple_)
def __hash__(self):
return self.hash_value
# noinspection PyPep8Naming
_CacheInfo = collections.namedtuple(
'CacheInfo', ('hits', 'misses', 'maxsize', 'currsize'))
def _make_key(
args, kwargs, typed,
fast_types={int, str, frozenset, type(None)},
kwargs_mark=(object(),),
):
"""
Make a cache key from optionally typed positional and keyword arguments
The key is constructed in a way that is flat as possible rather than
as a nested structure that would take more memory.
If there is only a single argument and its data type is known to cache
its hash value, then that argument is returned without a wrapper. This
saves space and improves lookup speed.
"""
key = args
if kwargs:
sorted_items = sorted(kwargs.items())
key += kwargs_mark
for item in sorted_items:
key += item
else:
sorted_items = []
if typed:
key += tuple(type(v) for v in args)
if kwargs:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fast_types:
return key[0]
return _HashedSeq(key)
class _LRUCacheDict(lru.LRUCacheDict):
def __init__(self, *args, **kwargs):
self.hits = self.misses = 0
super().__init__(*args, **kwargs)
def __len__(self):
return self.size()
def info(self):
"""Report cache statistics"""
return _CacheInfo(self.hits, self.misses, self.max_size, len(self))
def __getitem__(self, key):
try:
item = super().__getitem__(key)
except KeyError:
self.misses += 1
raise
else:
self.hits += 1
return item
def clear(self):
super().clear()
self.hits = self.misses = 0
def async_lru_cache(maxsize=1024, expiration=15 * 60, typed=False):
"""
Least-recently-used cache decorator for asyncio coroutines.
If *maxsize* is set to None, the LRU features are disabled and the
cache can grow without bound.
If *expiration* is set, cached values will be cleared after
*expiration* seconds.
If *typed* is True, arguments of different types will be cached
separately. For example, f(3.0) and f(3) will be treated as distinct
calls with distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize,
currsize) with f.cache_info(). Clear the cache and statistics
with f.cache_clear(). Access the underlying function with
f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
def decorating_function(func):
cache = _LRUCacheDict(max_size=maxsize, expiration=expiration)
@asyncio.coroutine
def wrapper(*args, **kwargs):
# Make cached key
key = _make_key(args, kwargs, typed)
# Get from cache
try:
return cache[key]
except KeyError:
pass
# Miss, retrieve from coroutine
value = yield from func(*args, **kwargs)
cache[key] = value
return value
wrapper.cache = cache
wrapper.cache_info = cache.info
wrapper.cache_clear = cache.clear
return functools.update_wrapper(wrapper, func)
return decorating_function
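# Minimal usage sketch for async_lru_cache (illustrative only; `fetch_public_key` and
# `connection` are hypothetical names, not part of this module):
#
#   @async_lru_cache(maxsize=256, expiration=60)
#   @asyncio.coroutine
#   def fetch_public_key(identity):
#       return (yield from connection.get_public_key(identity))
#
# Repeated calls with the same `identity` within 60 seconds are served from the cache;
# hit/miss statistics are available via fetch_public_key.cache_info().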
def aio_run(coroutine, loop=None, close_after_complete=False):
"""
Decorator to run an asyncio coroutine as a normal blocking
function.
Arguments:
- `coroutine`: The asyncio coroutine or task to be executed.
- `loop`: An optional :class:`asyncio.AbstractEventLoop`
subclass instance.
- `close_after_complete`: Close `loop` after the coroutine
returned. Defaults to ``False``.
Returns the result of the asyncio coroutine.
Example:
.. code-block::
@asyncio.coroutine
def coroutine(timeout):
yield from asyncio.sleep(timeout)
return True
# Call coroutine in a blocking manner
result = aio_run(coroutine(1.0))
print(result)
"""
# Create a new event loop (if required)
if loop is None:
loop_ = asyncio.get_event_loop()
# Closed? Set a new one
if loop_.is_closed():
loop_ = asyncio.new_event_loop()
asyncio.set_event_loop(loop_)
else:
loop_ = loop
# Run the coroutine and get the result
result = loop_.run_until_complete(coroutine)
# Close loop (if requested)
if close_after_complete:
loop_.close()
# Return the result
return result
def aio_run_decorator(loop=None, close_after_complete=False):
"""
Decorator to run an asyncio coroutine as a normal blocking
function.
Arguments:
- `loop`: An optional :class:`asyncio.AbstractEventLoop`
subclass instance.
- `close_after_complete`: Close `loop` after the coroutine
returned. Defaults to ``False``.
Returns a decorator to wrap around an asyncio coroutine.
Example:
.. code-block::
@asyncio.coroutine
def coroutine(timeout):
yield from asyncio.sleep(timeout)
return True
@aio_run_decorator()
def helper(*args, **kwargs):
return coroutine(*args, **kwargs)
# Call coroutine in a blocking manner
result = helper(timeout=1.0)
print(result)
"""
def _decorator(func):
# Make it a coroutine if it isn't one already
if not asyncio.iscoroutinefunction(func):
func = asyncio.coroutine(func)
def _wrapper(*args, **kwargs):
return aio_run(
func(*args, **kwargs),
loop=loop,
close_after_complete=close_after_complete,
)
return functools.update_wrapper(_wrapper, func)
return _decorator
def aio_run_proxy_decorator(cls):
"""
Proxy a publicly accessible class and run all methods marked as
async inside it (using the class attribute `async_functions`) with
an event loop to make it appear as a traditional blocking method.
Arguments:
- `cls`: A class to be wrapped. The class must inherit
:class:`AioRunMixin`. The class and all base classes must
supply a class attribute `async_functions` which is an
iterable of method names that should appear as traditional
blocking functions from the outside.
Returns a class factory.
.. note:: The `unwrap` property of the resulting instance can be
used to get the original instance.
"""
# Ensure each base class has added a class-level iterable of async functions
async_functions = set()
for base_class in inspect.getmro(cls)[:-1]:
try:
async_functions.update(base_class.__dict__.get('async_functions', None))
except TypeError:
message = "Class {} is missing 'async_functions' iterable"
raise ValueError(message.format(base_class.__name__))
# Sanity-check
if not issubclass(cls, AioRunMixin):
raise TypeError("Class {} did not inherit 'AioRunMixin'".format(
cls.__name__))
class _AioRunProxyDecoratorFactory(wrapt.ObjectProxy):
def __call__(self, *args, **kwargs):
# Create instance
instance = cls(*args, **kwargs)
# Sanity-check
if not isinstance(instance, AioRunMixin):
raise TypeError("Class {} did not inherit 'AioRunMixin'".format(
cls.__name__))
# Wrap with proxy (if required)
if instance.blocking:
class _AioRunProxy(wrapt.ObjectProxy):
@property
def unwrap(self):
"""
Get the wrapped instance.
"""
return self.__wrapped__
# Wrap all async functions with `aio_run`
for name in async_functions:
def _method(instance_, name_, *args_, **kwargs_):
method = aio_run_decorator()(getattr(instance_, name_))
return method(*args_, **kwargs_)
_method = functools.partial(_method, instance, name)
setattr(_AioRunProxy, name, _method)
return _AioRunProxy(instance)
else:
return instance
return _AioRunProxyDecoratorFactory(cls)
class AioRunMixin:
"""
Must be inherited when using :func:`aio_run_proxy_decorator`.
Arguments:
- `blocking`: Switch to turn the blocking API on or off.
"""
async_functions = set() # type: Set[str]
def __init__(self, blocking=False):
self.blocking = blocking
@property
def unwrap(self):
"""
Get the wrapped instance.
"""
return self
| threema-ch/threema-msgapi-sdk-python | threema/gateway/util.py | Python | mit | 16,977 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
See:
- https://docs.python.org/3/library/tempfile.html
- http://sametmax.com/fichiers-temporaires-avec-tempfile-en-python/ (french)
"""
import tempfile
import os
# Create a temporary directory using a context manager
home_path = os.path.expanduser("~")
with tempfile.TemporaryDirectory(dir=home_path, prefix=".", suffix="_test") as temp_dir_path:
print(temp_dir_path)
# The directory and all its contents are removed now
| jeremiedecock/snippets | python/tempfile_make_temporary_directory_with_prefix_suffix_and_dirpath.py | Python | mit | 1,597 |
#!/usr/bin/env python
import rospy
from std_msgs.msg import Float32
import numpy as np
# publish the PID command (data is the scalar control value u)
def publisher(data):
    pub = rospy.Publisher('pid/cmd', Float32, queue_size=10)
    rospy.init_node('pid_node', anonymous=True)
    rate = rospy.Rate(10)  # 10 Hz
    msg = data
while not rospy.is_shutdown():
pub.publish(msg)
rate.sleep()
class PID(object):
def __init__(self):
self.kp = 0
self.kd = 0
self.ki = 0
self.u = 0
self.err = 0
self.err_hist = []
self.errp = 0
    def pid_cap(self):
        # Keep the error history for the integral and derivative terms.
        self.err_hist.append(self.err)
        if len(self.err_hist) > 1:
            errp = self.err_hist[-2]  # previous error; the newest entry is the current one
        else:
            errp = 0
        err_tot = sum(self.err_hist)
        dt = 0.05
        # PID terms; atan/tan and sin keep the angular error wrapped
        P = self.kp*np.arctan(np.tan(self.err/2))
        I = self.ki*err_tot*dt
        D = self.kd*np.sin(self.err - errp)/dt
        u = P + I + D
        publisher(u)
def main():
PID()
if __name__ == '__main__':
main()
| Projet-Guerledan/ROSonPi | glider_dir/src/reg_pkg/src/reg.py | Python | mit | 1,068 |
from django.shortcuts import render
# Create your views here.
from django.core.serializers import serialize
from django.http import HttpResponseRedirect,HttpResponse
from django.core.urlresolvers import reverse
from .forms import LocationForm
from django.shortcuts import render_to_response
from .models import *
from django.views.decorators.csrf import csrf_exempt
from django.contrib.gis.geos import GEOSGeometry
import json
def testmap(request):
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = LocationForm(request.POST)
# check whether it's valid:
if form.is_valid():
# process the data in form.cleaned_data as required
# ...
# redirect to a new URL:
return HttpResponseRedirect(reverse('maplist'))
# if a GET (or any other method) we'll create a blank form
else:
form = LocationForm()
return render(request, 'testmap.html', {'form': form.as_p()})
@csrf_exempt
def overlay_feature(request):
    # GeoJSON serialization reference:
    # https://docs.djangoproject.com/en/1.8/ref/contrib/gis/serializers/
    # Parse the JSON request body and pull out the WKT geometry to overlay
    # (credit: http://stackoverflow.com/questions/24068576/how-to-receive-json-data-using-http-post-request-in-django-1-6).
    b = json.loads(request.body)
    base_geom_to_find_wkt = b['wkt']
    base_geom_to_find = GEOSGeometry(base_geom_to_find_wkt)
    # Return every papa feature covered by the requested geometry as GeoJSON.
    papa_filtered = papa.objects.filter(geom__coveredby=base_geom_to_find)
    s = serialize('geojson', papa_filtered, geometry_field='geom')
return HttpResponse(s, content_type="application/json")
#return render(request, '_echo.html', {'echo': base_geom_to_find_wkt})
| bird50/birdproj | mapservice/views.py | Python | mit | 1,889 |
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
from collections import namedtuple
from inspect import isclass
import re
from peewee import *
from peewee import _StringField
from peewee import _query_val_transform
from peewee import CommaNodeList
from peewee import SCOPE_VALUES
from peewee import text_type
try:
from pymysql.constants import FIELD_TYPE
except ImportError:
try:
from MySQLdb.constants import FIELD_TYPE
except ImportError:
FIELD_TYPE = None
try:
from playhouse import postgres_ext
except ImportError:
postgres_ext = None
RESERVED_WORDS = set([
'and', 'as', 'assert', 'break', 'class', 'continue', 'def', 'del', 'elif',
'else', 'except', 'exec', 'finally', 'for', 'from', 'global', 'if',
'import', 'in', 'is', 'lambda', 'not', 'or', 'pass', 'print', 'raise',
'return', 'try', 'while', 'with', 'yield',
])
class UnknownField(object):
pass
class Column(object):
"""
Store metadata about a database column.
"""
primary_key_types = (IntegerField, AutoField)
def __init__(self, name, field_class, raw_column_type, nullable,
primary_key=False, column_name=None, index=False,
unique=False, default=None, extra_parameters=None):
self.name = name
self.field_class = field_class
self.raw_column_type = raw_column_type
self.nullable = nullable
self.primary_key = primary_key
self.column_name = column_name
self.index = index
self.unique = unique
self.default = default
self.extra_parameters = extra_parameters
# Foreign key metadata.
self.rel_model = None
self.related_name = None
self.to_field = None
def __repr__(self):
attrs = [
'field_class',
'raw_column_type',
'nullable',
'primary_key',
'column_name']
keyword_args = ', '.join(
'%s=%s' % (attr, getattr(self, attr))
for attr in attrs)
return 'Column(%s, %s)' % (self.name, keyword_args)
def get_field_parameters(self):
params = {}
if self.extra_parameters is not None:
params.update(self.extra_parameters)
# Set up default attributes.
if self.nullable:
params['null'] = True
if self.field_class is ForeignKeyField or self.name != self.column_name:
params['column_name'] = "'%s'" % self.column_name
if self.primary_key and not issubclass(self.field_class, AutoField):
params['primary_key'] = True
if self.default is not None:
params['constraints'] = '[SQL("DEFAULT %s")]' % self.default
# Handle ForeignKeyField-specific attributes.
if self.is_foreign_key():
params['model'] = self.rel_model
if self.to_field:
params['field'] = "'%s'" % self.to_field
if self.related_name:
params['backref'] = "'%s'" % self.related_name
# Handle indexes on column.
if not self.is_primary_key():
if self.unique:
params['unique'] = 'True'
elif self.index and not self.is_foreign_key():
params['index'] = 'True'
return params
def is_primary_key(self):
return self.field_class is AutoField or self.primary_key
def is_foreign_key(self):
return self.field_class is ForeignKeyField
def is_self_referential_fk(self):
return (self.field_class is ForeignKeyField and
self.rel_model == "'self'")
def set_foreign_key(self, foreign_key, model_names, dest=None,
related_name=None):
self.foreign_key = foreign_key
self.field_class = ForeignKeyField
if foreign_key.dest_table == foreign_key.table:
self.rel_model = "'self'"
else:
self.rel_model = model_names[foreign_key.dest_table]
self.to_field = dest and dest.name or None
self.related_name = related_name or None
def get_field(self):
# Generate the field definition for this column.
field_params = {}
for key, value in self.get_field_parameters().items():
if isclass(value) and issubclass(value, Field):
value = value.__name__
field_params[key] = value
param_str = ', '.join('%s=%s' % (k, v)
for k, v in sorted(field_params.items()))
field = '%s = %s(%s)' % (
self.name,
self.field_class.__name__,
param_str)
if self.field_class is UnknownField:
field = '%s # %s' % (field, self.raw_column_type)
return field
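    # Illustrative output of get_field() (hypothetical column names): a nullable
    # varchar column maps to
    #   email = CharField(null=True)
    # while a column whose type could not be resolved keeps a reminder of its raw type:
    #   payload = UnknownField()  # json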
class Metadata(object):
column_map = {}
extension_import = ''
def __init__(self, database):
self.database = database
self.requires_extension = False
def execute(self, sql, *params):
return self.database.execute_sql(sql, params)
def get_columns(self, table, schema=None):
metadata = OrderedDict(
(metadata.name, metadata)
for metadata in self.database.get_columns(table, schema))
# Look up the actual column type for each column.
column_types, extra_params = self.get_column_types(table, schema)
# Look up the primary keys.
pk_names = self.get_primary_keys(table, schema)
if len(pk_names) == 1:
pk = pk_names[0]
if column_types[pk] is IntegerField:
column_types[pk] = AutoField
elif column_types[pk] is BigIntegerField:
column_types[pk] = BigAutoField
columns = OrderedDict()
for name, column_data in metadata.items():
field_class = column_types[name]
default = self._clean_default(field_class, column_data.default)
columns[name] = Column(
name,
field_class=field_class,
raw_column_type=column_data.data_type,
nullable=column_data.null,
primary_key=column_data.primary_key,
column_name=name,
default=default,
extra_parameters=extra_params.get(name))
return columns
def get_column_types(self, table, schema=None):
raise NotImplementedError
def _clean_default(self, field_class, default):
if default is None or field_class in (AutoField, BigAutoField) or \
default.lower() == 'null':
return
if issubclass(field_class, _StringField) and \
isinstance(default, text_type) and not default.startswith("'"):
default = "'%s'" % default
return default or "''"
def get_foreign_keys(self, table, schema=None):
return self.database.get_foreign_keys(table, schema)
def get_primary_keys(self, table, schema=None):
return self.database.get_primary_keys(table, schema)
def get_indexes(self, table, schema=None):
return self.database.get_indexes(table, schema)
class PostgresqlMetadata(Metadata):
column_map = {
16: BooleanField,
17: BlobField,
20: BigIntegerField,
21: IntegerField,
23: IntegerField,
25: TextField,
700: FloatField,
701: FloatField,
1042: CharField, # blank-padded CHAR
1043: CharField,
1082: DateField,
1114: DateTimeField,
1184: DateTimeField,
1083: TimeField,
1266: TimeField,
1700: DecimalField,
2950: TextField, # UUID
}
array_types = {
1000: BooleanField,
1001: BlobField,
1005: SmallIntegerField,
1007: IntegerField,
1009: TextField,
1014: CharField,
1015: CharField,
1016: BigIntegerField,
1115: DateTimeField,
1182: DateField,
1183: TimeField,
}
extension_import = 'from playhouse.postgres_ext import *'
def __init__(self, database):
super(PostgresqlMetadata, self).__init__(database)
if postgres_ext is not None:
# Attempt to add types like HStore and JSON.
cursor = self.execute('select oid, typname, format_type(oid, NULL)'
' from pg_type;')
results = cursor.fetchall()
for oid, typname, formatted_type in results:
if typname == 'json':
self.column_map[oid] = postgres_ext.JSONField
elif typname == 'jsonb':
self.column_map[oid] = postgres_ext.BinaryJSONField
elif typname == 'hstore':
self.column_map[oid] = postgres_ext.HStoreField
elif typname == 'tsvector':
self.column_map[oid] = postgres_ext.TSVectorField
for oid in self.array_types:
self.column_map[oid] = postgres_ext.ArrayField
def get_column_types(self, table, schema):
column_types = {}
extra_params = {}
extension_types = set((
postgres_ext.ArrayField,
postgres_ext.BinaryJSONField,
postgres_ext.JSONField,
postgres_ext.TSVectorField,
postgres_ext.HStoreField)) if postgres_ext is not None else set()
# Look up the actual column type for each column.
identifier = '"%s"."%s"' % (schema, table)
cursor = self.execute('SELECT * FROM %s LIMIT 1' % identifier)
# Store column metadata in dictionary keyed by column name.
for column_description in cursor.description:
name = column_description.name
oid = column_description.type_code
column_types[name] = self.column_map.get(oid, UnknownField)
if column_types[name] in extension_types:
self.requires_extension = True
if oid in self.array_types:
extra_params[name] = {'field_class': self.array_types[oid]}
return column_types, extra_params
def get_columns(self, table, schema=None):
schema = schema or 'public'
return super(PostgresqlMetadata, self).get_columns(table, schema)
def get_foreign_keys(self, table, schema=None):
schema = schema or 'public'
return super(PostgresqlMetadata, self).get_foreign_keys(table, schema)
def get_primary_keys(self, table, schema=None):
schema = schema or 'public'
return super(PostgresqlMetadata, self).get_primary_keys(table, schema)
def get_indexes(self, table, schema=None):
schema = schema or 'public'
return super(PostgresqlMetadata, self).get_indexes(table, schema)
class MySQLMetadata(Metadata):
if FIELD_TYPE is None:
column_map = {}
else:
column_map = {
FIELD_TYPE.BLOB: TextField,
FIELD_TYPE.CHAR: CharField,
FIELD_TYPE.DATE: DateField,
FIELD_TYPE.DATETIME: DateTimeField,
FIELD_TYPE.DECIMAL: DecimalField,
FIELD_TYPE.DOUBLE: FloatField,
FIELD_TYPE.FLOAT: FloatField,
FIELD_TYPE.INT24: IntegerField,
FIELD_TYPE.LONG_BLOB: TextField,
FIELD_TYPE.LONG: IntegerField,
FIELD_TYPE.LONGLONG: BigIntegerField,
FIELD_TYPE.MEDIUM_BLOB: TextField,
FIELD_TYPE.NEWDECIMAL: DecimalField,
FIELD_TYPE.SHORT: IntegerField,
FIELD_TYPE.STRING: CharField,
FIELD_TYPE.TIMESTAMP: DateTimeField,
FIELD_TYPE.TIME: TimeField,
FIELD_TYPE.TINY_BLOB: TextField,
FIELD_TYPE.TINY: IntegerField,
FIELD_TYPE.VAR_STRING: CharField,
}
def __init__(self, database, **kwargs):
if 'password' in kwargs:
kwargs['passwd'] = kwargs.pop('password')
super(MySQLMetadata, self).__init__(database, **kwargs)
def get_column_types(self, table, schema=None):
column_types = {}
# Look up the actual column type for each column.
cursor = self.execute('SELECT * FROM `%s` LIMIT 1' % table)
# Store column metadata in dictionary keyed by column name.
for column_description in cursor.description:
name, type_code = column_description[:2]
column_types[name] = self.column_map.get(type_code, UnknownField)
return column_types, {}
class SqliteMetadata(Metadata):
column_map = {
'bigint': BigIntegerField,
'blob': BlobField,
'bool': BooleanField,
'boolean': BooleanField,
'char': CharField,
'date': DateField,
'datetime': DateTimeField,
'decimal': DecimalField,
'float': FloatField,
'integer': IntegerField,
'integer unsigned': IntegerField,
'int': IntegerField,
'long': BigIntegerField,
'numeric': DecimalField,
'real': FloatField,
'smallinteger': IntegerField,
'smallint': IntegerField,
'smallint unsigned': IntegerField,
'text': TextField,
'time': TimeField,
'varchar': CharField,
}
begin = '(?:["\[\(]+)?'
end = '(?:["\]\)]+)?'
re_foreign_key = (
'(?:FOREIGN KEY\s*)?'
'{begin}(.+?){end}\s+(?:.+\s+)?'
'references\s+{begin}(.+?){end}'
'\s*\(["|\[]?(.+?)["|\]]?\)').format(begin=begin, end=end)
re_varchar = r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$'
def _map_col(self, column_type):
raw_column_type = column_type.lower()
if raw_column_type in self.column_map:
field_class = self.column_map[raw_column_type]
elif re.search(self.re_varchar, raw_column_type):
field_class = CharField
else:
column_type = re.sub('\(.+\)', '', raw_column_type)
field_class = self.column_map.get(column_type, UnknownField)
return field_class
def get_column_types(self, table, schema=None):
column_types = {}
columns = self.database.get_columns(table)
for column in columns:
column_types[column.name] = self._map_col(column.data_type)
return column_types, {}
_DatabaseMetadata = namedtuple('_DatabaseMetadata', (
'columns',
'primary_keys',
'foreign_keys',
'model_names',
'indexes'))
class DatabaseMetadata(_DatabaseMetadata):
def multi_column_indexes(self, table):
accum = []
for index in self.indexes[table]:
if len(index.columns) > 1:
field_names = [self.columns[table][column].name
for column in index.columns
if column in self.columns[table]]
accum.append((field_names, index.unique))
return accum
def column_indexes(self, table):
accum = {}
for index in self.indexes[table]:
if len(index.columns) == 1:
accum[index.columns[0]] = index.unique
return accum
class Introspector(object):
pk_classes = [AutoField, IntegerField]
def __init__(self, metadata, schema=None):
self.metadata = metadata
self.schema = schema
def __repr__(self):
return '<Introspector: %s>' % self.metadata.database
@classmethod
def from_database(cls, database, schema=None):
if isinstance(database, PostgresqlDatabase):
metadata = PostgresqlMetadata(database)
elif isinstance(database, MySQLDatabase):
metadata = MySQLMetadata(database)
elif isinstance(database, SqliteDatabase):
metadata = SqliteMetadata(database)
else:
raise ValueError('Introspection not supported for %r' % database)
return cls(metadata, schema=schema)
def get_database_class(self):
return type(self.metadata.database)
def get_database_name(self):
return self.metadata.database.database
def get_database_kwargs(self):
return self.metadata.database.connect_params
def get_additional_imports(self):
if self.metadata.requires_extension:
return '\n' + self.metadata.extension_import
return ''
def make_model_name(self, table):
model = re.sub('[^\w]+', '', table)
model_name = ''.join(sub.title() for sub in model.split('_'))
if not model_name[0].isalpha():
model_name = 'T' + model_name
return model_name
def make_column_name(self, column, is_foreign_key=False):
column = column.lower().strip()
if is_foreign_key:
# Strip "_id" from foreign keys, unless the foreign-key happens to
# be named "_id", in which case the name is retained.
column = re.sub('_id$', '', column) or column
# Remove characters that are invalid for Python identifiers.
column = re.sub('[^\w]+', '_', column)
if column in RESERVED_WORDS:
column += '_'
if len(column) and column[0].isdigit():
column = '_' + column
return column
def introspect(self, table_names=None, literal_column_names=False,
include_views=False):
# Retrieve all the tables in the database.
tables = self.metadata.database.get_tables(schema=self.schema)
if include_views:
views = self.metadata.database.get_views(schema=self.schema)
tables.extend([view.name for view in views])
if table_names is not None:
tables = [table for table in tables if table in table_names]
table_set = set(tables)
# Store a mapping of table name -> dictionary of columns.
columns = {}
# Store a mapping of table name -> set of primary key columns.
primary_keys = {}
# Store a mapping of table -> foreign keys.
foreign_keys = {}
# Store a mapping of table name -> model name.
model_names = {}
# Store a mapping of table name -> indexes.
indexes = {}
# Gather the columns for each table.
for table in tables:
table_indexes = self.metadata.get_indexes(table, self.schema)
table_columns = self.metadata.get_columns(table, self.schema)
try:
foreign_keys[table] = self.metadata.get_foreign_keys(
table, self.schema)
except ValueError as exc:
err(*exc.args)
foreign_keys[table] = []
else:
# If there is a possibility we could exclude a dependent table,
# ensure that we introspect it so FKs will work.
if table_names is not None:
for foreign_key in foreign_keys[table]:
if foreign_key.dest_table not in table_set:
tables.append(foreign_key.dest_table)
table_set.add(foreign_key.dest_table)
model_names[table] = self.make_model_name(table)
# Collect sets of all the column names as well as all the
# foreign-key column names.
lower_col_names = set(column_name.lower()
for column_name in table_columns)
fks = set(fk_col.column for fk_col in foreign_keys[table])
for col_name, column in table_columns.items():
if literal_column_names:
new_name = re.sub('[^\w]+', '_', col_name)
else:
new_name = self.make_column_name(col_name, col_name in fks)
                # If we have two columns, "parent" and "parent_id", ensure
                # that we don't introduce naming conflicts.
lower_name = col_name.lower()
if lower_name.endswith('_id') and new_name in lower_col_names:
new_name = col_name.lower()
column.name = new_name
for index in table_indexes:
if len(index.columns) == 1:
column = index.columns[0]
if column in table_columns:
table_columns[column].unique = index.unique
table_columns[column].index = True
primary_keys[table] = self.metadata.get_primary_keys(
table, self.schema)
columns[table] = table_columns
indexes[table] = table_indexes
# Gather all instances where we might have a `related_name` conflict,
# either due to multiple FKs on a table pointing to the same table,
# or a related_name that would conflict with an existing field.
related_names = {}
sort_fn = lambda foreign_key: foreign_key.column
for table in tables:
models_referenced = set()
for foreign_key in sorted(foreign_keys[table], key=sort_fn):
try:
column = columns[table][foreign_key.column]
except KeyError:
continue
dest_table = foreign_key.dest_table
if dest_table in models_referenced:
related_names[column] = '%s_%s_set' % (
dest_table,
column.name)
else:
models_referenced.add(dest_table)
# On the second pass convert all foreign keys.
for table in tables:
for foreign_key in foreign_keys[table]:
src = columns[foreign_key.table][foreign_key.column]
try:
dest = columns[foreign_key.dest_table][
foreign_key.dest_column]
except KeyError:
dest = None
src.set_foreign_key(
foreign_key=foreign_key,
model_names=model_names,
dest=dest,
related_name=related_names.get(src))
return DatabaseMetadata(
columns,
primary_keys,
foreign_keys,
model_names,
indexes)
def generate_models(self, skip_invalid=False, table_names=None,
literal_column_names=False, bare_fields=False,
include_views=False):
database = self.introspect(table_names, literal_column_names,
include_views)
models = {}
class BaseModel(Model):
class Meta:
database = self.metadata.database
schema = self.schema
def _create_model(table, models):
for foreign_key in database.foreign_keys[table]:
dest = foreign_key.dest_table
if dest not in models and dest != table:
_create_model(dest, models)
primary_keys = []
columns = database.columns[table]
for column_name, column in columns.items():
if column.primary_key:
primary_keys.append(column.name)
multi_column_indexes = database.multi_column_indexes(table)
column_indexes = database.column_indexes(table)
class Meta:
indexes = multi_column_indexes
table_name = table
# Fix models with multi-column primary keys.
composite_key = False
if len(primary_keys) == 0:
primary_keys = columns.keys()
if len(primary_keys) > 1:
Meta.primary_key = CompositeKey(*[
field.name for col, field in columns.items()
if col in primary_keys])
composite_key = True
attrs = {'Meta': Meta}
for column_name, column in columns.items():
FieldClass = column.field_class
if FieldClass is not ForeignKeyField and bare_fields:
FieldClass = BareField
elif FieldClass is UnknownField:
FieldClass = BareField
params = {
'column_name': column_name,
'null': column.nullable}
if column.primary_key and composite_key:
if FieldClass is AutoField:
FieldClass = IntegerField
params['primary_key'] = False
elif column.primary_key and FieldClass is not AutoField:
params['primary_key'] = True
if column.is_foreign_key():
if column.is_self_referential_fk():
params['model'] = 'self'
else:
dest_table = column.foreign_key.dest_table
params['model'] = models[dest_table]
if column.to_field:
params['field'] = column.to_field
# Generate a unique related name.
params['backref'] = '%s_%s_rel' % (table, column_name)
if column.default is not None:
constraint = SQL('DEFAULT %s' % column.default)
params['constraints'] = [constraint]
if column_name in column_indexes and not \
column.is_primary_key():
if column_indexes[column_name]:
params['unique'] = True
elif not column.is_foreign_key():
params['index'] = True
attrs[column.name] = FieldClass(**params)
try:
models[table] = type(str(table), (BaseModel,), attrs)
except ValueError:
if not skip_invalid:
raise
# Actually generate Model classes.
for table, model in sorted(database.model_names.items()):
if table not in models:
_create_model(table, models)
return models
def introspect(database, schema=None):
introspector = Introspector.from_database(database, schema=schema)
return introspector.introspect()
def generate_models(database, schema=None, **options):
introspector = Introspector.from_database(database, schema=schema)
return introspector.generate_models(**options)
def print_model(model, indexes=True, inline_indexes=False):
print(model._meta.name)
for field in model._meta.sorted_fields:
parts = [' %s %s' % (field.name, field.field_type)]
if field.primary_key:
parts.append(' PK')
elif inline_indexes:
if field.unique:
parts.append(' UNIQUE')
elif field.index:
parts.append(' INDEX')
if isinstance(field, ForeignKeyField):
parts.append(' FK: %s.%s' % (field.rel_model.__name__,
field.rel_field.name))
print(''.join(parts))
if indexes and model._meta.indexes:
print('\nindex(es)')
for index in model._meta.fields_to_index():
parts = [' ']
ctx = model._meta.database.get_sql_context()
with ctx.scope_values(param='%s', quote='""'):
ctx.sql(CommaNodeList(index._expressions))
if index._where:
ctx.literal(' WHERE ')
ctx.sql(index._where)
sql, params = ctx.query()
clean = sql % tuple(map(_query_val_transform, params))
parts.append(clean.replace('"', ''))
if index._unique:
parts.append(' UNIQUE')
print(''.join(parts))
def get_table_sql(model):
sql, params = model._schema._create_table().query()
if model._meta.database.param != '%s':
sql = sql.replace(model._meta.database.param, '%s')
# Format and indent the table declaration, simplest possible approach.
match_obj = re.match('^(.+?\()(.+)(\).*)', sql)
create, columns, extra = match_obj.groups()
indented = ',\n'.join(' %s' % column for column in columns.split(', '))
clean = '\n'.join((create, indented, extra)).strip()
return clean % tuple(map(_query_val_transform, params))
def print_table_sql(model):
print(get_table_sql(model))
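# Example usage (illustrative sketch only; the database file and 'user' table
# below are hypothetical, not part of this module):
#     from peewee import SqliteDatabase
#     db = SqliteDatabase('app.db')
#     models = generate_models(db)           # maps table name -> Model class
#     print_model(models['user'])            # print fields, FKs and indexes
#     print(get_table_sql(models['user']))   # print the CREATE TABLE statement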
| gennaios/alfred-gnosis | src/playhouse/reflection.py | Python | mit | 28,321 |
"""Simple MQTT wrapper for subscriber and publisher.
Usage:
from mqttclient import MqttPublisher, MqttSubscriber
# --- init subscriber
# msg: MQTTMessage class, which has members topic, payload, qos, retain and mid.
def on_message(client, userdata, msg):
print(msg.topic+" "+str(msg.qos)+" "+str(msg.payload))
s = MqttSubscriber("hello/world")
s.start(on_message)
# --- init publisher
p = MqttPublisher("hello/world")
p.start()
# publish something
p.publish('test 123')
"""
import paho.mqtt.client as mqtt
import time
def on_connect(client, userdata, flags, rc):
m="Connected flags"+str(flags)+"; result code="\
+str(rc)+"; client: "+str(client)
print(m)
def on_subscribe(client, userdata, mid, granted_qos):
print("Subscribed: "+str(mid)+" "+str(granted_qos))
def on_disconnect(client, userdata, rc):
print("on_disconnect: userdata=%s; rc=%d" % (userdata, rc))
if rc != 0:
print("Unexpected disconnect")
class MqttBrokerClient(object):
def __init__(self, topic, client_id=None, hostname="localhost"):
self.topic = topic
self.hostname = hostname
self.client = mqtt.Client(client_id)
#attach function to callback
self.client.on_connect = on_connect
self.client.on_disconnect = on_disconnect
def start(self):
#connect to broker
self.client.connect(self.hostname)
# start the loop to receive callbacks for both subscriber and publisher
self.client.loop_start()
def stop(self):
self.client.loop_stop()
self.client.disconnect()
def user_data_set(self, userdata):
self.client.user_data_set(userdata)
class MqttPublisher(MqttBrokerClient):
def publish(self, msg):
self.client.publish(self.topic, msg)
class MqttSubscriber(MqttBrokerClient):
def start(self, receiver_cb):
MqttBrokerClient.start(self)
#attach function to callback
self.client.on_subscribe = on_subscribe
self.client.on_message=receiver_cb
#self.client.subscribe(self.topic, qos=1)
self.client.subscribe(self.topic)
def loop_forever(self):
"""Blocking loop."""
self.client.loop_forever()
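if __name__ == "__main__":
    # Minimal round-trip demo (sketch): assumes an MQTT broker is reachable on
    # localhost; the topic and payload are arbitrary examples.
    def demo_on_message(client, userdata, msg):
        print(msg.topic + " " + str(msg.qos) + " " + str(msg.payload))
    subscriber = MqttSubscriber("hello/world")
    subscriber.start(demo_on_message)
    publisher = MqttPublisher("hello/world")
    publisher.start()
    publisher.publish("test 123")
    time.sleep(1)  # give the network loop threads time to deliver the message
    publisher.stop()
    subscriber.stop()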
| manylabs/flow | flow/mqttclient.py | Python | mit | 2,237 |
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 2 16:03:30 2016
@author: Manojkumar Parmar VirKrupa
@Github Repo : MITx_Python
Modified on : 02/04/2016
Version : 1.0
Remarks:
"""
# pylint: disable=invalid-name
import random
import pylab
# You are given this function
def getMeanAndStd(X):
"""
    returns the calculated mean and standard deviation of list X
"""
mean = sum(X)/float(len(X))
tot = 0.0
for x in X:
tot += (x - mean)**2
std = (tot/len(X))**0.5
return mean, std
# You are given this class
# pylint: disable=R0903
class Die(object):
"""
Die class
"""
def __init__(self, valList):
""" valList is not empty """
self.possibleVals = valList[:]
def roll(self):
""" returns a single roll value of dice."""
return random.choice(self.possibleVals)
# pylint: enable=R0903
# Implement this -- Coding Part 1 of 2
def makeHistogram(values, numBins, xLabel, yLabel, title=None):
"""
- values, a sequence of numbers
- numBins, a positive int
- xLabel, yLabel, title, are strings
- Produces a histogram of values with numBins bins and the indicated
labels for the x and y axis
- If title is provided by caller, puts that title on the figure and
otherwise does not title the figure
"""
pylab.hist(values, numBins)
pylab.xlabel(xLabel)
pylab.ylabel(yLabel)
    if title is not None:
pylab.title(title)
pylab.show()
return None
# Implement this -- Coding Part 2 of 2
def getAverage(die, numRolls, numTrials):
"""
- die, a Die
- numRolls, numTrials, are positive ints
- Calculates the expected mean value of the longest run of a number
over numTrials runs of numRolls rolls.
- Calls makeHistogram to produce a histogram of the longest runs for all
the trials. There should be 10 bins in the histogram
- Choose appropriate labels for the x and y axes.
- Returns the mean calculated
"""
longRun = []
while numTrials > 0:
oldElement = die.roll()
locCount = 1
gloCount = 1
numRollTrial = numRolls
while numRollTrial > 1:
element = die.roll()
if oldElement == element:
locCount += 1
else:
locCount = 1
gloCount = max(gloCount, locCount)
oldElement = element
numRollTrial = numRollTrial - 1
longRun.append(gloCount)
numTrials = numTrials - 1
makeHistogram(longRun, 10, "Bins", "# trials")
# pylint: disable=unused-variable
mean, std = getMeanAndStd(longRun)
# pylint: disable=unused-variable
return mean
# test case implementation 1
#makeHistogram([], 1, "A", "B", "C")
#makeHistogram([1], 4, "Aa", "Bb", "Cc")
#makeHistogram([1,2], 4, "Aaa", "Bbb")
#makeHistogram([1,2,5,6,9,10], 4,"Aaaa", "Bbbb", "Cccc")
#makeHistogram([21,20,19,1,2,2,2,5,6,6,9,10], 5, "Aaaaa", "Bbbbb", "Ccccc")
# test case implementation 2
#print getAverage(Die([1]), 10, 1000)
#print getAverage(Die([1,1]), 10, 1000)
#print getAverage(Die([1,2,3,4,5,6]), 50, 1000)
#print getAverage(Die([1,2,3,4,5,6,6,6,7]), 50, 1000)
#print getAverage(Die([1,2,3,4,5,6,6,6,7]), 1, 1000)
#print getAverage(Die([1, 2, 3, 4, 5, 6, 6, 6, 7]), 500, 10000)
| parmarmanojkumar/MITx_Python | 6002x/quiz/p6.py | Python | mit | 3,326 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import inspect
import logging
import os
from . import Benchmark
from .report import BaseReporter
from ._compat import load_module, string_types
log = logging.getLogger(__name__)
class BenchmarkRunner(object):
'''Collect all benchmarks and run them'''
def __init__(self, *filenames, **kwargs):
'''
        :param filenames: the benchmark file names
        :type filenames: string
        :param reporters: the reporter classes or instances to run
:type reporters: list
:param debug: Run in debug mode if ``True``
:type debug: bool
'''
self.benchmarks = []
self.runned = []
self.reporters = []
self.debug = kwargs.get('debug', False)
for filename in filenames:
module = self.load_module(filename)
benchmarks = self.load_from_module(module)
self.benchmarks.extend(benchmarks)
for reporter in kwargs.get('reporters', []):
if inspect.isclass(reporter) and issubclass(reporter, BaseReporter):
reporter = reporter()
if isinstance(reporter, BaseReporter):
reporter.init(self)
self.reporters.append(reporter)
else:
log.warning('Unsupported reporter %s', reporter)
def run(self, **kwargs):
'''
Run all benchmarks.
        Extra kwargs are passed to the benchmark constructors.
'''
self.report_start()
for bench in self.benchmarks:
bench = bench(before=self.report_before_method,
after=self.report_after_method,
after_each=self.report_progress,
debug=self.debug,
**kwargs)
self.report_before_class(bench)
bench.run()
self.report_after_class(bench)
self.runned.append(bench)
self.report_end()
def load_module(self, filename):
'''Load a benchmark module from file'''
if not isinstance(filename, string_types):
return filename
basename = os.path.splitext(os.path.basename(filename))[0]
basename = basename.replace('.bench', '')
modulename = 'benchmarks.{0}'.format(basename)
return load_module(modulename, filename)
def load_from_module(self, module):
'''Load all benchmarks from a given module'''
benchmarks = []
for name in dir(module):
obj = getattr(module, name)
if (inspect.isclass(obj) and issubclass(obj, Benchmark)
and obj != Benchmark):
benchmarks.append(obj)
return benchmarks
def report_start(self):
for reporter in self.reporters:
reporter.start()
def report_before_class(self, bench):
for reporter in self.reporters:
reporter.before_class(bench)
def report_after_class(self, bench):
for reporter in self.reporters:
reporter.after_class(bench)
def report_before_method(self, bench, method):
for reporter in self.reporters:
reporter.before_method(bench, method)
def report_after_method(self, bench, method):
for reporter in self.reporters:
reporter.after_method(bench, method)
def report_progress(self, bench, method, times):
for reporter in self.reporters:
reporter.progress(bench, method, times)
def report_end(self):
for reporter in self.reporters:
reporter.end()
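# Example usage (sketch; the benchmark file name is illustrative):
#     runner = BenchmarkRunner('benchmarks/queries.bench.py', debug=True)
#     runner.run()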
| noirbizarre/minibench | minibench/runner.py | Python | mit | 3,606 |
class Solution(object):
def titleToNumber(self, s):
"""
:type s: str
:rtype: int
"""
ret = 0
for i in s:
ret *= 26
ret += ord(i)-ord('A')+1
return ret | xingjian-f/Leetcode-solution | 171. Excel Sheet Column Number.py | Python | mit | 238 |
NOT_PROVIDED = object()
class ConfigurationProvider:
def get(self, key):
raise NotImplementedError()
class DictConfig(ConfigurationProvider):
"""
Loads configuration values from the passed dictionary.
"""
def __init__(self, conf_dict, prefix=''):
self._conf_dict = conf_dict
self._prefix = prefix
def get(self, key):
try:
return self._conf_dict[self._prefix + key]
except KeyError:
return NOT_PROVIDED
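# Example (sketch):
#     conf = DictConfig({'app.debug': True}, prefix='app.')
#     conf.get('debug')    # -> True
#     conf.get('missing')  # -> the NOT_PROVIDED sentinel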
| GaretJax/storm-erp | storm/config/providers.py | Python | mit | 495 |
"""
grey Test Utils for Motor
"""
import unittest
from tornado.ioloop import IOLoop
from grey.tests.utils import GreyTest
class GreyAsyncTest(GreyTest):
# Ensure IOLoop stops to prevent blocking tests
def callback(self, func):
def wrapper(*args, **kwargs):
IOLoop.instance().stop()
try:
func(*args, **kwargs)
except AssertionError as e:
self.error = e
return wrapper
def wait(self):
IOLoop.instance().start()
def setUp(self):
self.error = None
super(GreyAsyncTest, self).setUp()
def tearDown(self):
if self.error: self.fail(str(self.error))
super(GreyAsyncTest, self).tearDown()
| GreyCorp/GreyServer | grey/tests/utils/mongo.py | Python | mit | 731 |
from sqlalchemy import *
from sqlalchemy.orm import *
class User(object):
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.name, self.email)
# Create engine
# engine = create_engine('sqlite:///:memory:')
# echo=True for debug information
# engine = create_engine('sqlite:///./sqlalchemy.db', echo=True)
engine = create_engine('sqlite:///./sqlalchemy.db')
print engine.execute("select 1").scalar()
# Get meta data of engine
metadata = MetaData(engine)
# Insert new table
# users_table = Table('users', metadata,
# Column('id', Integer, primary_key=True),
# Column('name', String(40)),
# Column('email', String(120)))
# users_table.create()
# Insert some records
users_table = Table('users', metadata, autoload=True)
i = users_table.insert()
# i.execute(name='rsj217', email='[email protected]')
# i.execute({'name': 'ghost'}, {'name': 'test'})
# Map class with table
mapper(User, users_table)
ul = User()
print ul
# Create session method 1
session = create_session()
query = session.query(User)
u = query.filter_by(name='rsj217').first()
print u.name
# Create session method 2
Session = sessionmaker(bind=engine)
session = Session()
u = User()
u.name = 'new'
session.add(u)
session.flush()
session.commit()
| quchunguang/test | testpy/testsqlalchemy.py | Python | mit | 1,310 |
#!/usr/bin/env python
import json
import urllib2
import requests
import time
from datetime import datetime
# url =
# 'http://ec2-54-187-18-145.us-west-2.compute.amazonaws.com/node/postdata/'
url = 'http://localhost:8000/node/postdata/'
# url = 'http://clairity.mit.edu/node/postdata/'
def send(url, values):
try:
print "url : " + url
r = requests.post(url, data=values)
try:
print json.dumps(r.json(), indent=4)
except:
print "FAILED \n\n\n\n"
pass
# print rk
# print dir(r)
except:
print "sending failed"
def test():
# dylos
values = {'node_id': 3,
'dylos_bin_1': 20,
'dylos_bin_2': 20,
'dylos_bin_3': 20,
'dylos_bin_4': 20,
'reading_time': datetime.now().isoformat()
}
send(url + 'dylos/', values)
values = {'node_id': 3,
'dylos_bin_1': -9999,
'dylos_bin_2': -9999,
'dylos_bin_3': -9999,
'dylos_bin_4': -9999,
'reading_time': datetime.now().isoformat()
}
send(url + 'dylos/', values)
values = {'node_id': 3,
'temperature': 20.3,
'rh': 20.3,
'reading_time': datetime.now().isoformat()
}
send(url + 'met/', values)
values = {'node_id': 3,
'temperature': -9999,
'rh': -9999,
'reading_time': datetime.now().isoformat()
}
send(url + 'met/', values)
values = {'node_id': 3,
'alphasense_1': 20.3,
'alphasense_2': 20.3,
'alphasense_3': 20.3,
'alphasense_4': 20.3,
'alphasense_5': 20.3,
'alphasense_6': 20.3,
'alphasense_7': 20.3,
'alphasense_8': 20.3,
'reading_time': datetime.now().isoformat()
}
send(url + 'alphasense/', values)
values = {'node_id': 3,
'alphasense_1': -9999,
'alphasense_2': -9999,
'alphasense_3': -9999,
'alphasense_4': -9999,
'alphasense_5': -9999,
'alphasense_6': -9999,
'alphasense_7': -9999,
'alphasense_8': -9999,
'reading_time': datetime.now().isoformat()
}
send(url + 'alphasense/', values)
if __name__ == '__main__':
test()
| clairityproject/backend | sample_post.py | Python | mit | 2,486 |
"""jsConfirm URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
# url(r'^admin/', include(admin.site.urls)),
url(r'^', include('home.urls', namespace="home")),
url(r'^api', include('api.urls', namespace="api")),
]
| chrissmejia/jsConfirm | djangodemo/jsConfirm/urls.py | Python | mit | 872 |
from evostream.default import api
from evostream.management.base import BaseEvoStreamCommand
class Command(BaseEvoStreamCommand):
help = 'Get a list of IDs for every active stream.'
requires_system_checks = False
def get_results(self, *args, **options):
return api.list_streams_ids()
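# Example (sketch): invoked like any other Django management command once the
# app is installed, e.g. `python manage.py liststreamsids`.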
| tomi77/django-evostream | evostream/management/commands/liststreamsids.py | Python | mit | 308 |
N, S = int(input()), input()
ans = N
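# Try every split of S into a prefix s1 and suffix s2, compute their longest
# common subsequence with DP, and keep the minimum leftover length N - 2 * LCS.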
for i in range(1, N):
s1, s2 = S[:i], S[i:]
dp = [[0]*(len(s2)+1) for _ in range(len(s1)+1)]
for j in range(len(s1)):
for k in range(len(s2)):
if (s1[j] == s2[k]):
dp[j+1][k+1] = max([dp[j][k] + 1, dp[j][k+1], dp[j+1][k]])
else:
dp[j+1][k+1] = max(dp[j][k+1], dp[j+1][k])
ans = min(ans, N - 2 * dp[len(s1)][len(s2)])
print(ans)
| knuu/competitive-programming | atcoder/corp/codefes2015_asa_b.py | Python | mit | 445 |
"""
Django settings for django-ddp-meteor-todo project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3!--#s0!((#n=4r9_g%bz^z$duwc4-wn_)546fl(_ad_+j2!5a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'dddp',
'dddp.accounts',
'django_todos',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'django-ddp-meteor-todo.urls'
WSGI_APPLICATION = 'django-ddp-meteor-todo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'django_ddp_meteor',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| cxhandley/django-ddp-meteor-todo | django-ddp-meteor-todo/settings.py | Python | mit | 2,138 |
# -*- coding: utf-8 -*-
#
# Copyright (c), 2016-2019, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <[email protected]>
#
"""
This module contains functions and classes for handling XSD declarations/definitions by namespace.
"""
from __future__ import unicode_literals
import re
import warnings
from collections import Counter
from ..exceptions import XMLSchemaKeyError, XMLSchemaTypeError, XMLSchemaValueError, XMLSchemaWarning
from ..namespaces import XSD_NAMESPACE
from ..qnames import XSD_INCLUDE, XSD_IMPORT, XSD_REDEFINE, XSD_OVERRIDE, XSD_NOTATION, XSD_ANY_TYPE, \
XSD_SIMPLE_TYPE, XSD_COMPLEX_TYPE, XSD_GROUP, XSD_ATTRIBUTE, XSD_ATTRIBUTE_GROUP, XSD_ELEMENT
from ..helpers import get_qname, local_name
from ..namespaces import NamespaceResourcesMap
from . import XMLSchemaNotBuiltError, XMLSchemaModelError, XMLSchemaModelDepthError, XsdValidator, \
XsdKeyref, XsdComponent, XsdAttribute, XsdSimpleType, XsdComplexType, XsdElement, XsdAttributeGroup, \
XsdGroup, XsdNotation, XsdAssert
from .builtins import xsd_builtin_types_factory
def camel_case_split(s):
"""
Split words of a camel case string
"""
return re.findall(r'[A-Z]?[a-z]+|[A-Z]+(?=[A-Z]|$)', s)
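# For example (illustrative): camel_case_split('maxOccurs') returns ['max', 'Occurs'].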
def iterchildren_by_tag(tag):
"""
    Defines a generator that produces all child elements that have a specific tag.
"""
def iterfind_function(elem):
for e in elem:
if e.tag == tag:
yield e
iterfind_function.__name__ = str('iterfind_xsd_%ss' % '_'.join(camel_case_split(local_name(tag))).lower())
return iterfind_function
iterchildren_xsd_import = iterchildren_by_tag(XSD_IMPORT)
iterchildren_xsd_include = iterchildren_by_tag(XSD_INCLUDE)
iterchildren_xsd_redefine = iterchildren_by_tag(XSD_REDEFINE)
iterchildren_xsd_override = iterchildren_by_tag(XSD_OVERRIDE)
#
# Defines the load functions for XML Schema structures
def create_load_function(filter_function):
def load_xsd_globals(xsd_globals, schemas):
redefinitions = []
for schema in schemas:
target_namespace = schema.target_namespace
for elem in iterchildren_xsd_redefine(schema.root):
location = elem.get('schemaLocation')
if location is None:
continue
for child in filter_function(elem):
qname = get_qname(target_namespace, child.attrib['name'])
redefinitions.append((qname, child, schema, schema.includes[location]))
for elem in filter_function(schema.root):
qname = get_qname(target_namespace, elem.attrib['name'])
try:
xsd_globals[qname].append((elem, schema))
except KeyError:
xsd_globals[qname] = (elem, schema)
except AttributeError:
xsd_globals[qname] = [xsd_globals[qname], (elem, schema)]
tags = Counter([x[0] for x in redefinitions])
for qname, elem, schema, redefined_schema in redefinitions:
# Checks multiple redefinitions
if tags[qname] > 1:
tags[qname] = 1
redefined_schemas = [x[3] for x in redefinitions if x[0] == qname]
if any(redefined_schemas.count(x) > 1 for x in redefined_schemas):
schema.parse_error(
"multiple redefinition for {} {!r}".format(local_name(elem.tag), qname), elem
)
else:
redefined_schemas = {x[3]: x[2] for x in redefinitions if x[0] == qname}
for rs, s in redefined_schemas.items():
while True:
try:
s = redefined_schemas[s]
except KeyError:
break
if s is rs:
schema.parse_error(
"circular redefinition for {} {!r}".format(local_name(elem.tag), qname), elem
)
break
# Append redefinition
try:
xsd_globals[qname].append((elem, schema))
except KeyError:
schema.parse_error("not a redefinition!", elem)
# xsd_globals[qname] = elem, schema
except AttributeError:
xsd_globals[qname] = [xsd_globals[qname], (elem, schema)]
return load_xsd_globals
load_xsd_simple_types = create_load_function(iterchildren_by_tag(XSD_SIMPLE_TYPE))
load_xsd_attributes = create_load_function(iterchildren_by_tag(XSD_ATTRIBUTE))
load_xsd_attribute_groups = create_load_function(iterchildren_by_tag(XSD_ATTRIBUTE_GROUP))
load_xsd_complex_types = create_load_function(iterchildren_by_tag(XSD_COMPLEX_TYPE))
load_xsd_elements = create_load_function(iterchildren_by_tag(XSD_ELEMENT))
load_xsd_groups = create_load_function(iterchildren_by_tag(XSD_GROUP))
load_xsd_notations = create_load_function(iterchildren_by_tag(XSD_NOTATION))
def create_lookup_function(xsd_classes):
if isinstance(xsd_classes, tuple):
types_desc = ' or '.join([c.__name__ for c in xsd_classes])
else:
types_desc = xsd_classes.__name__
def lookup(global_map, qname, tag_map):
try:
obj = global_map[qname]
except KeyError:
if '{' in qname:
raise XMLSchemaKeyError("missing a %s component for %r!" % (types_desc, qname))
raise XMLSchemaKeyError("missing a %s component for %r! As the name has no namespace "
"maybe a missing default namespace declaration." % (types_desc, qname))
else:
if isinstance(obj, xsd_classes):
return obj
elif isinstance(obj, tuple):
# Not built XSD global component without redefinitions
try:
elem, schema = obj
except ValueError:
return obj[0] # Circular build, simply return (elem, schema) couple
try:
factory_or_class = tag_map[elem.tag]
except KeyError:
raise XMLSchemaKeyError("wrong element %r for map %r." % (elem, global_map))
global_map[qname] = obj, # Encapsulate into a single-item tuple to catch circular builds
global_map[qname] = factory_or_class(elem, schema, parent=None)
return global_map[qname]
elif isinstance(obj, list):
# Not built XSD global component with redefinitions
try:
elem, schema = obj[0]
except ValueError:
return obj[0][0] # Circular build, simply return (elem, schema) couple
try:
factory_or_class = tag_map[elem.tag]
except KeyError:
raise XMLSchemaKeyError("wrong element %r for map %r." % (elem, global_map))
global_map[qname] = obj[0], # To catch circular builds
global_map[qname] = component = factory_or_class(elem, schema, parent=None)
# Apply redefinitions (changing elem involve a re-parsing of the component)
for elem, schema in obj[1:]:
component.redefine = component.copy()
component.redefine.parent = component
component.schema = schema
component.elem = elem
return global_map[qname]
else:
raise XMLSchemaTypeError(
"wrong instance %s for XSD global %r, a %s required." % (obj, qname, types_desc)
)
return lookup
lookup_notation = create_lookup_function(XsdNotation)
lookup_type = create_lookup_function((XsdSimpleType, XsdComplexType))
lookup_attribute = create_lookup_function(XsdAttribute)
lookup_attribute_group = create_lookup_function(XsdAttributeGroup)
lookup_group = create_lookup_function(XsdGroup)
lookup_element = create_lookup_function(XsdElement)
class XsdGlobals(XsdValidator):
"""
Mediator class for related XML schema instances. It stores the global
declarations defined in the registered schemas. Register a schema to
    add its declarations to the global maps.
:param validator: the origin schema class/instance used for creating the global maps.
:param validation: the XSD validation mode to use, can be 'strict', 'lax' or 'skip'.
"""
def __init__(self, validator, validation='strict'):
super(XsdGlobals, self).__init__(validation)
if not all(hasattr(validator, a) for a in ('meta_schema', 'BUILDERS_MAP')):
raise XMLSchemaValueError("The argument {!r} is not an XSD schema validator".format(validator))
self.validator = validator
self.namespaces = NamespaceResourcesMap() # Registered schemas by namespace URI
self.types = {} # Global types (both complex and simple)
self.attributes = {} # Global attributes
self.attribute_groups = {} # Attribute groups
self.groups = {} # Model groups
self.notations = {} # Notations
self.elements = {} # Global elements
self.substitution_groups = {} # Substitution groups
self.constraints = {} # Constraints (uniqueness, keys, keyref)
self.global_maps = (self.notations, self.types, self.attributes,
self.attribute_groups, self.groups, self.elements)
def __repr__(self):
return '%s(validator=%r, validation=%r)' % (self.__class__.__name__, self.validator, self.validation)
def copy(self, validator=None, validation=None):
"""Makes a copy of the object."""
obj = XsdGlobals(self.validator if validator is None else validator, validation or self.validation)
obj.namespaces.update(self.namespaces)
obj.types.update(self.types)
obj.attributes.update(self.attributes)
obj.attribute_groups.update(self.attribute_groups)
obj.groups.update(self.groups)
obj.notations.update(self.notations)
obj.elements.update(self.elements)
obj.substitution_groups.update(self.substitution_groups)
obj.constraints.update(self.constraints)
return obj
__copy__ = copy
def lookup_notation(self, qname):
return lookup_notation(self.notations, qname, self.validator.BUILDERS_MAP)
def lookup_type(self, qname):
return lookup_type(self.types, qname, self.validator.BUILDERS_MAP)
def lookup_attribute(self, qname):
return lookup_attribute(self.attributes, qname, self.validator.BUILDERS_MAP)
def lookup_attribute_group(self, qname):
return lookup_attribute_group(self.attribute_groups, qname, self.validator.BUILDERS_MAP)
def lookup_group(self, qname):
return lookup_group(self.groups, qname, self.validator.BUILDERS_MAP)
def lookup_element(self, qname):
return lookup_element(self.elements, qname, self.validator.BUILDERS_MAP)
def lookup(self, tag, qname):
if tag in (XSD_SIMPLE_TYPE, XSD_COMPLEX_TYPE):
return self.lookup_type(qname)
elif tag == XSD_ELEMENT:
return self.lookup_element(qname)
elif tag == XSD_GROUP:
return self.lookup_group(qname)
elif tag == XSD_ATTRIBUTE:
return self.lookup_attribute(qname)
elif tag == XSD_ATTRIBUTE_GROUP:
return self.lookup_attribute_group(qname)
elif tag == XSD_NOTATION:
return self.lookup_notation(qname)
else:
raise XMLSchemaValueError("wrong tag {!r} for an XSD global definition/declaration".format(tag))
@property
def built(self):
for schema in self.iter_schemas():
if not schema.built:
return False
return True
@property
def validation_attempted(self):
if self.built:
return 'full'
elif any([schema.validation_attempted == 'partial' for schema in self.iter_schemas()]):
return 'partial'
else:
return 'none'
@property
def validity(self):
if not self.namespaces:
return False
if all(schema.validity == 'valid' for schema in self.iter_schemas()):
return 'valid'
elif any(schema.validity == 'invalid' for schema in self.iter_schemas()):
return 'invalid'
else:
return 'notKnown'
@property
def resources(self):
return [(schema.url, schema) for schemas in self.namespaces.values() for schema in schemas]
@property
def all_errors(self):
errors = []
for schema in self.iter_schemas():
errors.extend(schema.all_errors)
return errors
def iter_components(self, xsd_classes=None):
if xsd_classes is None or isinstance(self, xsd_classes):
yield self
for xsd_global in self.iter_globals():
for obj in xsd_global.iter_components(xsd_classes):
yield obj
def iter_schemas(self):
"""Creates an iterator for the schemas registered in the instance."""
for ns_schemas in self.namespaces.values():
for schema in ns_schemas:
yield schema
def iter_globals(self):
"""
Creates an iterator for XSD global definitions/declarations.
"""
for global_map in self.global_maps:
for obj in global_map.values():
yield obj
def register(self, schema):
"""
Registers an XMLSchema instance.
"""
try:
ns_schemas = self.namespaces[schema.target_namespace]
except KeyError:
self.namespaces[schema.target_namespace] = [schema]
else:
if schema in ns_schemas:
return
elif not any([schema.url == obj.url and schema.__class__ == obj.__class__ for obj in ns_schemas]):
ns_schemas.append(schema)
def clear(self, remove_schemas=False, only_unbuilt=False):
"""
Clears the instance maps and schemas.
:param remove_schemas: removes also the schema instances.
:param only_unbuilt: removes only not built objects/schemas.
"""
if only_unbuilt:
not_built_schemas = {schema for schema in self.iter_schemas() if not schema.built}
if not not_built_schemas:
return
for global_map in self.global_maps:
for k in list(global_map.keys()):
obj = global_map[k]
if not isinstance(obj, XsdComponent) or obj.schema in not_built_schemas:
del global_map[k]
if k in self.substitution_groups:
del self.substitution_groups[k]
if k in self.constraints:
del self.constraints[k]
if remove_schemas:
namespaces = NamespaceResourcesMap()
for uri, value in self.namespaces.items():
for schema in value:
if schema not in not_built_schemas:
namespaces[uri] = schema
self.namespaces = namespaces
else:
for global_map in self.global_maps:
global_map.clear()
self.substitution_groups.clear()
self.constraints.clear()
if remove_schemas:
self.namespaces.clear()
def build(self):
"""
        Build the maps of XSD global definitions/declarations. The global maps
        are updated by adding and building the globals of registered schemas
        that are not yet built.
"""
try:
meta_schema = self.namespaces[XSD_NAMESPACE][0]
except KeyError:
            # Meta-schemas are not registered. If any of the base namespaces is already
            # registered, create a new meta-schema; otherwise register the meta-schemas.
meta_schema = self.validator.meta_schema
if meta_schema is None:
raise XMLSchemaValueError("{!r} has not a meta-schema".format(self.validator))
if any(ns in self.namespaces for ns in meta_schema.BASE_SCHEMAS):
base_schemas = {k: v for k, v in meta_schema.BASE_SCHEMAS.items() if k not in self.namespaces}
meta_schema = self.validator.create_meta_schema(meta_schema.url, base_schemas, self)
for schema in self.iter_schemas():
if schema.meta_schema is not None:
schema.meta_schema = meta_schema
else:
for schema in meta_schema.maps.iter_schemas():
self.register(schema)
self.types.update(meta_schema.maps.types)
self.attributes.update(meta_schema.maps.attributes)
self.attribute_groups.update(meta_schema.maps.attribute_groups)
self.groups.update(meta_schema.maps.groups)
self.notations.update(meta_schema.maps.notations)
self.elements.update(meta_schema.maps.elements)
self.substitution_groups.update(meta_schema.maps.substitution_groups)
self.constraints.update(meta_schema.maps.constraints)
not_built_schemas = [schema for schema in self.iter_schemas() if not schema.built]
for schema in not_built_schemas:
schema._root_elements = None
# Load and build global declarations
load_xsd_notations(self.notations, not_built_schemas)
load_xsd_simple_types(self.types, not_built_schemas)
load_xsd_attributes(self.attributes, not_built_schemas)
load_xsd_attribute_groups(self.attribute_groups, not_built_schemas)
load_xsd_complex_types(self.types, not_built_schemas)
load_xsd_elements(self.elements, not_built_schemas)
load_xsd_groups(self.groups, not_built_schemas)
if not meta_schema.built:
xsd_builtin_types_factory(meta_schema, self.types)
for qname in self.notations:
self.lookup_notation(qname)
for qname in self.attributes:
self.lookup_attribute(qname)
for qname in self.attribute_groups:
self.lookup_attribute_group(qname)
for qname in self.types:
self.lookup_type(qname)
for qname in self.elements:
self.lookup_element(qname)
for qname in self.groups:
self.lookup_group(qname)
# Builds element declarations inside model groups.
for schema in not_built_schemas:
for group in schema.iter_components(XsdGroup):
group.build()
for schema in filter(lambda x: x.meta_schema is not None, not_built_schemas):
# Build key references and assertions (XSD meta-schema doesn't have any of them)
for constraint in schema.iter_components(XsdKeyref):
constraint.parse_refer()
for assertion in schema.iter_components(XsdAssert):
assertion.parse()
self._check_schema(schema)
if self.validation == 'strict' and not self.built:
raise XMLSchemaNotBuiltError(self, "global map %r not built!" % self)
def _check_schema(self, schema):
        # Check substitution group circularities
for qname in self.substitution_groups:
xsd_element = self.elements[qname]
for e in xsd_element.iter_substitutes():
if e is xsd_element:
schema.parse_error("circularity found for substitution group with head element %r" % xsd_element)
if schema.XSD_VERSION > '1.0' and schema.default_attributes is not None:
if not isinstance(schema.default_attributes, XsdAttributeGroup):
schema.default_attributes = None
schema.parse_error("defaultAttributes={!r} doesn't match an attribute group of {!r}"
.format(schema.root.get('defaultAttributes'), schema), schema.root)
if schema.validation == 'skip':
return
# Check redefined global groups
for group in filter(lambda x: x.schema is schema and x.redefine is not None, self.groups.values()):
if not any(isinstance(e, XsdGroup) and e.name == group.name for e in group) \
and not group.is_restriction(group.redefine):
group.parse_error("The redefined group is an illegal restriction of the original group.")
        # Check complex content type models
for xsd_type in schema.iter_components(XsdComplexType):
if not isinstance(xsd_type.content_type, XsdGroup):
continue
base_type = xsd_type.base_type
if xsd_type.derivation == 'restriction':
if base_type and base_type.name != XSD_ANY_TYPE and base_type.is_complex():
if not xsd_type.content_type.is_restriction(base_type.content_type):
xsd_type.parse_error("The derived group is an illegal restriction of the base type group.")
try:
xsd_type.content_type.check_model()
except XMLSchemaModelDepthError:
msg = "cannot verify the content model of %r due to maximum recursion depth exceeded" % xsd_type
schema.warnings.append(msg)
warnings.warn(msg, XMLSchemaWarning, stacklevel=4)
except XMLSchemaModelError as err:
if self.validation == 'strict':
raise
xsd_type.errors.append(err)
| brunato/xmlschema | xmlschema/validators/globals_.py | Python | mit | 22,118 |
import superimport
import daft
# Colors.
p_color = {"ec": "#46a546"}
s_color = {"ec": "#f89406"}
r_color = {"ec": "#dc143c"}
SUB = str.maketrans("0123456789", "₀₁₂₃₄₅₆₇₈₉")
pgm = daft.PGM(shape=(12, 5), origin=(-1,0))
pgm.add_node("Ax", r"$\theta^h$", 2.5, 3, observed=False)
for i in range(4):
pgm.add_node("x{}".format(i), r"$x_{}$".format(i), i+1, 1, observed=True)
pgm.add_node("hx{}".format(i), r"$h^x_{}$".format(i), i + 1, 2, observed=False)
pgm.add_edge("Ax", "hx{}".format(i))
pgm.add_edge("hx{}".format(i), "x{}".format(i))
if i>0:
pgm.add_edge("hx{}".format(i - 1), "hx{}".format(i))
pgm.add_node("Ay", r"$\theta^h$", 7.5, 3, observed=False)
delta = 5
for i in range(4):
pgm.add_node("y{}".format(i), r"$y_{}$".format(i), i+1+delta, 1, observed=True)
pgm.add_node("hy{}".format(i), r"$h^y_{}$".format(i), i + 1+delta, 2, observed=False)
pgm.add_edge("Ay", "hy{}".format(i))
pgm.add_edge("hy{}".format(i), "y{}".format(i))
if i>0:
pgm.add_edge("hy{}".format(i - 1), "hy{}".format(i))
pgm.add_node("z", r"$z$", 5, 4, observed=False)
pgm.add_edge("z", "Ax")
pgm.add_edge("z", "Ay")
pgm.add_node("thetax", r"$\theta^x$", 0, 1, observed=False)
pgm.add_node("thetay", r"$\theta^y$", 10, 1, observed=False)
pgm.add_edge("thetax", "x0")
pgm.add_edge("thetay", "y3")
pgm.render()
pgm.savefig('../figures/visual_spelling_hmm_pgm.pdf')
pgm.show() | probml/pyprobml | scripts/visual_spelling_hmm_daft.py | Python | mit | 1,434 |
#!/usr/bin/env python
from numpy import loadtxt
from pylab import *
from scipy import *
from scipy.optimize import leastsq
#import sys
filename = 'data_CW.txt'
g2 = ['%g' %(0.5*200*(i+1)) for i in range(5)]
print g2
def fit(line):
filename = 'CW_g2_20_LW_0.7_dt_0.01_%s_uWcm2_1e-10_conS_10_O=12.txt' %line
print filename
#filename = sys.argv[1]
data = loadtxt(filename,skiprows=1)
x = data[:,0]
y = data[:,1]
x = x*1e7
y = y*1e10
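    # Fit model (as coded below): constant offset p[0], a Lorentzian centred at
    # p[1] with FWHM p[2] scaled by p[3], plus a linear background p[4]*x.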
fit_func = lambda p,x : p[0]+p[3]*(p[2]/2/pi)/((x-p[1])**2+(p[2]/2)**2)+p[4]*x
error_func = lambda p,x,y : fit_func(p,x)-y
p0 = [ 1000 , 0 , 1 , -50, 0]
p1,sucess = leastsq(error_func,p0[:],args=(x,y), maxfev=50000)
xfit = linspace(min(x),max(x), 1001)
plot(x,y,'.')
plot(xfit,fit_func(p1,xfit),'r-')
savefig('CW_g2_20_LW_0.7_dt_0.01_%s_uWcm2_1e-10_conS_10_O=12.png'%line)
clf()
p1[0]=p1[0]/1e10
p1[1]=p1[1]/1e7*1e9
p1[2]=p1[2]/1e7*1e9
p1[3]=p1[3]/1e10
pfinal=r_[float(line)*2,p1]
return pfinal
a = [fit(gg) for gg in g2]
data = open( filename, 'w')
data.write('detune(uW)\ty_offset\tx_offset(Hz)\twidth(Hz)\tpeak\n')
for element in a:
for index,value in enumerate(element):
data.write('%g' %value)
if (index+1 < len(element)):
data.write("\t")
data.write('\n')
data.close()
#data.write('')
#savetxt('data.txt', a)
| imrehg/bloch | fit_CW.py | Python | mit | 1,314 |
#Credits - https://discuss.pytorch.org/t/convert-int-into-one-hot-format/507/3
import torch
batch_size = 5
nb_digits = 10
# Dummy input that HAS to be 2D for the scatter (you can use view(-1,1) if needed)
y = torch.LongTensor(batch_size,1).random_() % nb_digits
# One hot encoding buffer that you create out of the loop and just keep reusing
y_onehot = torch.FloatTensor(batch_size, nb_digits)
# In your for loop
y_onehot.zero_()
y_onehot.scatter_(1, y, 1)
print(y)
print(y_onehot)
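# Each row of y_onehot now contains a single 1 at the index drawn in y.
# Newer PyTorch releases also offer torch.nn.functional.one_hot(y.squeeze(1), nb_digits)
# as a one-line alternative to the preallocated-buffer + scatter_ pattern above.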
| skrish13/pytorch-snippets | one-hot.py | Python | mit | 486 |
def get_class(obj):
return obj.__class__
def get_class_name(obj):
return obj.__class__.__name__
def get_methods_on_class(cls):
"""
    Returns a list of tuples containing all the method names and methods in a class,
    i.e. [(method_name1, method1), (method_name2, method2), ...]
"""
methods = []
methods_to_ignore = object.__dict__.keys() # __init__, __str__, etc.
for attr_name, attr_value in cls.__dict__.items():
match_str = '<function %s at' % attr_name
if match_str in str(attr_value):
if attr_name not in methods_to_ignore:
methods += [(attr_name, attr_value)]
return methods
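# Example (sketch):
#     get_class_name('text')  # -> 'str'
#     get_class(42)           # -> the int class object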
| crispycret/crispys_webkit | crispys_webkit/objprope.py | Python | mit | 614 |
# -*- encoding: utf-8 -*-
# JN 2016-02-16
"""
Plot a spectrum from the first 1000 records of data
"""
import sys
import scipy.signal as sig
import matplotlib.pyplot as mpl
from combinato import NcsFile, DefaultFilter
def plot_spectrum(fname):
fid = NcsFile(fname)
rawdata = fid.read(0, 1000)
data = rawdata * (1e6 * fid.header['ADBitVolts'])
fs = 1/fid.timestep
my_filter = DefaultFilter(fid.timestep)
filt_data = my_filter.filter_extract(data)
[f, p] = sig.welch(data, fs, nperseg=32768)
[f_filt, p_filt] = sig.welch(filt_data, fs, nperseg=32768)
fig = mpl.figure()
plot = fig.add_subplot(1, 1, 1)
plot.plot(f, p, label='Unfiltered')
plot.plot(f_filt, p_filt, label='Filtered')
plot.set_yscale('log')
plot.legend()
plot.set_ylabel(r'$\mu\mathrm{V}^2/\mathrm{Hz}$')
plot.set_xlabel(r'$\mathrm{Hz}$')
def main():
plot_spectrum(sys.argv[1])
mpl.show()
if __name__ == '__main__':
main()
| jniediek/combinato | tools/mini_spectrum.py | Python | mit | 970 |
#!/usr/bin/env python
from pylab import *
from scipy.io import mmread
A = mmread('poisson3Db.mtx')
fig, (ax1, ax2) = subplots(2, 1, sharex=True, figsize=(8,10), gridspec_kw=dict(height_ratios=[4,1]))
ax1.spy(A, marker='.', markersize=0.25, alpha=0.2)
ax2.semilogy(A.diagonal())
ax2.set_ylabel('Diagonal')
tight_layout()
savefig('Poisson3D.png')
| ddemidov/amgcl | tutorial/1.poisson3Db/plot.py | Python | mit | 348 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class ComputeManagementClientConfiguration(Configuration):
"""Configuration for ComputeManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
super(ComputeManagementClientConfiguration, self).__init__(**kwargs)
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2016-03-30"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-compute/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
| Azure/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/_configuration.py | Python | mit | 3,341 |
from .base import PInstruction
from ..ptypes import PType
class Ixa(PInstruction):
"""Indexed address computation:
STORE[SP - 1] := STORE[SP - 1] + STORE[SP] * q
SP := SP - 1
"""
def __init__(self, q: int) -> None:
self.q = q
def emit(self) -> str:
return 'ixa %d' % self.q
class Chk(PInstruction):
"""Error if the integer on top of the stack isn't in [p, q]."""
def __init__(self, p: int, q: int) -> None:
self.p = p
self.q = q
def emit(self) -> str:
return 'chk %d %d' % (self.p, self.q)
class Dpl(PInstruction):
"""Duplicate the top-of-stack."""
def __init__(self, t: PType) -> None:
self.t = t
def emit(self) -> str:
return 'dpl %s' % self.t.letter
class Ldd(PInstruction):
"""Dynamic array loading helper:
SP := SP + 1
STORE[SP] := STORE[STORE[SP - 3] + q]
"""
def __init__(self, q: int) -> None:
self.q = q
def emit(self) -> str:
return 'ldd %d' % self.q
class Sli(PInstruction):
"""Slide: remove the element *below* the top element on the stack. The type in `sli T`
should be that of the top element."""
def __init__(self, t: PType) -> None:
self.t = t
def emit(self) -> str:
return 'sli %s' % self.t.letter
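# Example (sketch): emitting a bounds check followed by an indexed address step.
#     Chk(0, 9).emit()  # -> 'chk 0 9'
#     Ixa(4).emit()     # -> 'ixa 4'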
| Sibert-Aerts/c2p | src/c2p/instructions/array.py | Python | mit | 1,343 |
from __future__ import print_function
import logging
class ShutItSendSpec(object):
"""Specification for arguments to send to shutit functions.
"""
def __init__(self,
shutit_pexpect_child,
send=None,
send_dict=None,
expect=None,
timeout=None,
check_exit=None,
fail_on_empty_before=True,
record_command=True,
exit_values=None,
echo=None,
escape=False,
check_sudo=True,
retry=3,
note=None,
assume_gnu=True,
follow_on_commands=None,
searchwindowsize=None,
maxread=None,
delaybeforesend=None,
secret=False,
nonewline=False,
user=None,
password=None,
is_ssh=None,
go_home=True,
prompt_prefix=None,
remove_on_match=None,
fail_on_fail=True,
ignore_background=False,
run_in_background=False,
block_other_commands=True,
wait_cadence=2,
loglevel=logging.INFO):
"""Specification for arguments to send to shutit functions.
@param send: String to send, ie the command being issued. If set to
None, we consume up to the expect string, which is useful if we
just matched output that came before a standard command that
returns to the prompt.
@param send_dict: dict of sends and expects, eg:
{'interim prompt:',['some input',False],'input password:':['mypassword',True]}
Note that the boolean indicates whether the match results in the removal of the send dict expects from the interaction and assumes a prompt follows.
@param expect: String that we expect to see in the output. Usually a
prompt. Defaults to currently-set expect string (see
set_default_shutit_pexpect_session_expect)
@param shutit_pexpect_child: pexpect child to issue command to.
@param timeout: Timeout on response
@param check_exit: Whether to check the shell exit code of the passed-in
command. If the exit value was non-zero an error is thrown.
(default=None, which takes the currently-configured check_exit
value) See also fail_on_empty_before.
@param fail_on_empty_before: If debug is set, fail on empty match output
string (default=True) If this is set to False, then we don't
check the exit value of the command.
@param record_command: Whether to record the command for output at end.
As a safety measure, if the command matches any 'password's then
we don't record it.
@param exit_values: Array of acceptable exit values as strings
@param echo: Whether to suppress any logging output from pexpect to the
terminal or not. We don't record the command if this is set to
False unless record_command is explicitly passed in as True.
@param escape: Whether to escape the characters in a bash-friendly way, eg $'\\Uxxxxxx'
@param check_sudo: Check whether we have sudo available and if we already have sudo rights
cached.
@param retry: Number of times to retry the command if the first attempt
doesn't work. Useful if going to the network
@param note: If a note is passed in, and we are in walkthrough mode,
pause with the note printed
@param assume_gnu: Assume the gnu version of commands, which are not in
@param follow_on_commands: A dictionary of the form: {match_string: command, match_string2: command2}
which runs commands based on whether the output matched.
Follow-on commands are always foregrounded and always ignore backgrounded processes.
@param searchwindowsize: Passed into pexpect session
@param maxread: Passed into pexpect session
@param delaybeforesend: Passed into pexpect session
@param secret: Whether what is being sent is a secret
@param nonewline: Whether to omit the newline from the send
@param user: If logging in, user to use. Default is 'root'.
@param password: If logging in, password to use. Default is 'root'.
@param is_ssh: Indicates whether the login is an ssh one if it is not an ssh command
@param go_home: On logging in, whether to go to the home dir. Default is True.
@param prompt_prefix: Override of random prompt prefix created by prompt setup.
@param remove_on_match: If the item matches, remove the send_dict from future expects (eg if
it's a password). This makes the 'am I logged in yet?' checking more robust.
@param ignore_background: Whether to block if there are background tasks
running in this session that are blocking, or ignore ALL
background tasks and run anyway. Default is False.
@param run_in_background: Whether to run in the background
@param block_other_commands: Whether to block other commands from running
(unless ignore_background is set on those other commands).
Default is True.
		@param wait_cadence: If blocked and waiting on a background task, wait this
number of seconds before re-checking. Default is 2.
@param loglevel: Log level at which to operate.
Background Commands
===================
+------------------+-------------------+----------------------+------------------------------------------+
|run_in_background | ignore_background | block_other_commands | Outcome |
+------------------+-------------------+----------------------+------------------------------------------+
|T | T | T | 'Just run in background and queue others'|
| | | | Runs the command in the background, |
| | | | ignoring all blocking background tasks |
| | | | even if they are blocking, and blocking |
| | | | new background tasks (if they don't |
| | | | ignore blocking background tasks). |
+------------------+-------------------+----------------------+------------------------------------------+
|T | F | T | 'Run in background if not blocked, and |
| | | | queue others' |
| | | | Runs the command in the background, |
| | | | but will block if there are blocking |
| | | | background tasks running. It will block |
| | | | new background tasks (if they don't |
| | | | ignore blocking background tasks). |
+------------------+-------------------+----------------------+------------------------------------------+
|T | F | F | 'Run in background if not blocked, and |
| | | | let others run' |
+------------------+-------------------+----------------------+------------------------------------------+
|F | T | N/A | 'Run in foreground, ignoring any |
| | | | background commands and block any new |
| | | | background commands.' |
+------------------+-------------------+----------------------+------------------------------------------+
|F | F | N/A | 'Run in foreground, blocking if there are|
| | | | any background tasks running, and |
| | | | blocking any new background commands.' |
+------------------+-------------------+----------------------+------------------------------------------+
Example
=======
Scenario is that we want to:
update the file database with 'updatedb'
then find a file that we expect to be in that database with 'locate file_to_find'
and then add a line to that file with 'echo line >> file_to_find'
Statement: I want to run this command in the background in this ShutIt session.
I want to stop other background commands from running.
I don't care if other background commands are running which block this.
Example send: updatedb
Args: run_in_background=True, ignore_background=True, block_other_commands=True
Statement: I want to run this command in the background in this ShutIt session.
I want to stop other background commands from running.
I don't want to run if other blocking background commands are running.
Example send: locate file_to_find
Args: run_in_background=True, ignore_background=False, block_other_commands=True
Statement: I just want to run this command in the background in the ShutIt session and forget about it.
I don't care if there are other background tasks running which block this.
I don't want to block other commands, nothing will depend on this completing.
Example send: echo 'Add line to file' >> /path/to/file_to_find
Args: run_in_background=True, ignore_background=True, block_other_commands=False
"""
self.send = send
self.original_send = send
self.send_dict = send_dict
self.expect = expect
self.shutit_pexpect_child = shutit_pexpect_child
self.timeout = timeout
self.check_exit = check_exit
self.fail_on_empty_before = fail_on_empty_before
self.record_command = record_command
self.exit_values = exit_values
self.echo = echo
self.escape = escape
self.check_sudo = check_sudo
self.retry = retry
self.note = note
self.assume_gnu = assume_gnu
self.follow_on_commands = follow_on_commands
self.searchwindowsize = searchwindowsize
self.maxread = maxread
self.delaybeforesend = delaybeforesend
self.secret = secret
self.nonewline = nonewline
self.loglevel = loglevel
self.user = user
self.password = password
self.is_ssh = is_ssh
self.go_home = go_home
self.prompt_prefix = prompt_prefix
self.remove_on_match = remove_on_match
self.fail_on_fail = fail_on_fail
self.ignore_background = ignore_background
self.run_in_background = run_in_background
self.block_other_commands = block_other_commands
self.wait_cadence = wait_cadence
# BEGIN Setup/checking
self.started = False
if self.check_exit and self.run_in_background:
self.check_exit = False
#if send_dict and run_in_background:
#assert False, shutit_util.print_debug()
# END Setup/checking
# send_dict can come in with items that are: val:string, or val:[string,boolean]
# ensure they end up as the latter, defaulting to false.
if self.send_dict is not None:
assert isinstance(self.send_dict, dict), shutit_util.print_debug()
for key in self.send_dict:
val = self.send_dict[key]
assert isinstance(val,(str,list)), shutit_util.print_debug()
if isinstance(val,str):
self.send_dict.update({key:[val,False]})
elif isinstance(val,list):
assert len(val) == 2, shutit_util.print_debug()
else:
assert False, shutit_util.print_debug(msg='send_dict check should not get here')
if self.exit_values is None:
self.exit_values = ['0',]
def __str__(self):
string = '\n---- Sendspec object BEGIN ----'
string += '| assume_gnu = ' + str(self.assume_gnu)
string += '| block_other_commands = ' + str(self.block_other_commands)
string += '| check_exit = ' + str(self.check_exit)
string += '| check_sudo = ' + str(self.check_sudo)
string += '| delaybeforesend = ' + str(self.delaybeforesend)
string += '| echo = ' + str(self.echo)
string += '| escape = ' + str(self.escape)
string += '| exit_values = ' + str(self.exit_values)
string += '| expect = ' + str(self.expect)
string += '| fail_on_empty_before = ' + str(self.fail_on_empty_before)
string += '| fail_on_fail = ' + str(self.fail_on_fail)
string += '| follow_on_commands = ' + str(self.follow_on_commands)
string += '| go_home = ' + str(self.go_home)
string += '| ignore_background = ' + str(self.ignore_background)
string += '| is_ssh = ' + str(self.is_ssh)
string += '| loglevel = ' + str(self.loglevel)
string += '| maxread = ' + str(self.maxread)
string += '| nonewline = ' + str(self.nonewline)
string += '| note = ' + str(self.note)
string += '| original_send = ' + str(self.original_send)
string += '| password = ' + str(self.password)
string += '| prompt_prefix = ' + str(self.prompt_prefix)
string += '| record_command = ' + str(self.record_command)
string += '| remove_on_match = ' + str(self.remove_on_match)
string += '| retry = ' + str(self.retry)
string += '| run_in_background = ' + str(self.run_in_background)
string += '| searchwindowsize = ' + str(self.searchwindowsize)
string += '| secret = ' + str(self.secret)
string += '| send = ' + str(self.send)
string += '| send_dict = ' + str(self.send_dict)
string += '| started = ' + str(self.started)
string += '| timeout = ' + str(self.timeout)
string += '| user = ' + str(self.user)
string += '| wait_cadence = ' + str(self.wait_cadence)
string += '|---- Sendspec object ENDS ----'
return string
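# A minimal sketch, assuming the enclosing class above is named ShutItSendSpec
# and that a pexpect child object is available to pass in (both assumptions
# made for illustration): it builds send specs for the three background-command
# scenarios walked through in the docstring's Example section, using only the
# keyword arguments documented there.
def _example_background_sendspecs(shutit_pexpect_child):
	"""Return example send specs for the documented background scenarios."""
	# 'Just run in background and queue others'
	run_and_queue_others = ShutItSendSpec(
		shutit_pexpect_child,
		send='updatedb',
		run_in_background=True,
		ignore_background=True,
		block_other_commands=True)
	# 'Run in background if not blocked, and queue others'
	run_if_not_blocked = ShutItSendSpec(
		shutit_pexpect_child,
		send='locate file_to_find',
		run_in_background=True,
		ignore_background=False,
		block_other_commands=True)
	# Fire and forget: nothing depends on this completing
	fire_and_forget = ShutItSendSpec(
		shutit_pexpect_child,
		send="echo 'Add line to file' >> /path/to/file_to_find",
		run_in_background=True,
		ignore_background=True,
		block_other_commands=False)
	return [run_and_queue_others, run_if_not_blocked, fire_and_forget]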
| ianmiell/shutit | shutit_sendspec.py | Python | mit | 15,169 |
from os.path import join
import sh
from pythonforandroid.recipe import NDKRecipe
from pythonforandroid.util import current_directory
from pythonforandroid.logger import shprint
from multiprocessing import cpu_count
class OpenCVRecipe(NDKRecipe):
'''
.. versionchanged:: 0.7.1
rewrote recipe to support the python bindings (cv2.so) and enable the
    build of most of the libraries of the opencv package, so we can
process images, videos, objects, photos...
'''
version = '4.0.1'
url = 'https://github.com/opencv/opencv/archive/{version}.zip'
depends = ['numpy']
patches = ['patches/p4a_build.patch']
generated_libraries = [
'libopencv_features2d.so',
'libopencv_imgproc.so',
'libopencv_stitching.so',
'libopencv_calib3d.so',
'libopencv_flann.so',
'libopencv_ml.so',
'libopencv_videoio.so',
'libopencv_core.so',
'libopencv_highgui.so',
'libopencv_objdetect.so',
'libopencv_video.so',
'libopencv_dnn.so',
'libopencv_imgcodecs.so',
'libopencv_photo.so'
]
def get_lib_dir(self, arch):
return join(self.get_build_dir(arch.arch), 'build', 'lib', arch.arch)
def get_recipe_env(self, arch):
env = super(OpenCVRecipe, self).get_recipe_env(arch)
env['ANDROID_NDK'] = self.ctx.ndk_dir
env['ANDROID_SDK'] = self.ctx.sdk_dir
return env
def build_arch(self, arch):
build_dir = join(self.get_build_dir(arch.arch), 'build')
shprint(sh.mkdir, '-p', build_dir)
with current_directory(build_dir):
env = self.get_recipe_env(arch)
python_major = self.ctx.python_recipe.version[0]
python_include_root = self.ctx.python_recipe.include_root(arch.arch)
python_site_packages = self.ctx.get_site_packages_dir()
python_link_root = self.ctx.python_recipe.link_root(arch.arch)
python_link_version = self.ctx.python_recipe.major_minor_version_string
if 'python3' in self.ctx.python_recipe.name:
python_link_version += 'm'
python_library = join(python_link_root,
'libpython{}.so'.format(python_link_version))
python_include_numpy = join(python_site_packages,
'numpy', 'core', 'include')
shprint(sh.cmake,
'-DP4A=ON',
'-DANDROID_ABI={}'.format(arch.arch),
'-DANDROID_STANDALONE_TOOLCHAIN={}'.format(self.ctx.ndk_dir),
'-DANDROID_NATIVE_API_LEVEL={}'.format(self.ctx.ndk_api),
'-DANDROID_EXECUTABLE={}/tools/android'.format(env['ANDROID_SDK']),
'-DCMAKE_TOOLCHAIN_FILE={}'.format(
join(self.ctx.ndk_dir, 'build', 'cmake',
'android.toolchain.cmake')),
                    # Link against our python library, otherwise we will
                    # get dlopen errors when trying to import the cv2 module.
'-DCMAKE_SHARED_LINKER_FLAGS=-L{path} -lpython{version}'.format(
path=python_link_root,
version=python_link_version),
'-DBUILD_WITH_STANDALONE_TOOLCHAIN=ON',
                    # Force the libs cv2 depends on to be built as shared
                    # libraries, or we will not be able to link them with our python
'-DBUILD_SHARED_LIBS=ON',
'-DBUILD_STATIC_LIBS=OFF',
# Disable some opencv's features
'-DBUILD_opencv_java=OFF',
'-DBUILD_opencv_java_bindings_generator=OFF',
# '-DBUILD_opencv_highgui=OFF',
# '-DBUILD_opencv_imgproc=OFF',
# '-DBUILD_opencv_flann=OFF',
'-DBUILD_TESTS=OFF',
'-DBUILD_PERF_TESTS=OFF',
'-DENABLE_TESTING=OFF',
'-DBUILD_EXAMPLES=OFF',
'-DBUILD_ANDROID_EXAMPLES=OFF',
                    # Force building the bindings only for our version of python
'-DBUILD_OPENCV_PYTHON{major}=ON'.format(major=python_major),
'-DBUILD_OPENCV_PYTHON{major}=OFF'.format(
major='2' if python_major == '3' else '3'),
# Force to install the `cv2.so` library directly into
# python's site packages (otherwise the cv2's loader fails
# on finding the cv2.so library)
'-DOPENCV_SKIP_PYTHON_LOADER=ON',
'-DOPENCV_PYTHON{major}_INSTALL_PATH={site_packages}'.format(
major=python_major, site_packages=python_site_packages),
# Define python's paths for: exe, lib, includes, numpy...
'-DPYTHON_DEFAULT_EXECUTABLE={}'.format(self.ctx.hostpython),
'-DPYTHON{major}_EXECUTABLE={host_python}'.format(
major=python_major, host_python=self.ctx.hostpython),
'-DPYTHON{major}_INCLUDE_PATH={include_path}'.format(
major=python_major, include_path=python_include_root),
'-DPYTHON{major}_LIBRARIES={python_lib}'.format(
major=python_major, python_lib=python_library),
'-DPYTHON{major}_NUMPY_INCLUDE_DIRS={numpy_include}'.format(
major=python_major, numpy_include=python_include_numpy),
'-DPYTHON{major}_PACKAGES_PATH={site_packages}'.format(
major=python_major, site_packages=python_site_packages),
self.get_build_dir(arch.arch),
_env=env)
shprint(sh.make, '-j' + str(cpu_count()), 'opencv_python' + python_major)
# Install python bindings (cv2.so)
shprint(sh.cmake, '-DCOMPONENT=python', '-P', './cmake_install.cmake')
# Copy third party shared libs that we need in our final apk
sh.cp('-a', sh.glob('./lib/{}/lib*.so'.format(arch.arch)),
self.ctx.get_libs_dir(arch.arch))
recipe = OpenCVRecipe()
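# A minimal sketch of what `generated_libraries` and `get_lib_dir()` above
# describe: after `build_arch()` has run, each listed .so is expected to sit
# in the per-arch build lib directory. The `arch` object is whatever
# python-for-android passes to the recipe; treating it as having an `.arch`
# attribute is taken from the code above, not verified here.
def _generated_libs_present(opencv_recipe, arch):
    from os.path import exists
    lib_dir = opencv_recipe.get_lib_dir(arch)
    return all(exists(join(lib_dir, lib_name))
               for lib_name in opencv_recipe.generated_libraries)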
| kronenpj/python-for-android | pythonforandroid/recipes/opencv/__init__.py | Python | mit | 6,295 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "iriot.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| j-windsor/iRiot-WebApp | manage.py | Python | mit | 248 |
"""This module defines the core CLF spidering API.
For example, a spider author creates a spider by
creating a new class which derives from :py:class:`Spider`.
Next the spider author implements :py:meth:`Spider.crawl`
which returns an instance of :py:class:`CrawlResponse`.
Both :py:class:`Spider` and :py:class:`CrawlResponse`
are defined in this module.
"""
import copy
import datetime
import getpass
import hashlib
import inspect
import imp
import importlib
import logging
import logging.config
import os
import pkg_resources
import pkgutil
import re
import sys
import time
import tempfile
import colorama
import dateutil.parser
import jsonschema
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.chrome.options import Options
import selenium.webdriver.support.select
import cloudfeaster_extension
from . import jsonschemas
from . import privacy
from . import util
_logger = logging.getLogger(__name__)
# making calls to time.sleep() easier to understand
_quarter_of_a_second = 0.25
# making calls to time.sleep() easier to understand
_half_a_second = 0.5
# making calls to time.sleep() easier to understand
_one_second = 1
def _snake_to_camel_case(s):
return re.sub(
'_(.)',
lambda match: match.group(1).upper(),
s)
def _utc_now():
return datetime.datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
class Spider(object):
"""Base class for all spiders"""
_replace_spiders_postfix_reg_ex = re.compile(r'[-_]spiders$', re.IGNORECASE)
@classmethod
def _get_crawl_method_arg_names_for_use_as_factors(cls):
"""Returns the list of argument names for
crawl(). If the spider doesn't have a crawl method
returns None.
"""
def is_crawl_instance_method(t):
if not inspect.isfunction(t):
return False
# if inspect.isclass(t.__self__):
# return False
# permit a concrete spider (cls) to be
# derived from an abstract base class
# spider that's defined in a different
# module but also make sure somewhere
            # in the inheritance hierarchy that something
# other than Spider has defined a crawl
# method
if t.__module__ == Spider.__module__:
return False
return t.__name__ == "crawl"
for (_, t) in inspect.getmembers(cls, is_crawl_instance_method):
# 2: below since all crawl methods should have @ least 2 args
# arg 1 = self ie. the spider instance
# arg 2 = browser
return inspect.getfullargspec(t).args[2:]
return None
@classmethod
def get_validated_metadata(cls):
"""Spiders supply their metadata by overriding
:py:meth:`Spider.get_metadata` and
those wishing to retrieve a spider's metadata
should call this method. This method validates
and potentially modifies the metadata returned
by :py:meth:`Spider.get_metadata`
to add aspects of the metadata which can be
determined by inspecting the spider's source code.
"""
# making a copy of the metadata because we're going to
# potentially make modifications to the metadata and
# didn't want to mess with the original
metadata = copy.deepcopy(cls.get_metadata())
try:
jsonschema.validate(metadata, jsonschemas.spider_metadata)
except Exception as ex:
raise SpiderMetadataError(cls, ex=ex)
if not metadata.get('categories', []):
#
# A spider's fully qualified name will be something like gaming_spiders.miniclip.Spider
# so in this case the class name is Spider, module name is miniclip and the package
# name is gaming_spiders.
#
# Spiders are grouped into categories.
# A spider can appear in more than one category.
# A spider's categories are declared as part of the spider's metadata.
# If no categories are declared in a spider's metadata then the
# spider's default category is used.
#
metadata['categories'] = [cls.get_default_category()]
metadata['absoluteFilename'] = sys.modules[cls.__module__].__file__
metadata['fullyQualifiedClassName'] = '{module}.{cls}'.format(
module=cls.__module__,
cls=cls.__name__)
crawl_method_arg_names = cls._get_crawl_method_arg_names_for_use_as_factors()
if crawl_method_arg_names is None:
message_detail = "crawl() method arg names not found"
raise SpiderMetadataError(cls, message_detail=message_detail)
identifying_factors = metadata.get("identifyingFactors", {})
metadata["identifyingFactors"] = identifying_factors
authenticating_factors = metadata.get("authenticatingFactors", {})
metadata["authenticatingFactors"] = authenticating_factors
factors = list(identifying_factors.keys())
factors.extend(authenticating_factors.keys())
camel_cased_factors = [_snake_to_camel_case(factor) for factor in factors]
expected_camel_cased_crawl_method_arg_names = camel_cased_factors[:]
camel_cased_crawl_method_arg_names = [_snake_to_camel_case(arg) for arg in crawl_method_arg_names]
# :QUESTION: why is a `set()` being used here?
if set(expected_camel_cased_crawl_method_arg_names) != set(camel_cased_crawl_method_arg_names):
message_detail = "crawl() arg names and factor names don't match"
raise SpiderMetadataError(cls, message_detail=message_detail)
#
# factor display order ...
#
factor_display_order = metadata.get("factorDisplayOrder", None)
if factor_display_order is None:
metadata["factorDisplayOrder"] = camel_cased_crawl_method_arg_names
else:
if set(factors) != set(factor_display_order):
message_detail = "factors and factor display order don't match"
raise SpiderMetadataError(cls, message_detail=message_detail)
#
# factor display names ...
#
# can only have factor display names for previously identified
# identifying and authenticating factors
factor_display_names = metadata.get("factorDisplayNames", {})
if not set(factor_display_names).issubset(set(factors)):
message_detail = "unknown factor(s) in factor display names"
raise SpiderMetadataError(cls, message_detail=message_detail)
# ensure each factor has a factor display name available for
# the default language
for factor_name in factors:
lang_to_display_name = factor_display_names.get(factor_name, {})
if "" not in lang_to_display_name:
lang_to_display_name[""] = factor_name
factor_display_names[factor_name] = lang_to_display_name
metadata["factorDisplayNames"] = factor_display_names
#
# TTL
#
metadata["ttl"] = metadata.get("ttl", "60s")
#
# max concurrent crawls
#
metadata["maxConcurrentCrawls"] = metadata.get("maxConcurrentCrawls", 3)
#
        # paranoia level
#
metadata["paranoiaLevel"] = metadata.get("paranoiaLevel", "low")
#
# maximum crawl time
#
metadata["maxCrawlTime"] = metadata.get("maxCrawlTime", "30s")
return metadata
@classmethod
def get_default_category(cls):
"""A spider's fully qualified name will be something like gaming_spiders.miniclip.Spider
        A spider's default category is everything up to but not including the first period in a
        spider's fully qualified name.
        get_default_category() simplifies specification of metadata when spiders are in
        multiple categories. See sample code below.
def get_metadata(cls):
return {
'url': 'https://www.xe.com/?cn=cad',
'categories': [
cls.get_default_category(),
'fx_rates',
],
}
"""
return cls._replace_spiders_postfix_reg_ex.sub('', cls.__module__.split('.')[0])
@classmethod
def get_metadata(cls):
"""Spider classes should override this method to return
        a dict representation of a JSON document which describes the
spider.
See the sample spiders for a broad variety of metadata
examples.
"""
fmt = "%s must implememt class method 'get_metadata()'"
raise NotImplementedError(fmt % cls)
@property
def url(self):
"""Returns the URL that the spider will crawl."""
metadata = type(self).get_validated_metadata()
return metadata.get("url", None)
@property
def paranoia_level(self):
"""Returns the spider's paranoia level."""
metadata = type(self).get_validated_metadata()
return metadata["paranoiaLevel"]
@classmethod
def version(cls):
"""This method returns a spider's version which is the SHA1 of
the source code of the module containing the spider.
"""
module = sys.modules[cls.__module__]
source = inspect.getsource(module)
hash = hashlib.sha256(source.encode('UTF-8'))
return '%s:%s' % (hash.name, hash.hexdigest())
def crawl(self, browser, *args, **kwargs):
"""Spiders should override this method to implement
their own crawling logic.
"""
fmt = "%s must implememt crawl()"
raise NotImplementedError(fmt % self)
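# A minimal sketch of a concrete spider, with a made-up URL and factor
# patterns: it shows how the identifying/authenticating factor names declared
# in get_metadata() must line up with crawl()'s argument names (after
# snake_case -> camelCase conversion) for get_validated_metadata() to accept
# the class. Whether this minimal metadata satisfies every rule in
# jsonschemas.spider_metadata is not verified here; the class is built inside
# a factory function so that importing the module does not register an extra
# Spider subclass with SpiderDiscovery.
def _make_example_spider_class():
    class ExampleLoginSpider(Spider):
        @classmethod
        def get_metadata(cls):
            return {
                'url': 'https://example.com/login',
                'identifyingFactors': {
                    'user_name': {'pattern': '^.+$'},
                },
                'authenticatingFactors': {
                    'password': {'pattern': '^.+$'},
                },
            }
        def crawl(self, browser, user_name, password):
            # crawl()'s args after (self, browser) - here 'user_name' and
            # 'password' - are exactly what
            # _get_crawl_method_arg_names_for_use_as_factors() picks up.
            return CrawlResponseOk({'loggedInAs': user_name})
    return ExampleLoginSpider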
class SpiderMetadataError(Exception):
"""Raised by :py:meth:`Spider.get_validated_metadata` to indicate
that :py:meth:`Spider.get_metadata` returned invalid metadata.
"""
def __init__(self, spider_class, message_detail=None, ex=None):
fmt = "Spider class '%s' has invalid metadata"
message = fmt % spider_class.__name__
if message_detail:
message = "%s - %s" % (message, message_detail)
if ex:
message = "%s - %s" % (message, str(ex))
Exception.__init__(self, message)
class CLICrawlArgs(list):
"""During spider authoring, spiders are run from the command line
using the standard Python if __name__ == "__main__". In this mode,
arguments to the spider's crawl function will come from the
command line and extracted by interogating sys.argv - again, just
like a standard Python app. If no command line arguments are
available but the spider requires crawl args it would be great
if the spider prompted the user to enter each of the crawl args.
Of course the spider should be careful when it comes to
prompting for authenticating factors (passwords, etc.) not to echo
back the characters as they are entered. Further, given the
spider's metadata declares how to validate crawl arguments, as
the crawl args are entered by the user, the spider should validate
the entered text against the spider's metadata. There are other
scenarios to consider too. What if 2 command line args are given
but the spider requires 4? Simplest thing would be to display
a usage message.
So what does CLICrawlArgs do? Everything described above! The
last few statements in a spider should look like the code below
and everything described above is done by CLICrawlArgs:
from cloudfeaster import spider
.
.
.
if __name__ == '__main__':
crawl_args = spider.CLICrawlArgs(MySpider)
        crawler = spider.SpiderCrawler(MySpider)
crawl_result = crawler.crawl(*crawl_args)
print(json.dumps(crawl_result))
sys.exit(1 if crawl_result.status_code else 0)
CLICrawlArgs depends heavily on a spider's metadata so spend
the time to get the metadata right.
"""
def __init__(self, spider_class):
list.__init__(self)
validated_metadata = spider_class.get_validated_metadata()
factor_display_order = validated_metadata["factorDisplayOrder"]
factor_display_names = validated_metadata["factorDisplayNames"]
lang = os.environ.get("LANG", "")[:2]
if len(factor_display_order) == (len(sys.argv) - 1):
self.extend(sys.argv[1:])
return
# specified some crawl args but we know it isn't the right number
# of arguments so construct and display a usage message
if 1 < len(sys.argv):
usage = "usage: %s" % os.path.split(sys.argv[0])[1]
for factor in factor_display_order:
usage = "%s <%s>" % (usage, factor)
print(usage)
# sys.exit() only returns when it's mocked
sys.exit(1)
return
identifying_factors = validated_metadata.get("identifyingFactors", {})
authenticating_factors = validated_metadata.get("authenticatingFactors", {})
factors = identifying_factors.copy()
factors.update(authenticating_factors)
for factor_name in factor_display_order:
while True:
arg = self.prompt_for_and_get_arg_value(
lang,
factor_name,
factor_name in identifying_factors,
factors,
factor_display_names)
if arg is not None:
break
self.append(arg)
def prompt_for_and_get_arg_value(self,
lang,
factor_name,
is_identifying_factor,
factors,
factor_display_names):
"""Prompt the user for the arg value for a factor.
Sounds like a pretty simple process but there is
complexity in scenarios where factors are enums and
the entire enum list is presented to the user.
The arg value is also validated against either the
factor's regular expression or the enum list.
If a valid value is entered it is returned otherwise
None is returned. It is expected that the caller
is in a tight loop iterating over this method until
a non-None response is returned for the factor.
"""
factor_display_name = factor_display_names[factor_name].get(
lang,
factor_display_names[factor_name].get("", factor_name))
enums = factors[factor_name].get("enum", None)
if enums:
prompt = "%s\n%s\n> " % (
factor_display_name,
"\n".join(["- %d. %s" % (i + 1, enums[i]) for i in range(0, len(enums))]),
)
else:
prompt = "%s%s%s> " % (colorama.Style.BRIGHT, factor_display_name, colorama.Style.RESET_ALL)
sys.stdout.write(prompt)
if is_identifying_factor:
arg = sys.stdin.readline().strip()
if enums:
try:
arg = int(arg)
if 1 <= arg and arg <= len(enums):
return enums[int(arg) - 1]
return None
except Exception:
return None
else:
arg = getpass.getpass("")
reg_ex_pattern = factors[factor_name]["pattern"]
reg_ex = re.compile(reg_ex_pattern)
if not reg_ex.match(arg):
return None
return arg
class CrawlResponse(dict):
"""Instances of this class are returned by ```Spider.crawl()```."""
SC_OK = 0
SC_CRAWL_RAISED_EXCEPTION = 400 + 1
SC_SPIDER_NOT_FOUND = 400 + 2
SC_CTR_RAISED_EXCEPTION = 400 + 3
SC_INVALID_CRAWL_RETURN_TYPE = 400 + 4
SC_INVALID_CRAWL_RESPONSE = 400 + 5
SC_INVALID_CRAWL_ARG = 400 + 6
SC_BAD_CREDENTIALS = 400 + 7
SC_ACCOUNT_LOCKED_OUT = 400 + 8
SC_COULD_NOT_CONFIRM_LOGIN_STATUS = 400 + 9
SC_UNKNOWN = 500
def __init__(self, status_code, status, *args, **kwargs):
kwargs['_metadata'] = {
'status': {
'code': status_code,
'message': status,
},
}
dict.__init__(self, *args, **kwargs)
@property
def status_code(self):
return self.get('_metadata', {}).get('status', {}).get('code', type(self).SC_UNKNOWN)
class CrawlResponseOk(CrawlResponse):
def __init__(self, *args, **kwargs):
CrawlResponse.__init__(self,
CrawlResponse.SC_OK,
'Ok',
*args, **kwargs)
class CrawlResponseBadCredentials(CrawlResponse):
def __init__(self, *args, **kwargs):
CrawlResponse.__init__(self,
CrawlResponse.SC_BAD_CREDENTIALS,
'bad credentials',
*args,
**kwargs)
class CrawlResponseAccountLockedOut(CrawlResponse):
def __init__(self, *args, **kwargs):
CrawlResponse.__init__(self,
CrawlResponse.SC_ACCOUNT_LOCKED_OUT,
'account locked out',
*args,
**kwargs)
class CrawlResponseCouldNotConfirmLoginStatus(CrawlResponse):
def __init__(self, *args, **kwargs):
CrawlResponse.__init__(self,
CrawlResponse.SC_COULD_NOT_CONFIRM_LOGIN_STATUS,
'could not confirm login status',
*args,
**kwargs)
class CrawlResponseInvalidCrawlReturnType(CrawlResponse):
def __init__(self, *args, **kwargs):
CrawlResponse.__init__(self,
CrawlResponse.SC_INVALID_CRAWL_RETURN_TYPE,
'spider crawl returned invalid type',
*args,
**kwargs)
class CrawlResponseInvalidCrawlResponse(CrawlResponse):
def __init__(self, ex, *args, **kwargs):
CrawlResponse.__init__(self,
CrawlResponse.SC_INVALID_CRAWL_RESPONSE,
'spider crawl returned invalid response - %s' % ex,
*args,
**kwargs)
class CrawlResponseCrawlRaisedException(CrawlResponse):
def __init__(self, ex, *args, **kwargs):
CrawlResponse.__init__(self,
CrawlResponse.SC_CRAWL_RAISED_EXCEPTION,
'spider crawl raised exception - %s' % ex,
*args,
**kwargs)
class CrawlResponseCtrRaisedException(CrawlResponse):
def __init__(self, ex, *args, **kwargs):
CrawlResponse.__init__(self,
CrawlResponse.SC_CTR_RAISED_EXCEPTION,
'spider ctr raised exception - %s' % ex,
*args,
**kwargs)
class CrawlResponseSpiderNotFound(CrawlResponse):
def __init__(self, full_spider_class_name, *args, **kwargs):
CrawlResponse.__init__(self,
CrawlResponse.SC_SPIDER_NOT_FOUND,
'could not find spider %s' % full_spider_class_name,
*args,
**kwargs)
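# A small sketch, using only the classes defined above, of what a crawl
# response looks like to calling code: the spider's data lives in the dict
# itself while '_metadata' carries the status injected by __init__(). The
# 'dailyRank' key is made up for illustration.
def _example_crawl_response():
    response = CrawlResponseOk({'dailyRank': 42})
    assert response.status_code == CrawlResponse.SC_OK
    assert response['dailyRank'] == 42
    return response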
class SpiderCrawler(object):
"""SpiderCrawler is a wrapper for ```Spider.crawl()``` ensuring
    exceptions are always caught and an instance of ```CrawlResponse```
    is always returned.
Below is the expected spider mainline illustrating how ```SpiderCrawler```
is expected to be used.
if __name__ == '__main__':
crawl_args = spider.CLICrawlArgs(PyPISpider)
crawler = spider.SpiderCrawler(PyPISpider)
        crawl_result = crawler.crawl(*crawl_args)
print(json.dumps(crawl_result))
sys.exit(1 if crawl_result.status_code else 0)
"""
def __init__(self, full_spider_class_name):
object.__init__(self)
self.full_spider_class_name = full_spider_class_name
self.logging_file = None
self.chromedriver_log_file = None
self.screenshot_file = None
def crawl(self, *args, **kwargs):
self._configure_logging(args)
#
# get the spider's class
#
(spider_class, crawl_response) = self._get_spider_class()
if crawl_response:
return crawl_response
#
# create an instance of the spider
#
try:
spider = spider_class()
except Exception as ex:
return CrawlResponseCtrRaisedException(ex)
#
# call the spider's crawl() method, validate crawl
# response and add crawl response metadata
#
dt_start = _utc_now()
try:
(_, self.chromedriver_log_file) = tempfile.mkstemp()
with self._get_browser(spider.url, spider.paranoia_level, self.chromedriver_log_file) as browser:
try:
crawl_response = spider.crawl(browser, *args, **kwargs)
except Exception as ex:
crawl_response = CrawlResponseCrawlRaisedException(ex)
if not isinstance(crawl_response, CrawlResponse):
crawl_response = CrawlResponseInvalidCrawlReturnType()
self.screenshot_file = self._take_screenshot(browser)
except Exception as ex:
crawl_response = CrawlResponseCrawlRaisedException(ex)
dt_end = _utc_now()
crawl_response['_metadata'].update({
'spider': {
'name': os.path.basename(sys.modules[type(spider).__module__].__file__),
'version': spider_class.version(),
},
'crawlArgs': [
privacy.hash_crawl_arg(arg) for arg in args
],
'crawlTime': {
'started': dt_start.isoformat(),
'durationInMs': int(1000.0 * (dt_end - dt_start).total_seconds()),
},
})
self._add_debug_file_to_crawl_response(
'screenshot',
self._file_to_data_uri_scheme(self.screenshot_file),
crawl_response)
self._add_debug_file_to_crawl_response(
'crawlLog',
self._file_to_data_uri_scheme(self.logging_file),
crawl_response)
self._add_debug_file_to_crawl_response(
'chromeDriverLog',
self._file_to_data_uri_scheme(self.chromedriver_log_file),
crawl_response)
#
# verify ```crawl_response```
#
try:
jsonschema.validate(crawl_response, jsonschemas.crawl_result)
except Exception as ex:
return CrawlResponseInvalidCrawlResponse(ex)
return crawl_response
def _file_to_data_uri_scheme(self, filename):
is_inline_debug = True if os.environ.get('CLF_INLINE_DEBUG', None) else False
if not is_inline_debug:
return filename
return util.file_to_data_uri_scheme(filename)
def _add_debug_file_to_crawl_response(self, key, value, crawl_response):
if key and value:
if '_debug' not in crawl_response:
crawl_response['_debug'] = {}
crawl_response['_debug'][key] = value
def _get_spider_class(self):
#
# deal with scenario where self.full_spider_class_name
# is in fact already a spider's class
#
if not isinstance(self.full_spider_class_name, str):
#
# so self.full_spider_class_name wasn't actually the name
# of the spider. or more precisely it wasn't a string and
# therefore we assume it's actually the spider's class
#
return (self.full_spider_class_name, None)
#
# is self.full_spider_class_name actually a pointer to a
# spider sitting in some arbitrary file. this is particuarly
# useful when testing spiderhost.py
#
reg_ex_pattern = r'\s*(?P<spider_module_filename>.+.py):(?P<spider_class_name>[^\s]+Spider)\s*$'
reg_ex = re.compile(reg_ex_pattern, flags=re.IGNORECASE)
match = reg_ex.match(self.full_spider_class_name)
if match:
try:
spider_module_filename = match.group('spider_module_filename')
spider_class_name = match.group('spider_class_name')
spider_module = imp.load_source('doicareaboutthisname', spider_module_filename)
spider_class = getattr(spider_module, spider_class_name)
return (spider_class, None)
except Exception:
return (None, CrawlResponseSpiderNotFound(self.full_spider_class_name))
#
# parse the full name of the spider, identify & load the
# module containing the spider and find the spider class
# in the loaded module
#
try:
split_full_spider_class_name = self.full_spider_class_name.split(".")
spider_module_name = ".".join(split_full_spider_class_name[:-1])
spider_class_name = split_full_spider_class_name[-1]
spider_module = importlib.import_module(spider_module_name)
spider_class = getattr(spider_module, spider_class_name)
return (spider_class, None)
except Exception:
return (None, CrawlResponseSpiderNotFound(self.full_spider_class_name))
def _get_browser(self, url, paranoia_level, chromedriver_log_file):
"""This private method exists to allow unit tests to mock out the method.
export CLF_REMOTE_CHROMEDRIVER=http://host.docker.internal:9515
"""
remote_chromedriver = os.environ.get('CLF_REMOTE_CHROMEDRIVER', None)
if remote_chromedriver:
return RemoteBrowser(remote_chromedriver, url, paranoia_level)
return Browser(url, paranoia_level, chromedriver_log_file)
def _take_screenshot(self, browser):
"""This is a private method which takes a screenshot of the browser's
current window and then adds the name of the temp file containing the
screenshot to the crawl response.
"""
(_, no_extension_screenshot) = tempfile.mkstemp()
screenshot_file = no_extension_screenshot + '.png'
os.rename(no_extension_screenshot, screenshot_file)
browser.save_screenshot(screenshot_file)
return screenshot_file
def _configure_logging(self, crawl_args):
clf_debug_value = os.environ.get('CLF_DEBUG', '')
reg_ex_pattern = '^(DEBUG|INFO|WARNING|ERROR|CRITICAL)$'
reg_ex = re.compile(reg_ex_pattern, re.IGNORECASE)
logging_level = clf_debug_value.upper() if reg_ex.match(clf_debug_value) else 'ERROR'
(_, self.logging_file) = tempfile.mkstemp()
logging.Formatter.converter = time.gmtime
logging_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s.%(msecs)03d+00:00 %(levelname)s %(module)s:%(lineno)d %(message)s',
},
},
'handlers': {
'file': {
'class': 'logging.FileHandler',
'filename': self.logging_file,
'mode': 'a',
'formatter': 'standard',
'filters': [
# privacy.RedactingFilter(crawl_args),
],
},
},
'root': {
'level': logging_level,
'handlers': [
'file',
],
},
}
logging.config.dictConfig(logging_config)
# :TODO: can this be configured in 'logging_config'?
# privacy.RedactingFormatter.install_for_all_handlers(crawl_args)
class RemoteBrowser(webdriver.Remote):
def __init__(self, remote_chromedriver, url, paranoia_level):
webdriver.Remote.__init__(self, remote_chromedriver)
self._url = url
self._paranoia_level = paranoia_level
def __enter__(self):
"""Along with ```___exit___()``` implements the standard
context manager pattern which, if a none-None url was
supplied in the ```Browser```'s ctr,
directs the browser to the specified url when entering
the context and closes the browser when exiting the
context. The pattern just makes using
```Browser``` way, way cleaner.
"""
if self._url:
self.get(self._url)
return self
def __exit__(self, exec_type, exec_val, ex_tb):
"""See ```___enter___()```."""
self.quit()
def create_web_element(self, element_id):
"""Override the default implementation of
```webdriver.Chrome.create_web_element```
to return a ```WebElement``` instead of a
```selenium.webdriver.remote.webelement.WebElement```.
"""
return WebElement(self._paranoia_level, self, element_id)
class Browser(webdriver.Chrome):
"""This class extends ```webdriver.Chrome``` to add new functionality
and override existing functionality that is well suited to writing
webdriver based Spiders.
"""
@classmethod
def get_chrome_options(cls, paranoia_level):
chrome_options = Options()
binary_location = os.environ.get('CLF_CHROME', None)
if binary_location:
chrome_options.binary_location = binary_location
_logger.info('using chrome binary >>>%s<<<', chrome_options.binary_location)
#
# -- https://peter.sh/experiments/chromium-command-line-switches/
#
chrome_options_str_format = (
'--headless|'
'--window-size=1280x1024|'
'--no-sandbox|'
'--disable-gpu|'
'--disable-software-rasterizer|'
'--single-process|'
'--disable-dev-shm-usage|'
'--user-agent={user_agent}'
)
chrome_options_str = os.environ.get(
'CLF_CHROME_OPTIONS',
chrome_options_str_format.format(user_agent=cloudfeaster_extension.user_agent()))
for chrome_option in chrome_options_str.split('|'):
chrome_options.add_argument(chrome_option)
_logger.info('using chrome option >>>%s<<<', chrome_option)
(proxy_host, proxy_port) = cloudfeaster_extension.proxy(paranoia_level)
if proxy_host is not None and proxy_port is not None:
chrome_option = '--proxy-server=%s:%d' % (proxy_host, proxy_port)
chrome_options.add_argument(chrome_option)
_logger.info('using chrome option >>>%s<<<', chrome_option)
return chrome_options
def __init__(self, url, paranoia_level, chromedriver_log_file):
"""Create a new instance of :py:class:`Browser`.
        See :py:meth:`Browser.__enter__` to understand how and when the
```url``` argument is used.
"""
chrome_options = type(self).get_chrome_options(paranoia_level)
service_args = []
# nice reference @ http://chromedriver.chromium.org/logging
if chromedriver_log_file:
_logger.info('chromedriver logs @ >>>%s<<<', chromedriver_log_file)
service_args.append('--verbose')
service_args.append('--log-path=%s' % chromedriver_log_file)
webdriver.Chrome.__init__(
self,
chrome_options=chrome_options,
service_args=service_args)
self._url = url
self._paranoia_level = paranoia_level
def __enter__(self):
"""Along with ```___exit___()``` implements the standard
context manager pattern which, if a none-None url was
supplied in the ```Browser```'s ctr,
directs the browser to the specified url when entering
the context and closes the browser when exiting the
context. The pattern just makes using
```Browser``` way, way cleaner.
"""
if self._url:
self.get(self._url)
return self
def __exit__(self, exec_type, exec_val, ex_tb):
"""See ```___enter___()```."""
self.quit()
def create_web_element(self, element_id):
"""Override the default implementation of
```webdriver.Chrome.create_web_element```
to return a :py:class:`WebElement` instead of a
```selenium.webdriver.remote.webelement.WebElement```.
"""
return WebElement(self._paranoia_level, self, element_id)
class WebElement(selenium.webdriver.remote.webelement.WebElement):
"""This class extends ```selenium.webdriver.remote.webelement.WebElement```
to add new functionality and override existing functionality that is well
suited to writing webdriver based Spiders.
"""
_nonDigitAndNonDigitRegEx = re.compile(r'[^\d^\.]')
def __init__(self, paranoia_level, *args, **kwargs):
selenium.webdriver.remote.webelement.WebElement.__init__(self, *args, **kwargs)
self._paranoia_level = paranoia_level
def __eq__(self, other):
"""This is here only to resolve https://lgtm.com/rules/9990086/."""
if not isinstance(other, WebElement):
return False
if not selenium.webdriver.remote.webelement.WebElement.__eq__(self, other):
return False
return self._paranoia_level == other._paranoia_level
def get_text(self):
"""This method exists so spider code can access element data
using a set of methods instead of a text property and some
other methods like ```get_int()``` and ```get_float()```.
"""
return self.text
def _get_number(self, number_type, reg_ex):
text = self.get_text()
if reg_ex:
match = reg_ex.match(text)
if match is None:
return None
match_groups = match.groups()
if 1 != len(match_groups):
return None
text = match_groups[0]
text = type(self)._nonDigitAndNonDigitRegEx.sub('', text)
return number_type(text)
def get_int(self, reg_ex=None):
return self._get_number(int, reg_ex)
def get_float(self, reg_ex=None):
return self._get_number(float, reg_ex)
def get_selected(self):
"""This method is here only to act as a shortcut so that a spider
author can write a single line of code to get the correctly selected
option in a list rather than the few lines of code that's seen
in this method's implementation.
If an option is selected the option's text is returned otherwise
None is returned.
"""
select = selenium.webdriver.support.select.Select(self)
try:
return select.first_selected_option
except NoSuchElementException:
return None
def select_by_visible_text(self, visible_text):
"""This method is here only to act as a shortcut so that a spider
author can write a single line of code to select an option in a list
        rather than two lines of code. Perhaps not a huge saving but every
little bit helps. As an aside, feels like this is the way the
select functionality should have been implemented anyway.
"""
select = selenium.webdriver.support.select.Select(self)
select.select_by_visible_text(visible_text)
def send_keys(self, value):
""":ODD: yes this implementation pattern looks odd. This approach is used
        so there's a default implementation which can be used during development
but also provides a clean approach to override the implementation.
"""
cloudfeaster_extension.send_keys(self._paranoia_level, self, value)
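# A standalone sketch of the parsing rule implemented by
# WebElement._get_number() above, runnable without a browser: an optional
# regular expression with a single capture group first isolates the
# interesting text, then every character that is not a digit or '.' is
# stripped before conversion. The price string below is made up.
def _example_number_parsing():
    price_reg_ex = re.compile(r'^\s*Price:\s*(.+?)\s*$')
    match = price_reg_ex.match('Price: $1,234.56')
    captured = match.groups()[0]
    cleaned = WebElement._nonDigitAndNonDigitRegEx.sub('', captured)
    return float(cleaned)  # 1234.56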
class SpiderDiscovery(object):
"""Discover all available spiders. This means locating concrete derived classes
of ```Spider``` in all available distributions.
"""
#
# egg_name_reg_ex is used to extract module names from egg_names
# like ```gaming_spiders-0.1.0-py2.7```.
#
_egg_name_reg_ex = re.compile(
r'^\s*(?P<egg_name>.+spiders)-\d+\.\d+\.\d+\-py\d+\.\d+\s*$',
re.IGNORECASE)
def __init__(self, samples=False):
object.__init__(self)
self._samples = samples
def discover(self):
#
# find and import all packages that might contain spiders
#
for distro in pkg_resources.working_set:
match = type(self)._egg_name_reg_ex.match(distro.egg_name())
_logger.info("assessing distro for spiders '%s'", distro.egg_name())
if match:
egg_name = match.group('egg_name')
_logger.info("matched distro for spiders '%s'", egg_name)
type(self).load_and_discover_all_spiders_in_package(egg_name)
if self._samples:
importlib.import_module('cloudfeaster.samples')
samples_dir = os.path.dirname(sys.modules['cloudfeaster.samples'].__file__)
def filter(filename): return(filename.endswith('.py') and not filename.startswith('__'))
sample_spiders = [filename[:-len('.py')] for filename in os.listdir(samples_dir) if filter(filename)]
for sample_spider in sample_spiders:
importlib.import_module('cloudfeaster.samples.{spider}'.format(spider=sample_spider))
#
# with all packages loaded that might contain spiders, find all
# the concrete subclasses of ```cloudfeaster.spider.Spider```
# which will be the spiders we're interested in
#
concrete_spider_classes = self._find_concrete_spider_classes(Spider)
#
# now for some fancy formatting of the results
#
rv = {}
for concrete_spider_class in concrete_spider_classes:
metadata = concrete_spider_class.get_validated_metadata()
fully_qualified_class = metadata['fullyQualifiedClassName']
spider = fully_qualified_class.split('.')[-2]
for category in metadata['categories']:
if category not in rv:
rv[category] = {}
rv[category][spider] = metadata
#
# all done!
#
return rv
def _find_concrete_spider_classes(self, base_class):
base_msg = "looking for concrete spider classes of base class '%s.%s'" % (
base_class.__module__,
base_class.__name__,
)
_logger.info(base_msg)
concrete_spider_classes = []
for sub_class in base_class.__subclasses__():
fully_qualified_class_name = '%s.%s' % (sub_class.__module__, sub_class.__name__)
_logger.info("%s - assessing '%s'", base_msg, fully_qualified_class_name)
if not sub_class.__subclasses__():
msg_fmt = "{base_msg} - identified concrete class '{class_name}'"
msg = msg_fmt.format(base_msg=base_msg, class_name=fully_qualified_class_name)
_logger.info(msg)
concrete_spider_classes.append(sub_class)
else:
_logger.info("%s - identified abstract class '%s'", base_msg, fully_qualified_class_name)
concrete_spider_classes.extend(self._find_concrete_spider_classes(sub_class))
return concrete_spider_classes
@classmethod
def load_and_discover_all_spiders_in_package(cls, spider_package_name):
"""This is a public class method mostly because sometimes in testing it's super
useful to be able to load spider distros.
"""
spider_package = importlib.import_module(spider_package_name)
spider_package_dir_name = os.path.dirname(spider_package.__file__)
_logger.info("looking for spiders in directory '%s'", spider_package_dir_name)
for (_, name, ispkg) in pkgutil.iter_modules([spider_package_dir_name]):
if not ispkg:
module_name = '%s.%s' % (spider_package_name, name)
_logger.info("attempting to import spider module '%s'", module_name)
importlib.import_module(module_name)
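# A minimal sketch of how the discovery output above is shaped: discover()
# returns {category: {spider_name: validated_metadata}}, so walking it lists
# every discovered spider plus its metadata. Enabling samples=True is just one
# way to guarantee there is something to list.
def _example_list_spiders():
    spiders_by_category = SpiderDiscovery(samples=True).discover()
    for (category, spiders) in sorted(spiders_by_category.items()):
        for (spider_name, metadata) in sorted(spiders.items()):
            print('%s / %s -> %s' % (
                category, spider_name, metadata['fullyQualifiedClassName']))
    return spiders_by_category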
| simonsdave/cloudfeaster | cloudfeaster/spider.py | Python | mit | 40,751 |
import os
import pytest
from pre_commit_hooks.check_symlinks import check_symlinks
from testing.util import get_resource_path
@pytest.mark.xfail(os.name == 'nt', reason='No symlink support on windows')
@pytest.mark.parametrize(('filename', 'expected_retval'), (
('broken_symlink', 1),
('working_symlink', 0),
))
def test_check_symlinks(filename, expected_retval):
ret = check_symlinks([get_resource_path(filename)])
assert ret == expected_retval
| Coverfox/pre-commit-hooks | tests/check_symlinks_test.py | Python | mit | 466 |
import requests
from requests.auth import AuthBase
from requests.auth import HTTPBasicAuth
auth = HTTPBasicAuth('ryan', 'password')
r = requests.post(url="http://pythonscraping.com/pages/auth/login.php", auth=auth)
print(r.text)
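# The AuthBase import above is not exercised by this script; a minimal sketch
# of how it is normally used follows: subclass AuthBase and attach the
# credential to the outgoing request in __call__. The header name and token
# value here are made up.
class TokenAuth(AuthBase):
    def __init__(self, token):
        self.token = token
    def __call__(self, r):
        # requests invokes this with the prepared request before sending it
        r.headers['X-Auth-Token'] = self.token
        return r
r2 = requests.post(url="http://pythonscraping.com/pages/auth/login.php", auth=TokenAuth('not-a-real-token'))
print(r2.status_code)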
| sunrin92/LearnPython | 2-WebScrapingWithPython/auth.py | Python | mit | 232 |
import numpy as np
import cv2
from matplotlib import pyplot as plt
imgL=cv2.imread('left.png',0)
imgR=cv2.imread('right.png',0)
stereo=cv2.StereoBM_create(numDisparities=16, blockSize=15)
disparity=stereo.compute(imgL,imgR)
plt.imshow(disparity,'gray')
plt.show() | arpan-bhowmick9/Image-Processing | Disparity Map/Disparity_Map.py | Python | mit | 265 |
import os
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from model import CRNN, CtcCriterion
from dataset import DatasetLmdb, SynthLmdb
import tensorflow as tf
import numpy as np
import signal
import utility
import sys
import time
class Conf:
def __init__(self):
self.nClasses = 36
self.trainBatchSize = 64
self.evalBatchSize = 200
self.testBatchSize = 10
self.maxIteration = 2000000
self.displayInterval = 1
self.evalInterval = 10
self.testInterval = 20
self.saveInterval = 50000
self.modelDir = os.path.abspath(os.path.join('..', 'model', 'ckpt'))
# self.dataSet = os.path.join('..', 'data', 'Synth')
# self.auxDataSet = os.path.join('..', 'data', 'aux_Synth')
self.dataSet = os.path.join('..', 'data', 'IIIT5K')
self.maxLength = 24
self.trainLogPath = os.path.abspath(os.path.join('..', 'model', 'log'))
if __name__ == '__main__':
gConfig = Conf()
sess = tf.Session()
ckpt = utility.checkPointLoader(gConfig.modelDir)
imgs = tf.placeholder(tf.float32, [None, 32, 100])
decode_labels = tf.sparse_placeholder(tf.int32)
labels = tf.placeholder(tf.int32,[None])
target_seq_lengths = tf.placeholder(tf.int32, [None])
input_seq_lengths = tf.placeholder(tf.int32, [None])
rnn_seq_lengths = tf.placeholder(tf.int32, [None])
isTraining = tf.placeholder(tf.bool)
keepProb = tf.placeholder(tf.float32)
pred_labels = tf.placeholder(tf.string, [None])
true_labels = tf.placeholder(tf.string, [None])
# sequence lengths definition for dynamic rnn requirement
trainSeqLength = [gConfig.maxLength for i in range(gConfig.trainBatchSize)]
testSeqLength = [gConfig.maxLength for i in range(gConfig.testBatchSize)]
evalSeqLength = [gConfig.maxLength for i in range(gConfig.evalBatchSize)]
crnn = CRNN(imgs, gConfig, isTraining, keepProb, rnn_seq_lengths, sess)
ctc = CtcCriterion(crnn.prob, input_seq_lengths, labels, target_seq_lengths, pred_labels, true_labels)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(gConfig.trainLogPath, sess.graph)
global_step = tf.Variable(0)
optimizer = tf.train.AdadeltaOptimizer(0.001).minimize(ctc.cost, global_step=global_step)
if ckpt is None:
init = tf.global_variables_initializer()
sess.run(init)
step = 0
else:
crnn.loadModel(ckpt)
		step = sess.run(global_step)
data = DatasetLmdb(gConfig.dataSet)
# data = SynthLmdb(gConfig.dataSet, gConfig.auxDataSet)
trainAccuracy = 0
def signal_handler(signal, frame):
print('You pressed Ctrl+C!')
crnn.saveModel(gConfig.modelDir, step)
print("%d steps trained model has saved" % step)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
start_time = time.time()
t = start_time
while True:
#train
batchSet, labelSet, seqLengths = data.nextBatch(gConfig.trainBatchSize)
cost, _, step, summary = sess.run([ctc.cost, optimizer, global_step, merged],feed_dict={
crnn.inputImgs:batchSet,
crnn.isTraining:True,
crnn.keepProb:1.0,
crnn.rnnSeqLengths:trainSeqLength,
ctc.lossTarget:labelSet[1],
ctc.targetSeqLengths:seqLengths,
ctc.inputSeqLengths:trainSeqLength
})
if step % gConfig.displayInterval == 0:
time_elapse = time.time() - t
t = time.time()
total_time = time.time() - start_time
train_writer.add_summary(summary, step)
print("step: %s, cost: %s, step time: %.2fs, total time: %.2fs" % (step, cost, time_elapse, total_time))
		# eval accuracy
if step != 0 and step % gConfig.evalInterval == 0:
batchSet, labelSet, seqLengths = data.nextBatch(gConfig.evalBatchSize)
# print(batchSet.shape, labelSet.shape)
p = sess.run(crnn.rawPred, feed_dict={
crnn.inputImgs:batchSet,
crnn.isTraining:False,
crnn.keepProb:1.0,
crnn.rnnSeqLengths:evalSeqLength,
ctc.inputSeqLengths:evalSeqLength
})
original = utility.convertSparseArrayToStrs(labelSet)
predicted = utility.simpleDecoder(p)
# trainAccuracy = sess.run([ctc.accuracy], feed_dict={
# ctc.pred_labels: predicted,
# ctc.true_labels: original
# })
trainAccuracy = utility.eval_accuracy(predicted, original)
print("step: %d, training accuracy %f" % (step, trainAccuracy))
#small test
if step != 0 and step % gConfig.testInterval == 0:
batchSet, labelSet, seqLengths = data.nextBatch(gConfig.testBatchSize)
p = sess.run(crnn.rawPred, feed_dict={
crnn.inputImgs:batchSet,
crnn.isTraining:False,
crnn.keepProb:1.0,
crnn.rnnSeqLengths:testSeqLength,
ctc.inputSeqLengths:testSeqLength
})
original = utility.convertSparseArrayToStrs(labelSet)
predictedWithBlank = utility.simpleDecoderWithBlank(p)
predicted = utility.simpleDecoder(p)
for i in range(len(original)):
print("original: %s, predicted(no decode): %s, predicted: %s" % (original[i], predictedWithBlank[i], predicted[i]))
if step >= gConfig.maxIteration:
print("%d training has completed" % gConfig.maxIteration)
crnn.saveModel(gConfig.modelDir, step)
sys.exit(0)
if step != 0 and step % gConfig.saveInterval == 0:
print("%d training has saved" % step)
crnn.saveModel(gConfig.modelDir, step) | wcy940418/CRNN-end-to-end | src/training.py | Python | mit | 5,128 |
print(" # db_api.py")
# from abc import ABCMeta, abstractmethod
from sqlalchemy.exc import IntegrityError
from basic_wms.model import db_model
class CRUDsAbstractBase:
"""
Kinda abstract base class.
"""
@classmethod
def create(cls, *args, **kwargs):
raise NotImplementedError('*create* method not implemented')
@classmethod
def get_one(cls, id_):
raise NotImplementedError('*get_one* method not implemented')
@classmethod
def get_all(cls, with_deleted=False):
raise NotImplementedError('*get_all* method not implemented')
@classmethod
def update(cls, id_, **kwargs):
raise NotImplementedError('*update* method not implemented')
@classmethod
def delete(cls, id_):
raise NotImplementedError('*delete* method not implemented')
@classmethod
def undelete(cls, id_):
raise NotImplementedError('*undelete* method not implemented')
class CRUDsCommonFields(CRUDsAbstractBase):
# SQLA_class gets overwritten by children's class
SQLA_class = None
attribute_error_message = "*SQLA_class* - a class of relevant SQL " \
"Alchemy model - should be defined in " \
"the following CRUD method: {}."
@classmethod
def create(cls, *args, **kwargs):
raise NotImplementedError('*create* method not implemented')
@classmethod
def get_one(cls, id_):
"""
Returns dictionary with data about an entity with *id_* id.
In case of Warehouse:
{'id': int, 'deleted' : bool, 'name': str, 'location': str}.
In case of Supplier:
{'id': int, 'deleted': bool, 'VATIN': str, 'name': str,
'location': str}.
In case of ItemType:
{'id': int, 'deleted': bool, 'name': str, 'item_model': str,
'manufacturer': str, 'unit_of_measure': str}.
In case of ItemBatch:
{'id': int, 'deleted': bool, 'quantity': str,
'warehouse_id': int, 'supplier_id': int, 'item_type_id': int}.
"""
try:
return cls.SQLA_class.get_one(id_).serialize
except AttributeError:
raise AttributeError(cls.attribute_error_message.format(cls))
@classmethod
def get_all(cls, with_deleted=False):
"""
Yields all entities in serialized form.
In case of Warehouse:
{'id': int, 'deleted' : bool, 'name': str, 'location': str}.
In case of Supplier:
{'id': int, 'deleted': bool, 'VATIN': str, 'name': str,
'location': str}.
In case of ItemType:
{'id': int, 'deleted': bool, 'name': str, 'item_model': str,
'manufacturer': str, 'unit_of_measure': str}.
In case of ItemBatch:
{'id': int, 'deleted': bool, 'quantity': str,
'warehouse_id': int, 'supplier_id': int, 'item_type_id': int}.
"""
try:
items = cls.SQLA_class.get_all()
except AttributeError:
raise AttributeError(cls.attribute_error_message.format(cls))
for item in items:
if not item.deleted or with_deleted:
yield item.serialize
@classmethod
def update(cls, id_, **kwargs):
raise NotImplementedError('*update* method not implemented')
@classmethod
def delete(cls, id_):
"""
Marks an entity with given *id* as deleted.
Returns True if successful, False if it was already deleted.
"""
try:
item = cls.SQLA_class.get_one(id_=id_)
except AttributeError:
raise AttributeError(cls.attribute_error_message.format(cls))
if not item.deleted:
item.deleted = True
db_model.db.session.add(item)
db_model.db.session.commit()
return True
else:
return False
@classmethod
def undelete(cls, id_):
"""
Marks entity with given *id* as not deleted.
Returns True if successful, False if it wasn't deleted.
"""
try:
item = cls.SQLA_class.get_one(id_=id_)
except AttributeError:
raise AttributeError(cls.attribute_error_message.format(cls))
if item.deleted:
item.deleted = False
db_model.db.session.add(item)
db_model.db.session.commit()
return True
else:
return False
class WarehouseCRUD(CRUDsCommonFields):
SQLA_class = db_model.WarehouseSQLA
@classmethod
def create(cls, name, location):
"""
Adds warehouse to database and returns its *id* or None in case
of IntegrityError.
"""
assert isinstance(name, str),\
'WarehouseCRUD.create(): name should be a string'
assert isinstance(location, str),\
'WarehouseCRUD.create(): location should be a string'
warehouse = cls.SQLA_class(name=name, location=location)
db_model.db.session.add(warehouse)
if _db_commit_with_integrity_handling(db_model.db.session):
return warehouse.id_
else:
return None
@classmethod
def update(cls, id_, name=None, location=None):
"""
Updates in db name and/or location of a warehouse with given *id_*.
In case of IntegrityError returns False, otherwise returns True.
"""
# creating dictionary of all arguments, but *id_*
kwargs = locals()
kwargs.pop("id_")
entity = _update_entity(entity=cls.SQLA_class.get_one(id_),
kwargs=kwargs)
db_model.db.session.add(entity)
return _db_commit_with_integrity_handling(db_model.db.session)
class SupplierCRUD(CRUDsCommonFields):
SQLA_class = db_model.SupplierSQLA
@classmethod
def create(cls, VATIN, name, location):
"""
Adds supplier to database and returns its *id* or None in case
of IntegrityError.
"""
assert isinstance(VATIN, str),\
'SupplierCRUD.create(): VATIN should be a string'
assert isinstance(name, str),\
'SupplierCRUD.create(): name should be a string'
assert isinstance(location, str),\
'SupplierCRUD.create(): location should be a string'
supplier = cls.SQLA_class(VATIN=VATIN, name=name,
location=location)
db_model.db.session.add(supplier)
if _db_commit_with_integrity_handling(db_model.db.session):
return supplier.id_
else:
return None
@classmethod
def update(cls, id_, VATIN=None, name=None, location=None):
"""
Updates in db VATIN and/or name and/or location of a supplier
with given *id_*.
In case of IntegrityError returns False, otherwise returns True.
"""
# creating dictionary of all arguments, but *id_*
kwargs = locals()
kwargs.pop("id_")
entity = _update_entity(entity=cls.SQLA_class.get_one(id_),
kwargs=kwargs)
db_model.db.session.add(entity)
return _db_commit_with_integrity_handling(db_model.db.session)
class ItemTypeCRUD(CRUDsCommonFields):
SQLA_class = db_model.ItemTypeSQLA
@classmethod
def create(cls, name, item_model, manufacturer, unit_of_measure):
"""
Adds new item type to db and returns its *id* or None in case of
IntegrityError.
"""
assert isinstance(item_model, str),\
'ItemTypeCRUD.create(): item_model should be a string'
assert isinstance(manufacturer, str),\
'ItemTypeCRUD.create(): manufacturer should be a string'
assert isinstance(unit_of_measure, str),\
'ItemTypeCRUD.create(): unit_of_measure should be a string'
item_type = cls.SQLA_class(name=name, item_model=item_model,
manufacturer=manufacturer,
unit_of_measure=unit_of_measure)
db_model.db.session.add(item_type)
if _db_commit_with_integrity_handling(db_model.db.session):
return item_type.id_
else:
return None
@classmethod
def update(cls, id_, name=None, item_model=None, manufacturer=None,
unit_of_measure=None):
"""
        Updates the name, item_model, manufacturer and/or unit_of_measure
        of the item_type with the given *id_* in the database.
        Returns False in case of an IntegrityError, otherwise returns True.
"""
        # build a dict of all arguments except *cls* and *id_*
        kwargs = locals().copy()
        kwargs.pop("cls")
        kwargs.pop("id_")
entity = _update_entity(entity=cls.SQLA_class.get_one(id_),
kwargs=kwargs)
db_model.db.session.add(entity)
return _db_commit_with_integrity_handling(db_model.db.session)
class ItemBatchCRUD(CRUDsCommonFields):
SQLA_class = db_model.ItemBatchSQLA
@classmethod
def create(cls, quantity, warehouse_id, supplier_id, item_type_id):
"""
        Adds a new item batch to the database and returns its *id_*, or None
        in case of an IntegrityError.
"""
assert isinstance(quantity, int),\
'ItemBatchCRUD.create(): quantity should be an integer'
assert isinstance(warehouse_id, int),\
'ItemBatchCRUD.create(): warehouse_id should be an integer'
assert isinstance(supplier_id, int),\
'ItemBatchCRUD.create(): supplier_id should be an integer'
assert isinstance(item_type_id, int),\
'ItemBatchCRUD.create(): item_type_id should be an integer'
warehouse = WarehouseCRUD.SQLA_class.get_one(warehouse_id)
supplier = SupplierCRUD.SQLA_class.get_one(supplier_id)
item_type = ItemTypeCRUD.SQLA_class.get_one(item_type_id)
item_batch = cls.SQLA_class(quantity=quantity, warehouse=warehouse,
supplier=supplier, item_type=item_type)
db_model.db.session.add(item_batch)
if _db_commit_with_integrity_handling(db_model.db.session):
return item_batch.id_
else:
return None
@classmethod
def update(cls, id_, quantity=None, warehouse_id=None,
supplier_id=None, item_type_id=None):
"""
        Updates the quantity, warehouse, supplier and/or item_type of the
        item_batch with the given *id_* in the database.
        Returns False in case of an IntegrityError, otherwise returns True.
"""
        # resolve foreign keys only for the ids that were actually passed in
        warehouse = (WarehouseCRUD.SQLA_class.get_one(warehouse_id)
                     if warehouse_id is not None else None)
        supplier = (SupplierCRUD.SQLA_class.get_one(supplier_id)
                    if supplier_id is not None else None)
        item_type = (ItemTypeCRUD.SQLA_class.get_one(item_type_id)
                     if item_type_id is not None else None)
        # build a dict of the fields that _update_entity() may overwrite
        kwargs = {'quantity': quantity, 'warehouse': warehouse,
                  'supplier': supplier, 'item_type': item_type}
entity = _update_entity(entity=cls.SQLA_class.get_one(id_),
kwargs=kwargs)
db_model.db.session.add(entity)
return _db_commit_with_integrity_handling(db_model.db.session)
# HELPER METHODS
def _db_commit_with_integrity_handling(db_session):
"""
    Takes an SQLAlchemy session and commits it. Returns False (after rolling
    back) if the commit raised an IntegrityError, otherwise returns True.
"""
try:
db_session.commit()
except IntegrityError:
db_session.rollback()
return False
return True
def _update_entity(entity, kwargs):
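    """
    Sets on *entity* every attribute from *kwargs* whose value is not None
    and returns the (possibly modified) entity.
    """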
for key, value in kwargs.items():
if value is not None:
setattr(entity, key, value)
return entity
| pawelswiecki/basic-wms | basic_wms/model/db_api.py | Python | mit | 12,060 |
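A minimal usage sketch of the CRUD layer above, assuming the Flask app and the db_model session are already configured; the warehouse values below are placeholders, not data from the project.

# Hypothetical driver code for the module above (values are placeholders).
warehouse_id = WarehouseCRUD.create(name='Main depot', location='Gdansk')
if warehouse_id is not None:
    # Partial update: _update_entity() only overwrites fields that are not None.
    WarehouseCRUD.update(warehouse_id, location='Warsaw')
    # Soft delete / undelete toggle the *deleted* flag instead of removing the row.
    WarehouseCRUD.delete(warehouse_id)      # returns False if it was already deleted
    WarehouseCRUD.undelete(warehouse_id)    # returns False if it was not deleted
else:
    print('IntegrityError - a warehouse with these values probably already exists')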
"""
ha_test.test_component_device_scanner
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the device tracker component.
"""
# pylint: disable=protected-access,too-many-public-methods
import unittest
from datetime import datetime, timedelta
import logging
import os
import homeassistant as ha
import homeassistant.loader as loader
from homeassistant.components import (
STATE_HOME, STATE_NOT_HOME, ATTR_ENTITY_PICTURE)
import homeassistant.components.device_tracker as device_tracker
from helper import get_test_home_assistant
def setUpModule(): # pylint: disable=invalid-name
""" Setup to ignore group errors. """
logging.disable(logging.CRITICAL)
class TestComponentsDeviceTracker(unittest.TestCase):
""" Tests homeassistant.components.device_tracker module. """
def setUp(self): # pylint: disable=invalid-name
""" Init needed objects. """
self.hass = get_test_home_assistant()
loader.prepare(self.hass)
self.known_dev_path = self.hass.get_config_path(
device_tracker.KNOWN_DEVICES_FILE)
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
if os.path.isfile(self.known_dev_path):
os.remove(self.known_dev_path)
def test_is_on(self):
""" Test is_on method. """
entity_id = device_tracker.ENTITY_ID_FORMAT.format('test')
self.hass.states.set(entity_id, STATE_HOME)
self.assertTrue(device_tracker.is_on(self.hass, entity_id))
self.hass.states.set(entity_id, STATE_NOT_HOME)
self.assertFalse(device_tracker.is_on(self.hass, entity_id))
def test_setup(self):
""" Test setup method. """
# Bogus config
self.assertFalse(device_tracker.setup(self.hass, {}))
self.assertFalse(
device_tracker.setup(self.hass, {device_tracker.DOMAIN: {}}))
# Test with non-existing component
self.assertFalse(device_tracker.setup(
self.hass, {device_tracker.DOMAIN: {ha.CONF_TYPE: 'nonexisting'}}
))
# Test with a bad known device file around
with open(self.known_dev_path, 'w') as fil:
fil.write("bad data\nbad data\n")
self.assertFalse(device_tracker.setup(self.hass, {
device_tracker.DOMAIN: {ha.CONF_TYPE: 'test'}
}))
def test_device_tracker(self):
""" Test the device tracker class. """
scanner = loader.get_component(
'device_tracker.test').get_scanner(None, None)
scanner.come_home('dev1')
scanner.come_home('dev2')
self.assertTrue(device_tracker.setup(self.hass, {
device_tracker.DOMAIN: {ha.CONF_TYPE: 'test'}
}))
        # Ensure a new known devices file has been created.
        # Since the device_tracker uses a set internally we cannot know
        # the order of the devices in the known devices file, so to check
        # that all three expected lines are there we sort the file.
with open(self.known_dev_path) as fil:
self.assertEqual(
['dev1,unknown_device,0,\n', 'dev2,DEV2,0,\n',
'device,name,track,picture\n'],
sorted(fil))
# Write one where we track dev1, dev2
with open(self.known_dev_path, 'w') as fil:
fil.write('device,name,track,picture\n')
fil.write('dev1,Device 1,1,http://example.com/dev1.jpg\n')
fil.write('dev2,Device 2,1,http://example.com/dev2.jpg\n')
scanner.leave_home('dev1')
scanner.come_home('dev3')
self.hass.services.call(
device_tracker.DOMAIN,
device_tracker.SERVICE_DEVICE_TRACKER_RELOAD)
self.hass._pool.block_till_done()
dev1 = device_tracker.ENTITY_ID_FORMAT.format('Device_1')
dev2 = device_tracker.ENTITY_ID_FORMAT.format('Device_2')
dev3 = device_tracker.ENTITY_ID_FORMAT.format('DEV3')
now = datetime.now()
nowNext = now + timedelta(seconds=ha.TIMER_INTERVAL)
nowAlmostMinGone = (now + device_tracker.TIME_DEVICE_NOT_FOUND -
timedelta(seconds=1))
nowMinGone = nowAlmostMinGone + timedelta(seconds=2)
# Test initial is correct
self.assertTrue(device_tracker.is_on(self.hass))
self.assertFalse(device_tracker.is_on(self.hass, dev1))
self.assertTrue(device_tracker.is_on(self.hass, dev2))
self.assertIsNone(self.hass.states.get(dev3))
self.assertEqual(
'http://example.com/dev1.jpg',
self.hass.states.get(dev1).attributes.get(ATTR_ENTITY_PICTURE))
self.assertEqual(
'http://example.com/dev2.jpg',
self.hass.states.get(dev2).attributes.get(ATTR_ENTITY_PICTURE))
# Test if dev3 got added to known dev file
with open(self.known_dev_path) as fil:
self.assertEqual('dev3,DEV3,0,\n', list(fil)[-1])
# Change dev3 to track
with open(self.known_dev_path, 'w') as fil:
fil.write("device,name,track,picture\n")
fil.write('dev1,Device 1,1,http://example.com/picture.jpg\n')
fil.write('dev2,Device 2,1,http://example.com/picture.jpg\n')
fil.write('dev3,DEV3,1,\n')
# reload dev file
scanner.come_home('dev1')
scanner.leave_home('dev2')
self.hass.services.call(
device_tracker.DOMAIN,
device_tracker.SERVICE_DEVICE_TRACKER_RELOAD)
self.hass._pool.block_till_done()
# Test what happens if a device comes home and another leaves
self.assertTrue(device_tracker.is_on(self.hass))
self.assertTrue(device_tracker.is_on(self.hass, dev1))
# Dev2 will still be home because of the error margin on time
self.assertTrue(device_tracker.is_on(self.hass, dev2))
# dev3 should be tracked now after we reload the known devices
self.assertTrue(device_tracker.is_on(self.hass, dev3))
self.assertIsNone(
self.hass.states.get(dev3).attributes.get(ATTR_ENTITY_PICTURE))
# Test if device leaves what happens, test the time span
self.hass.bus.fire(
ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: nowAlmostMinGone})
self.hass._pool.block_till_done()
self.assertTrue(device_tracker.is_on(self.hass))
self.assertTrue(device_tracker.is_on(self.hass, dev1))
# Dev2 will still be home because of the error time
self.assertTrue(device_tracker.is_on(self.hass, dev2))
self.assertTrue(device_tracker.is_on(self.hass, dev3))
        # Now test what happens when gone for longer than the error margin
self.hass.bus.fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: nowMinGone})
self.hass._pool.block_till_done()
self.assertTrue(device_tracker.is_on(self.hass))
self.assertTrue(device_tracker.is_on(self.hass, dev1))
self.assertFalse(device_tracker.is_on(self.hass, dev2))
self.assertTrue(device_tracker.is_on(self.hass, dev3))
| loghound/home-assistant | ha_test/test_component_device_scanner.py | Python | mit | 7,036 |
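The test above is driven by the known_devices.csv file that the device_tracker component maintains (a header plus one row per discovered device). A small self-contained sketch of that layout and of reading such a file follows; the parser is illustrative only and is not Home Assistant's own.

import csv, io

# Layout exercised by the test above: device,name,track,picture
KNOWN_DEVICES = (
    "device,name,track,picture\n"
    "dev1,Device 1,1,http://example.com/dev1.jpg\n"
    "dev2,Device 2,1,http://example.com/dev2.jpg\n"
    "dev3,DEV3,0,\n"            # track=0 -> discovered but not tracked
)

for row in csv.DictReader(io.StringIO(KNOWN_DEVICES)):
    tracked = row['track'] == '1'
    print(row['device'], row['name'], 'tracked' if tracked else 'ignored')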
#!/usr/bin/env python3
def main():
nx = 200
ny = 100
print("P3\n",nx," ",ny,"\n255")
for j in reversed(range(ny)):
for i in range(nx):
r = i / nx
g = j / ny
b = 0.2
ir = int(255.99*r)
ig = int(255.99*g)
ib = int(255.99*b)
print(ir," ",ig," ",ib)
if __name__ == '__main__':
main() | fernandomv3/raytracing_in_one_week | ch01/raytracer.py | Python | mit | 338 |
import serial
import io
import win32gui
import win32api
import win32con
import sys
import time
from math import atan2,cos
def LeftClick():
pos = get_curpos()
handle = get_win_handle(pos)
client_pos = win32gui.ScreenToClient(handle, pos)
tmp = win32api.MAKELONG(client_pos[0], client_pos[1])
win32gui.SendMessage(handle, win32con.WM_ACTIVATE, win32con.WA_ACTIVE, 0)
win32api.SendMessage(handle, win32con.WM_LBUTTONDOWN, win32con.MK_LBUTTON, tmp)
win32api.SendMessage(handle, win32con.WM_LBUTTONUP, win32con.MK_LBUTTON, tmp)
def RightClick():
pos = get_curpos()
handle = get_win_handle(pos)
client_pos = win32gui.ScreenToClient(handle, pos)
tmp = win32api.MAKELONG(client_pos[0], client_pos[1])
win32gui.SendMessage(handle, win32con.WM_ACTIVATE, win32con.WA_ACTIVE, 0)
win32api.SendMessage(handle, win32con.WM_RBUTTONDOWN, win32con.MK_RBUTTON, tmp)
win32api.SendMessage(handle, win32con.WM_RBUTTONUP, win32con.MK_RBUTTON, tmp)
def scroll(i):
x,y = get_curpos()
win32api.mouse_event(win32con.MOUSEEVENTF_WHEEL, x, y, i, 0)
def MoveRelative(x,y):
pos = get_curpos()
win32api.SetCursorPos((pos[0]+x,pos[1]-y))
def MoveRelativeXAbsY(x,y):
pos = get_curpos()
win32api.SetCursorPos((pos[0]+x,y))
def MoveRelativeYAbsX(x,y):
pos = get_curpos()
    # relative Y (screen Y grows downwards, hence the minus) at an absolute X
    win32api.SetCursorPos((x,pos[1]-y))
def get_curpos():
return win32gui.GetCursorPos()
def get_win_handle(pos):
return win32gui.WindowFromPoint(pos)
if __name__ == '__main__':
left = False
right = False
Gyro_gain = 0.061
DT = 0.001
rate_gyr_x = 0
rate_gyr_y = 0
rate_gyr_z = 0
gxAngle = 0
gyAngle = 0
gzAngle = 0
CFaX = 0
CFaY = 0
CFaZ = 0
ser = serial.Serial("COM6", 115200,timeout=1)
sio = io.TextIOWrapper(io.BufferedRWPair(ser, ser))
print "Config Complete"
prevy = 0
prevz = 0
xOff=0
yOff=0
zOff=0
accx = 0
accy = 0
accz = 0
pCFaX = 0
pCFaZ = 0
pX = 0
avgZ = 0
avgX = 0
avgY = 0
buffer_string = ""
while(1):
if win32api.GetAsyncKeyState(ord('Q')) :
break
try:
buffer_string = buffer_string + ser.read(ser.inWaiting())
if '\n' in buffer_string:
                # decode the input once a complete line has arrived
lines = buffer_string.split('\n')
last_received = lines[-2]
buffer_string = lines[-1]
#print last_received
"""
line = sio.readline()
line.strip()
print line
"""
inputs = map(int,last_received.split(","))
p1 = inputs[0]
p2 = inputs[1]
rate_gyr_x = inputs[2] * Gyro_gain
rate_gyr_y = inputs[3] * Gyro_gain
rate_gyr_z = inputs[4] * Gyro_gain
avgZ = 0.3 * rate_gyr_z + avgZ * 0.7
avgX = 0.3 * rate_gyr_x + avgX * 0.7
avgY = 0.3 * rate_gyr_y + avgY * 0.7
if(1<abs(avgY)<10):
scroll(int(avgY))
# play with the values
#if((1<abs(rate_gyr_x + 1.6)<10)and(1<abs(rate_gyr_z)<10)):
if((1<abs(avgX + 1.6)<10)and(1<abs(avgZ)<10)):
MoveRelative(-int(avgZ*1.1),-int((avgX+1.6)*1.1))
elif(1<abs(avgX + 1.1)<10):
MoveRelative(0, -int((avgX+1.5)*1.6))
elif(1<abs(avgZ)<10):
MoveRelative(-int(avgZ*1.1), 0)
# working code
# if((0.5<abs(rate_gyr_x + 1.5)<10)and(0.5<abs(rate_gyr_z)<10)):
# MoveRelative(-int(avgZ*1.6),-int((avgX+1.5)*1.6))
# elif(0.5<abs(avgX + 1.5)<10):
# MoveRelative(0, -int((avgX+1.5)*1.6))
# elif(0.5<abs(avgZ)<10):
# MoveRelative(-int(avgZ*1.6), 0)
#print rate_gyr_x
#if((1.5<abs(rate_gyr_x + 1.5)<10)and(1.5<abs(rate_gyr_z)<10)):
# MoveRelative(-int(rate_gyr_z*1.6),-int((rate_gyr_x + 1.5)*1.6))
#elif(3<abs(rate_gyr_x + 1.5)<10):
# MoveRelative(0,-int((rate_gyr_x + 1.5)*1.6))
#elif(3<abs(rate_gyr_z)<10):
# MoveRelative(-int(rate_gyr_z*1.6), 0)
if(p1 == 1):
left = True
else:
if(left):
left = False
LeftClick()
if(p2 == 1):
right = True
else:
if(right):
right = False
RightClick()
#time.sleep(0.005)
# except IndexError:
# pass
except ValueError:
pass
except:
print "Unexpected error:", sys.exc_info()[0]
raise
ser.close()
# INPUT[5] Z
# INPUT[7] X
#print rate_gyr_z
#print avgZ
#if(1<abs(avgZ)<10):
## MoveRelative(-int(avgZ*0.5),0)
| MandarGogate/AM-AIR-MOUSE | Mouse-Driver.py | Python | cc0-1.0 | 5,379 |
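The avgX/avgY/avgZ updates in the driver above are an exponential moving average (new = 0.3 * sample + 0.7 * previous) that damps gyro jitter before the rates are turned into cursor motion. The helper below only restates that filter in isolation; the class name and the demo values are editorial, not part of the driver.

class EMA(object):
    """Exponential moving average, as used for avgX/avgY/avgZ above."""
    def __init__(self, alpha=0.3, value=0.0):
        self.alpha = alpha      # weight of the newest sample
        self.value = value

    def update(self, sample):
        self.value = self.alpha * sample + (1.0 - self.alpha) * self.value
        return self.value

# With alpha=0.3 a single spike decays by a factor of 0.7 per quiet sample.
smooth = EMA()
for reading in (0.0, 10.0, 0.0, 0.0, 0.0):
    print(round(smooth.update(reading), 3))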
#coding:utf-8
from threading import Thread
from queue import Queue
import paramiko
import time
class Thread_Paramiko(Thread):
'''
    Multi-threaded command dispatch over SSH, built on paramiko.
'''
def __init__(self,UserInfo):
        super(Thread_Paramiko,self).__init__()  # initialize the parent Thread class
        '''__CmdQueue is the command message queue; keeping it private is safer.
        '''
self.__CmdQueue = Queue()
self.__Host = UserInfo
        '''Use the ServerName as the thread name, which helps with thread management later on.
        '''
self._name = self.__Host[2]
        '''self.DBQueue = DBQueue
        DBQueue is not controlled by the worker threads; user info is owned by the
        main thread and passed into __Host as a constructor argument.
        PS: considering turning the list into a dict to make the code easier to read.
        if not self.DBQueue.empty():
            self.__Host = self.DBQueue.get()
        '''
        '''self.__Host element reference:
        self.__Host[0] == hostname
        self.__Host[1] == port
        self.__Host[2] == servername
        self.__Host[3] == username
        self.__Host[4] == passwd
        '''
def __del__(self):
print("服务器:%s执行完成线程结束" % (self.__Host[2],))
    def connect(self):  # establish the SSH connection using a password
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
self.__Host[4] = self.__Host[4].strip()
self.ssh.connect(self.__Host[0],port=int(self.__Host[1]),username=self.__Host[3],password=self.__Host[4])
        except paramiko.ssh_exception.AuthenticationException as err:  # paramiko exception meaning authentication failed
            if err.__str__() == "Authentication failed.":  # custom exceptions carry the error message in __str__
                print("Server %s:\tauthentication error, please check the username or password" % (self.__Host[2],))
                return -1  # signal the error
        except TimeoutError as err:
            print("Server %s: Error: %s" % (self.__Host[2], err))
            return -1  # signal the error
        else:
            print("Server %s:\tconnected successfully" % (self.__Host[2],))
    def exec_command(self,cmd):  # execute a single command, then return
        '''This part is not very robust: no exception handling is done here.
        '''
stdin,stdout,stderr = self.ssh.exec_command(cmd)
ret = stdout.read().decode('utf-8')
if ret:
            print('Server %s:\tcommand executed successfully' % (self.__Host[2],))
            self.log(ret,cmd)  # write the result to the log
        else:
            print('Server %s:\tcommand did not execute successfully' % (self.__Host[2],))
return -1
    def log(self,ret,cmd):  # write the result to the log file
        fp = open(('%s_cmd.log' % (self.__Host[2],)),'a')  # append mode; the file is created if missing
        nowtime = time.strftime('%Y-%m-%d %X',time.localtime(time.time()))  # current timestamp
        # columns: time, command, status, output
if fp.write(('%s\t \'%s\' \tOK\tret->\n-----------------------------------------------\
\n%s-----------------------------------------------\n' % (nowtime,cmd,ret,))):
            print('Server %s:\tresult successfully written to the log' % (self.__Host[2],))
            # check the return value of write(); if it is falsy nothing was logged
        else:
            print('Server %s:\tresult was not written to the log' % (self.__Host[2],))
fp.close()
def getqueue(self):
        '''
        Return the command queue so the main thread can push commands into it.
        '''
return(self.__CmdQueue)
def run(self):
if self.connect():
#print("服务器:%s发生不可逆的错误\t线程结束" % (self.__Host[2],))
return -1
else:
flag = True
while flag:
if not self.__CmdQueue.empty() :
CmdTemp = self.__CmdQueue.get()
if CmdTemp == 'Quit':
flag = False
else:
self.exec_command(CmdTemp)
else:
time.sleep(0.1)
| HuangeHei/ThreadParamiko | _Thread_Distribution/model/Thread_Paramiko.py | Python | gpl-2.0 | 4,584 |
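A sketch of how the worker above is intended to be driven from a main thread, following the __Host layout documented in __init__; the host values and the import path are placeholders.

# from thread_paramiko import Thread_Paramiko   # import path is an assumption
# UserInfo layout, per __init__: [hostname, port, servername, username, password]
host = ['192.0.2.10', '22', 'web01', 'deploy', 'secret']

worker = Thread_Paramiko(host)
cmd_queue = worker.getqueue()    # the worker consumes commands from this queue
worker.start()

cmd_queue.put('uptime')          # each result is appended to web01_cmd.log
cmd_queue.put('df -h')
cmd_queue.put('Quit')            # sentinel understood by run(): ends the loop

worker.join()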
import pymongo
__author__ = 'jslvtr'
class Database(object):
URI = "mongodb://127.0.0.1:27017"
DATABASE = None
@staticmethod
def initialize():
client = pymongo.MongoClient(Database.URI)
Database.DATABASE = client['fullstack']
@staticmethod
def insert(collection, data):
Database.DATABASE[collection].insert(data)
@staticmethod
def find(collection, query):
return Database.DATABASE[collection].find(query)
@staticmethod
def find_one(collection, query):
return Database.DATABASE[collection].find_one(query) | brunotougeiro/python | udemy-python-web-apps/terminal_blog/database.py | Python | gpl-2.0 | 591 |
"""datatable.py
"""
import json, logging, pytz
from flask import current_app, g
from datetime import datetime, time, date, timedelta
from app import get_keys
from app.lib.timer import Timer
log = logging.getLogger(__name__)
#-------------------------------------------------------------------------------
def get_data(start=None, end=None, tag=None):
from bson.json_util import loads,dumps
from dateutil.parser import parse
limit = 1000
t1 = Timer()
if tag and tag == 'routes_new':
g.db = current_app.db_client['bravo']
data = g.db['new_routes'].find(
{'group':'vec'} #, 'date':{'$gte':parse("Sep 7 2017 00:00:00Z")}}
).sort('date',-1).limit(limit)
elif tag and tag == 'test_gsheets':
g.db = current_app.db_client['test']
data = g.db['gsheets'].find(
{'group':'vec'}
).sort('date',-1).limit(limit)
data = loads(dumps(list(data)))
log.debug('Returning %s routes to datatable [%sms]', len(data), t1.clock(t='ms'))
return data
| SeanEstey/Bravo | app/main/datatable.py | Python | gpl-2.0 | 1,043 |
# Generated by Django 2.0 on 2018-05-16 02:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20180515_1458'),
('falta', '0004_auto_20180515_2221'),
]
operations = [
migrations.RemoveField(
model_name='antecipacao',
name='user',
),
migrations.RemoveField(
model_name='reposicao',
name='user',
),
migrations.AddField(
model_name='agendamento',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='agendamentos', to='core.User'),
preserve_default=False,
),
]
| diaspa-nds/cfar | falta/migrations/0005_auto_20180515_2309.py | Python | gpl-2.0 | 823 |
"""Django URL configuration for unrecognized neighbors system."""
from django.conf.urls import patterns, url
urlpatterns = patterns(
'nav.web.neighbors.views',
url(r'^$', 'index', name='neighbors-index'),
url(r'neighbor-state/', 'set_ignored_state', name='neighbors-set-state'),
)
| sigmunau/nav | python/nav/web/neighbors/urls.py | Python | gpl-2.0 | 294 |
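The configuration above uses the pre-Django-1.10 patterns() helper with dotted-string view names, both of which were removed in Django 1.10. A rough equivalent for newer Django versions, assuming the two view callables are importable from nav.web.neighbors.views, would be:

from django.conf.urls import url

from nav.web.neighbors import views

urlpatterns = [
    url(r'^$', views.index, name='neighbors-index'),
    url(r'neighbor-state/', views.set_ignored_state, name='neighbors-set-state'),
]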
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from builtins import range
import os
from qgis.PyQt import QtGui, uic
from qgis.PyQt.QtCore import pyqtSignal, pyqtSlot, Qt
import math
from qgis.PyQt import QtCore, QtGui
from qgis.PyQt.QtWidgets import QShortcut
from qgis.PyQt.QtGui import QKeySequence
from qgis.PyQt.QtCore import QSettings
from .geometricaAquisition import GeometricaAcquisition
from qgis.core import QgsPointXY, Qgis, QgsWkbTypes
from qgis.gui import QgsMapMouseEvent, QgsMapTool
class Circle(GeometricaAcquisition):
def __init__(self, canvas, iface, action):
super(Circle, self).__init__(canvas, iface, action)
self.canvas = canvas
self.iface = iface
self.rubberBand = None
self.initVariable()
def initVariable(self):
if self.rubberBand:
self.rubberBand.reset(True)
self.rubberBand = None
self.startPoint = None
self.endPoint = None
self.qntPoint = 0
self.geometry = []
def showCircle(self, startPoint, endPoint):
nPoints = 50
x = startPoint.x()
y = startPoint.y()
r = math.sqrt((endPoint.x() - startPoint.x())**2 + (endPoint.y() - startPoint.y())**2)
self.rubberBand.reset(self.iface.activeLayer().geometryType())
for itheta in range(nPoints+1):
theta = itheta*(2.0*math.pi/nPoints)
self.rubberBand.addPoint(QgsPointXY(x+r*math.cos(theta), y+r*math.sin(theta)))
self.rubberBand.closePoints()
def endGeometry(self):
self.geometry = self.rubberBand.asGeometry()
self.createGeometry(self.geometry)
def canvasReleaseEvent(self, event):
if event.button() == Qt.LeftButton:
if not self.startPoint:
self.startPoint = QgsPointXY(event.mapPoint())
self.rubberBand = self.getRubberBand()
if event.button() == Qt.RightButton:
self.endGeometry()
def canvasMoveEvent(self, event):
if self.snapCursorRubberBand:
self.snapCursorRubberBand.hide()
self.snapCursorRubberBand.reset(geometryType=QgsWkbTypes.PointGeometry)
self.snapCursorRubberBand = None
oldPoint = QgsPointXY(event.mapPoint())
event.snapPoint()
point = QgsPointXY(event.mapPoint())
if oldPoint != point:
self.createSnapCursor(point)
if self.startPoint:
self.endPoint = QgsPointXY(event.mapPoint())
self.showCircle(self.startPoint, self.endPoint)
| lcoandrade/DsgTools | gui/ProductionTools/MapTools/Acquisition/circle.py | Python | gpl-2.0 | 2,590 |
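showCircle() above samples the parametric circle (x + r*cos(theta), y + r*sin(theta)) at 50 evenly spaced angles, with r taken as the distance from the first click to the cursor. The same construction in isolation, with no QGIS dependency (function name and demo values are editorial):

import math

def circle_points(cx, cy, edge_x, edge_y, n=50):
    """Vertices of a circle centred on (cx, cy) passing through (edge_x, edge_y),
    sampled at n evenly spaced angles - the same maths as showCircle()."""
    r = math.hypot(edge_x - cx, edge_y - cy)
    step = 2.0 * math.pi / n
    return [(cx + r * math.cos(i * step), cy + r * math.sin(i * step))
            for i in range(n + 1)]   # n+1 points so the ring closes on itself

pts = circle_points(0.0, 0.0, 10.0, 0.0, n=4)   # tiny example: a "square" circle
print(pts)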