repo_name (string, 5–92 chars) | path (string, 4–221 chars) | copies (string, 19 classes) | size (string, 4–6 chars) | content (string, 766–896k chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
borgaster/SpaceWarsEvolved | main.py | 1 | 16816 | import time
from animation import *
from asteroidField import *
from background import *
from loader import *
from physics import *
from player import *
from powerup import *
import pygame
from pygame.locals import *
from rotatingMenu_img import *
from spacemenu import *
from starField import *
# default key bindings for the players
keyPresset1 = [K_LEFT,K_RIGHT,K_UP,K_DOWN, K_SPACE, K_m]
keyPresset2 = [K_a, K_d, K_w, K_s, K_x, K_r]
pygame.init()
def game(numkills,nave1,nave2):
SCREENSIZE = [800,600]
#screen = pygame.display.set_mode(SCREENSIZE,pygame.FULLSCREEN)
## uncomment for debug
screen = pygame.display.set_mode(SCREENSIZE)
pygame.mouse.set_visible(0)
clock = pygame.time.Clock()
#init background
background = Background(screen,'galaxy.jpg')
#init starfield effect and asteroid field
starfield = StarField(screen)
asteroidField = AsteroidField(screen)
#init music
rand = random.randrange(0,2)
# if rand == 0:
# load_music('After Burner.mp3')
#else:
#load_music('Spybreak.mp3')
#load_music('Gundam.mp3')
#init players
player1 = Player((200,SCREENSIZE[1]/2),keyPresset1,1,nave1,numkills)
playerSprite1 = pygame.sprite.RenderPlain((player1))
player1.spin(90,3)
player2 = Player((SCREENSIZE[0]-200,SCREENSIZE[1]/2),keyPresset2,2,nave2,numkills)
playerSprite2 = pygame.sprite.RenderPlain((player2))
player2.spin(90,1)
#powerup stuff variables
powerups_on_screen = False
done = False
retval = 0
powerup_available = 0
#vars only for the little animation in the corner of the screen
i = random.randrange(1,4)
pickup_timer = 0
while not done:
clock.tick(40)
#if there are no asteroids left, respawn them
current_asteroids = len(asteroidField.asteroidSprites)
if current_asteroids <= 0:
current_asteroids = asteroidField.refresh(asteroidField.num_asteroids +1)
if pickup_timer != 0:
elapsed = round(time.clock())
##draw player information
font = pygame.font.SysFont("consola", 20)
ScorePanel1 ="Player 1 - Lives: "+str(player1.statistics[0])+" "+"Score: "+str(player1.statistics[3])
scorePlayer1 = font.render(ScorePanel1, True, (255,255,255))
if nave2 != 0:
ScorePanel2 ="Player 2 - Lives: "+str(player2.statistics[0])+" Score: "+str(player2.statistics[3])
scorePlayer2 = font.render(ScorePanel2, True, (255,255,255))
# draw information about available powerups
font = pygame.font.SysFont("consola", 40)
PowerupPanel = ""
if powerups_on_screen == False:
poweruppanel = font.render(PowerupPanel, True, (0,255,0))
#############################
##MOVE PLAYERS
#if only one player is playing
if nave2 == 0:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
done = True
elif event.key == keyPresset1[0]:
player1.dx = -10
player1.spin(90,1)
elif event.key == keyPresset1[1]:
player1.dx = 10
player1.spin(90,3)
elif event.key == keyPresset1[2]:
player1.dy = -10
player1.spin(90,0)
elif event.key == keyPresset1[3]:
player1.dy = 10
player1.spin(90,2)
elif event.type == KEYUP:
if event.key == keyPresset1[0]:
player1.dx = -3
elif event.key == keyPresset1[1]:
player1.dx = 3
elif event.key == keyPresset1[2]:
player1.dy = -3
elif event.key == keyPresset1[3]:
player1.dy = 3
elif event.key == keyPresset1[5]:
player1.changeWeapon()
# two players are playing, handle both key sets
else:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
done = True
elif event.key == keyPresset1[0]:
player1.dx = -10
player1.spin(90,1)
elif event.key == keyPresset1[1]:
player1.dx = 10
player1.spin(90,3)
elif event.key == keyPresset1[2]:
player1.dy = -10
player1.spin(90,0)
elif event.key == keyPresset1[3]:
player1.dy = 10
player1.spin(90,2)
elif event.key == keyPresset2[0]:
player2.dx = -10
player2.spin(90,1)
elif event.key == keyPresset2[1]:
player2.dx = 10
player2.spin(90,3)
elif event.key == keyPresset2[2]:
player2.dy = -10
player2.spin(90,0)
elif event.key == keyPresset2[3]:
player2.dy = 10
player2.spin(90,2)
elif event.type == KEYUP:
if event.key == keyPresset1[0]:
player1.dx = -3
elif event.key == keyPresset1[1]:
player1.dx = 3
elif event.key == keyPresset1[2]:
player1.dy = -3
elif event.key == keyPresset1[3]:
player1.dy = 3
elif event.key == keyPresset1[5]:
player1.changeWeapon()
elif event.key == keyPresset2[0]:
player2.dx = -3
elif event.key == keyPresset2[1]:
player2.dx = 3
elif event.key == keyPresset2[2]:
player2.dy = -3
elif event.key == keyPresset2[3]:
player2.dy = 3
elif event.key == keyPresset2[5]:
player2.changeWeapon()
background.update()
starfield.update()
#compute when a new powerup activates and which type it is
#in single player only the weapon powerup is available
activate_powerups = random.randrange(0,200)
if nave2 != 0:
powerup_type = random.randrange(1,4)
else:
powerup_type = 2
if activate_powerups == 150:
if powerups_on_screen == False:
powerup_available = powerup_type
if (powerup_type == 1):
PowerupPanel = "Health Powerup Available!"
poweruppanel = font.render(PowerupPanel, True, (0,255,0))
elif powerup_type == 2:
PowerupPanel = "Weapon Powerup Available!"
poweruppanel = font.render(PowerupPanel, True, (255,0,0))
else:
PowerupPanel = "Mines Available!!"
poweruppanel = font.render(PowerupPanel, True, (255,0,0))
powerup = Powerup(powerup_available,SCREENSIZE)
powerupSprite = pygame.sprite.RenderPlain((powerup))
powerups_on_screen = True
## POWERUP IS ALREADY ON SCREEN
########################
#intersection calculations
#Check laser collisions between players
kill = lasers(player1,player2,playerSprite1,playerSprite2,asteroidField)
#if a player was killed, exit
if kill == 1:
done = True
kill = asteroids(player1,player2,playerSprite1,playerSprite2,asteroidField)
#if a player was killed, exit
if kill == 1:
done = True
#pick up powerups
if powerups_on_screen == True:
retval = pickup_powerup(powerup,powerupSprite,player1,playerSprite1,powerup_available)
if retval == 1:
retval = 0
powerups_on_screen = False
if powerup.tipo == 2 and powerup.damagefactor == 4:
pickup_timer = round(time.clock())
elapsed = pickup_timer
else:
retval = pickup_powerup(powerup,powerupSprite,player2,playerSprite2,powerup_available)
if retval == 1:
retval = 0
powerups_on_screen = False
if powerup.tipo == 2 and powerup.damagefactor == 4:
pickup_timer = round(time.clock())
elapsed = pickup_timer
#############################
# Drawing
#draw player 1
screen.blit(scorePlayer1, (10, 740))
playerSprite1.update(screen)
playerSprite1.draw(screen)
player1.draw_health(screen)
player1.draw_stats(screen)
#draw player 2
if nave2 != 0:
screen.blit(scorePlayer2, (10, 750))
playerSprite2.update(screen)
playerSprite2.draw(screen)
player2.draw_health(screen)
player2.draw_stats(screen)
#powerups
screen.blit(poweruppanel, (350, 10))
if powerups_on_screen == True:
powerupSprite.draw(screen)
#draw powerup pickups
for sprite in weapon_pickups:
sprite.render(screen,False)
for sprite in health_pickups:
sprite.render(screen,False)
#draw asteroids
asteroidField.update()
#draw explosions
for sprite in explosoes:
sprite.render(screen,False)
#draw the humor pic
if pickup_timer != 0:
if (elapsed - pickup_timer) < 1.5:
toasty_pic, toasty_rect = load_image("toasty"+str(i)+".PNG", -1)
screen.blit(toasty_pic,(885,650))
else:
pickup_timer = 0
#Changed the random selection because the degree of randomness was low
#this way we all show up more often :)
listagem=[1,2,3,4]
random.shuffle(listagem)
random.shuffle(listagem)
i = listagem[0]
pygame.display.flip()
##END OF THE WHILE LOOP
#####################################
stop_music()
pygame.display.set_mode([800,600])
return player1,player2
def main():
pygame.init()
SCREENSIZE = [800,600]
screen = pygame.display.set_mode(SCREENSIZE)
pygame.display.set_caption("Space War Evolved")
pygame.mouse.set_visible(0)
#init music
#load_music('menu.mp3')
clock = pygame.time.Clock()
SP, rect = load_image("SP.png", -1)
MP, rect2 = load_image("MP.png", -1)
S, rect3 = load_image("S.png", -1)
H, rect4 = load_image("H.png", -1)
A, rect5 = load_image("A.png", -1)
E, rect6 = load_image("E.png", -1)
SP_red, rect = load_image("SP_red_35_433.png", -1)
MP_red, rect = load_image("MP_red_93_433.png", -1)
S_red, rect = load_image("S_red_151_478.png", -1)
H_red, rect = load_image("H_red_93_478.png", -1)
A_red, rect = load_image("A_red_151_433.png", -1)
E_red, rect = load_image("E_red_35_478.png", -1)
extra, rect = load_image("extra.png", -1)
multi = []
multi_images = load_sliced_sprites(221,34,'multi_player_anim_221x34.png')
single = []
single_images = load_sliced_sprites(243,34,'single_anim_243x34.png')
help = []
help_images = load_sliced_sprites(74,35,'help_anim_74x35.png')
about = []
about_images = load_sliced_sprites(112,29,'about_anim_112x29.png')
exit = []
exit_images = load_sliced_sprites(74,28,'exit_anim_74x28.png')
setkeys = []
setkeys_images = load_sliced_sprites(179,29,'setkeys_anim_179x29.png')
jiproj = []
jiproj_images = load_sliced_sprites(128,160,'ji_proj_128x160.png')
jiproj.append(AnimatedSprite(jiproj_images,129,31))
autores = []
autores_images = load_sliced_sprites(111,160,'autores.png')
autores.append(AnimatedSprite(autores_images,129,217))
moverCursor = load_sound('moverCursor.wav')
moverCursor.set_volume(0.2)
clock = pygame.time.Clock()
menu = RotatingMenu(x=520, y=295, radius=160, arc=pi, defaultAngle=pi/2.0)
background = Background(screen,'Stargate_menu.png')
menu.addItem(MenuItem(H))
menu.addItem(MenuItem(S))
menu.addItem(MenuItem(SP))
menu.addItem(MenuItem(MP))
menu.addItem(MenuItem(A))
menu.addItem(MenuItem(E))
menu.selectItem(2)
#Loop
while True:
#Handle events
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
return False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
moverCursor.play()
menu.selectItem(menu.selectedItemNumber + 1)
if event.key == pygame.K_RIGHT:
moverCursor.play()
menu.selectItem(menu.selectedItemNumber - 1)
if event.key == pygame.K_RETURN:
if menu.selectedItemNumber == 0:
option2()
elif menu.selectedItemNumber == 1:
option4()
elif menu.selectedItemNumber == 2:
option0()
elif menu.selectedItemNumber == 3:
option1()
elif menu.selectedItemNumber == 4:
option3()
elif menu.selectedItemNumber == 5:
option5()
return False
#Update stuff
background.update()
menu.update()
for sprite in jiproj:
sprite.render(screen,True)
for sprite in autores:
sprite.render(screen,True)
screen.blit(extra, (124,24))
if menu.selectedItemNumber == 0:
single = []
multi = []
exit = []
about = []
setkeys = []
screen.blit(H_red, (93,478))
help.append(AnimatedSprite(help_images,490,280))
elif menu.selectedItemNumber == 1:
single = []
help = []
exit = []
about = []
multi = []
screen.blit(S_red, (151,478))
setkeys.append(AnimatedSprite(setkeys_images,435,280))
elif menu.selectedItemNumber == 2:
help = []
multi = []
exit = []
about = []
setkeys = []
screen.blit(SP_red, (35,433))
single.append(AnimatedSprite(single_images,403,280))
elif menu.selectedItemNumber == 3:
single = []
help = []
exit = []
about = []
setkeys = []
screen.blit(MP_red, (93,433))
multi.append(AnimatedSprite(multi_images,410,280))
elif menu.selectedItemNumber == 4:
single = []
multi = []
exit = []
help = []
setkeys = []
screen.blit(A_red, (151,433))
about.append(AnimatedSprite(about_images,470,280))
elif menu.selectedItemNumber == 5:
single = []
multi = []
help = []
about = []
setkeys = []
screen.blit(E_red, (35,478))
exit.append(AnimatedSprite(exit_images,490,280))
for sprite in multi:
sprite.render(screen,True)
for sprite in single:
sprite.render(screen,True)
for sprite in about:
sprite.render(screen,True)
for sprite in exit:
sprite.render(screen,True)
for sprite in help:
sprite.render(screen,True)
for sprite in setkeys:
sprite.render(screen,True)
#Draw stuff
#display.fill((0,0,0))
menu.draw(screen)
pygame.display.flip() #Show the updated scene
clock.tick(fpsLimit) #Wait a little
if __name__ == "__main__":
main()
| mit | -5,715,220,330,823,720,000 | 33.178862 | 110 | 0.506185 | false | 3.822687 | false | false | false |
kgullikson88/GSSP_Analyzer | gsspy/fitting.py | 1 | 19991 | from __future__ import print_function, division, absolute_import
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
import subprocess
from astropy.io import fits
from astropy import time
import DataStructures
from ._utils import combine_orders, read_grid_points, ensure_dir
from .analyzer import GSSP_Analyzer
import logging
import glob
home = os.environ['HOME']
GSSP_EXE = '{}/Applications/GSSP/GSSP_single/GSSP_single'.format(home)
GSSP_ABUNDANCE_TABLES = '{}/Applications/GSSPAbundance_Tables/'.format(home)
GSSP_MODELS = '/media/ExtraSpace/GSSP_Libraries/LLmodels/'
class GSSP_Fitter(object):
teff_minstep = 100
logg_minstep = 0.1
feh_minstep = 0.1
vsini_minstep = 10
vmicro_minstep = 0.1
def __init__(self, filename, gssp_exe=None, abund_tab=None, models_dir=None):
"""
A python wrapper to the GSSP code (must already be installed)
Parameters:
===========
filename: string
The filename of the (flattened) fits spectrum to fit.
gssp_exe: string (optional)
The full path to the gssp executable file
abund_tab: string (optional)
The full path to the directory containing
GSSP abundance tables.
models_dir: string:
The full path to the directory containing
GSSP atmosphere models.
Methods:
==========
fit: Fit the parameters
"""
if gssp_exe is None:
gssp_exe = GSSP_EXE
if abund_tab is None:
abund_tab = GSSP_ABUNDANCE_TABLES
if models_dir is None:
models_dir = GSSP_MODELS
# Read in the file and combine the orders
orders = self._read_fits_file(filename)
combined = combine_orders(orders)
#TODO: Cross-correlate the data to get it close. GSSP might have trouble with huge RVs...
# Get the object name/date
header = fits.getheader(filename)
star = header['OBJECT']
date = header['DATE-OBS']
try:
jd = time.Time(date, format='isot', scale='utc').jd
except TypeError:
jd = time.Time('{}T{}'.format(date, header['UT']), format='isot',
scale='utc').jd
# Save the data to an ascii file
output_basename = '{}-{}'.format(star.replace(' ', ''), jd)
np.savetxt('data_sets/{}.txt'.format(output_basename),
np.transpose((combined.x, combined.y)),
fmt='%.10f')
# Save some instance variables
self.data = combined
self.jd = jd
self.starname = star
self.output_basename = output_basename
self.gssp_exe = os.path.abspath(gssp_exe)
self.abundance_table = abund_tab
self.model_dir = models_dir
self.gssp_gridpoints = read_grid_points(models_dir)
def _run_gssp(self, teff_lims=(7000, 30000), teff_step=1000,
logg_lims=(3.0, 4.5), logg_step=0.5,
feh_lims=(-0.5, 0.5), feh_step=0.5,
vsini_lims=(50, 350), vsini_step=50,
vmicro_lims=(1, 5), vmicro_step=1,
R=80000, ncores=1):
"""
Coarsely fit the parameters Teff, log(g), and [Fe/H].
"""
# First, make sure the inputs are reasonable.
teff_step = max(teff_step, self.teff_minstep)
logg_step = max(logg_step, self.logg_minstep)
feh_step = max(feh_step, self.feh_minstep)
vsini_step = max(vsini_step, self.vsini_minstep)
vmicro_step = max(vmicro_step, self.vmicro_minstep)
teff_lims = (min(teff_lims), max(teff_lims))
logg_lims = (min(logg_lims), max(logg_lims))
feh_lims = (min(feh_lims), max(feh_lims))
vsini_lims = (min(vsini_lims), max(vsini_lims))
vmicro_lims = (min(vmicro_lims), max(vmicro_lims))
teff_lims, logg_lims, feh_lims = self._check_grid_limits(teff_lims,
logg_lims,
feh_lims)
# Make the input file for GSSP
inp_file=self._make_input_file(teff_lims=teff_lims, teff_step=teff_step,
logg_lims=logg_lims, logg_step=logg_step,
feh_lims=feh_lims, feh_step=feh_step,
vsini_lims=vsini_lims, vsini_step=vsini_step,
vmicro_lims=vmicro_lims, vmicro_step=vmicro_step,
resolution=R)
# Run GSSP
subprocess.check_call(['mpirun', '-n', '{}'.format(ncores),
'{}'.format(self.gssp_exe),
'{}'.format(inp_file)])
# Move the output directory to a new name that won't be overridden
output_dir = '{}_output'.format(self.output_basename)
ensure_dir(output_dir)
for f in glob.glob('output_files/*'):
subprocess.check_call(['mv', f, '{}/'.format(output_dir)])
return
def fit(self, teff_lims=(7000, 30000), teff_step=1000,
logg_lims=(3.0, 4.5), logg_step=0.5,
feh_lims=(-0.5, 0.5), feh_step=0.5,
vsini_lims=(50, 350), vsini_step=50,
vmicro_lims=(1, 5), vmicro_step=1,
R=80000, ncores=1, refine=True):
"""
Fit the stellar parameters with GSSP
Parameters:
=============
par_lims: iterable with (at least) two objects
The limits on the given parameter. 'par' can be one of:
1. teff: The effective temperature
2. logg: The surface gravity
3. feh: The metallicity [Fe/H]
4. vsini: The rotational velocity
5. vmicro: The microturbulent velocity
The default values are a very large, very coarse grid.
Consider refining based on spectral type first!
par_step: float
The initial step size to take in the given parameter.
'par' can be from the same list as above.
R: float
The spectrograph resolving power (lambda/delta-lambda)
ncores: integer, default=1
The number of cores to use in the GSSP run.
refine: boolean
Should we run GSSP again with a smaller grid after the
initial fit? If yes, the best answers will probably be
better.
Returns:
=========
A pd.Series object with the best parameters
"""
# Run GSSP
self._run_gssp(teff_lims=teff_lims, teff_step=teff_step,
logg_lims=logg_lims, logg_step=logg_step,
feh_lims=feh_lims, feh_step=feh_step,
vsini_lims=vsini_lims, vsini_step=vsini_step,
vmicro_lims=vmicro_lims, vmicro_step=vmicro_step,
R=R, ncores=ncores)
# Look at the output and save the figures
output_dir = '{}_output'.format(self.output_basename)
best_pars, figs = GSSP_Analyzer(output_dir).estimate_best_parameters()
for par in figs.keys():
fig = figs[par]
fig.savefig(os.path.join(output_dir, '{}_course.pdf'.format(par)))
plt.close('all')
if not refine:
return best_pars
# If we get here, we should restrict the grid near the
# best solution and fit again
teff_lims = self._get_refined_limits(lower=best_pars['1sig_CI_lower_Teff'],
upper=best_pars['1sig_CI_upper_Teff'],
values=self.gssp_gridpoints.teff)
logg_lims = self._get_refined_limits(lower=best_pars['1sig_CI_lower_logg'],
upper=best_pars['1sig_CI_upper_logg'],
values=self.gssp_gridpoints.logg)
feh_lims = self._get_refined_limits(lower=best_pars['1sig_CI_lower_feh'],
upper=best_pars['1sig_CI_upper_feh'],
values=self.gssp_gridpoints.feh)
vsini_lower = best_pars.best_vsini*(1-1.5) + 1.5*best_pars['1sig_CI_lower_vsini']
vsini_upper = best_pars.best_vsini*(1-1.5) + 1.5*best_pars['1sig_CI_upper_vsini']
vsini_lims = (max(10, vsini_lower), min(400, vsini_upper))
vsini_step = max(self.vsini_minstep, (vsini_lims[1] - vsini_lims[0])/10)
vmicro_lims = (best_pars.micro_turb, best_pars.micro_turb)
# Rename the files in the output directory so they don't get overwritten
file_list = ['CCF.dat', 'Chi2_table.dat',
'Observed_spectrum.dat', 'Synthetic_best_fit.rgs']
ensure_dir(os.path.join(output_dir, 'course_output', ''))
for f in file_list:
original_fname = os.path.join(output_dir, f)
new_fname = os.path.join(output_dir, 'course_output', f)
subprocess.check_call(['mv', original_fname, new_fname])
# Run GSSP on the refined grid
self._run_gssp(teff_lims=teff_lims, teff_step=self.teff_minstep,
logg_lims=logg_lims, logg_step=self.logg_minstep,
feh_lims=feh_lims, feh_step=self.feh_minstep,
vsini_lims=vsini_lims, vsini_step=round(vsini_step),
vmicro_lims=vmicro_lims, vmicro_step=vmicro_step,
R=R, ncores=ncores)
best_pars, figs = GSSP_Analyzer(output_dir).estimate_best_parameters()
for par in figs.keys():
fig = figs[par]
fig.savefig(os.path.join(output_dir, '{}_fine.pdf'.format(par)))
plt.close(fig)  # Figure objects have no close(); close through pyplot instead
return best_pars
def _check_grid_limits_old(self, teff_lims, logg_lims, feh_lims):
df = self.gssp_gridpoints[['teff', 'logg', 'feh']].drop_duplicates()
# First, check if the limits are do-able
lower = df.loc[(df.teff <= teff_lims[0]) &
(df.logg <= logg_lims[0]) &
(df.feh <= feh_lims[0])]
upper = df.loc[(df.teff >= teff_lims[1]) &
(df.logg >= logg_lims[1]) &
(df.feh >= feh_lims[1])]
if len(upper) >= 1 and len(lower) >= 1:
return teff_lims, logg_lims, feh_lims
# If we get here, there is a problem...
# Check temperature first:
if not (len(df.loc[df.teff <= teff_lims[0]]) >= 1 and
len(df.loc[df.teff >= teff_lims[1]]) >= 1):
# Temperature grid is no good.
low_teff, high_teff = df.teff.min(), df.teff.max()
print('The temperature grid is not available in the model library!')
print('You wanted temperatures from {} - {}'.format(*teff_lims))
print('The model grid extends from {} - {}'.format(low_teff, high_teff))
new_teff_lims = (max(low_teff, teff_lims[0]),
min(high_teff, teff_lims[1]))
print('Resetting temperature limits to {} - {}'.format(*new_teff_lims))
return self._check_grid_limits(new_teff_lims, logg_lims, feh_lims)
# Check log(g) next:
teff_df = df.loc[(df.teff >= teff_lims[0]) & (df.teff <= teff_lims[1])]
if not (len(teff_df.loc[df.logg <= logg_lims[0]]) >= 1 and
len(teff_df.loc[df.logg >= logg_lims[1]]) >= 1):
# Temperature grid is no good.
low_logg, high_logg = df.logg.min(), df.logg.max()
print('The log(g) grid is not available in the model library!')
print('You wanted log(g) from {} - {}'.format(*logg_lims))
print('The model grid extends from {} - {}'.format(low_logg, high_logg))
new_logg_lims = (max(low_logg, logg_lims[0]),
min(high_logg, logg_lims[1]))
print('Resetting log(g) limits to {} - {}'.format(*new_logg_lims))
return self._check_grid_limits(teff_lims, new_logg_lims, feh_lims)
# Finally, check [Fe/H]:
subset_df = df.loc[(df.teff >= teff_lims[0]) &
(df.teff <= teff_lims[1]) *
(df.logg >= logg_lims[0]) &
(df.logg <= logg_lims[1])]
if not (len(subset_df.loc[df.feh <= feh_lims[0]]) >= 1 and
len(subset_df.loc[df.feh >= feh_lims[1]]) >= 1):
# Temperature grid is no good.
low_feh, high_feh = df.feh.min(), df.feh.max()
print('The [Fe/H] grid is not available in the model library!')
print('You wanted [Fe/H] from {} - {}'.format(*feh_lims))
print('The model grid extends from {} - {}'.format(low_feh, high_feh))
new_feh_lims = (max(low_feh, feh_lims[0]),
min(high_feh, feh_lims[1]))
print('Resetting [Fe/H] limits to {} - {}'.format(*new_feh_lims))
return self._check_grid_limits(teff_lims, logg_lims, new_feh_lims)
# We should never get here
raise ValueError('Something weird happened while checking limits!')
def _check_grid_limits(self, teff_lims, logg_lims, feh_lims):
df = self.gssp_gridpoints[['teff', 'logg', 'feh']].drop_duplicates()
# First, check if the limits are do-able as is
lower = df.loc[(df.teff == teff_lims[0]) & (df.feh == feh_lims[0])]
upper = df.loc[(df.teff == teff_lims[1]) & (df.feh == feh_lims[1])]
if (lower.logg.min() <= logg_lims[0] and
lower.logg.max() >= logg_lims[1] and
upper.logg.min() <= logg_lims[0] and
upper.logg.max() >= logg_lims[1]):
return teff_lims, logg_lims, feh_lims
# If we get here, there is a problem...
# Check temperature first:
low_teff, high_teff = df.teff.min(), df.teff.max()
if low_teff > teff_lims[0] or high_teff < teff_lims[1]:
print('The temperature grid is not available in the model library!')
print('You wanted temperatures from {} - {}'.format(*teff_lims))
print('The model grid extends from {} - {}'.format(low_teff, high_teff))
new_teff_lims = (max(low_teff, teff_lims[0]),
min(high_teff, teff_lims[1]))
print('Resetting temperature limits to {} - {}'.format(*new_teff_lims))
return self._check_grid_limits(new_teff_lims, logg_lims, feh_lims)
# Check [Fe/H] next
subset_df = df.loc[(df.teff >= teff_lims[0]) &
(df.teff <= teff_lims[1])]
low_feh, high_feh = subset_df.feh.min(), subset_df.feh.max()
if low_feh > feh_lims[0] or high_feh < feh_lims[1]:
print('The [Fe/H] grid is not available in the model library!')
print('You wanted [Fe/H] from {} - {}'.format(*feh_lims))
print('The model grid extends from {} - {}'.format(low_feh, high_feh))
new_feh_lims = (max(low_feh, feh_lims[0]),
min(high_feh, feh_lims[1]))
print('Resetting [Fe/H] limits to {} - {}'.format(*new_feh_lims))
return self._check_grid_limits(teff_lims, logg_lims, new_feh_lims)
# Finally, check log(g)
subset_df = subset_df.loc[(subset_df.feh >= feh_lims[0]) &
(subset_df.feh <= feh_lims[1])]
low_logg, high_logg = subset_df.logg.min(), subset_df.logg.max()
if low_logg > logg_lims[0] or high_logg < logg_lims[1]:
print('The log(g) grid is not available in the model library!')
print('You wanted log(g) from {} - {}'.format(*logg_lims))
print('The model grid extends from {} - {}'.format(low_logg, high_logg))
new_logg_lims = (max(low_logg, logg_lims[0]),
min(high_logg, logg_lims[1]))
print('Resetting log(g) limits to {} - {}'.format(*new_logg_lims))
return self._check_grid_limits(teff_lims, new_logg_lims, feh_lims)
# We should never get here
raise ValueError('Something weird happened while checking limits!')
def _get_refined_limits(self, lower, upper, values):
"""
Get the items in the 'values' array that are just
less than lower and just more than upper.
"""
unique_values = sorted(np.unique(values))
l_idx = np.searchsorted(unique_values, lower, side='left')
r_idx = np.searchsorted(unique_values, upper, side='right')
if l_idx > 0:
l_idx -= 1
if r_idx < len(unique_values) - 1:
r_idx += 1
return unique_values[l_idx], unique_values[r_idx]
def _read_fits_file(self, fname):
orders = []
hdulist = fits.open(fname)
for i, hdu in enumerate(hdulist[1:]):
xypt = DataStructures.xypoint(x=hdu.data['wavelength'],
y=hdu.data['flux'],
cont=hdu.data['continuum'],
err=hdu.data['error'])
xypt.x *= 10 #Convert from nanometers to angstrom
orders.append(xypt)
return orders
def _make_input_file(self, teff_lims, teff_step, logg_lims, logg_step,
feh_lims, feh_step, vsini_lims, vsini_step,
vmicro_lims, vmicro_step, resolution):
""" Make the input file for the given star
"""
output_string = '{:.1f} {:.0f} {:.1f}\n'.format(teff_lims[0],
teff_step,
teff_lims[-1])
output_string += '{:.1f} {:.1f} {:.1f}\n'.format(logg_lims[0],
logg_step,
logg_lims[1])
output_string += '{:.1f} {:.1f} {:.1f}\n'.format(vmicro_lims[0],
vmicro_step,
vmicro_lims[1])
output_string += '{:.1f} {:.1f} {:.1f}\n'.format(vsini_lims[0],
vsini_step,
vsini_lims[1])
output_string += "skip 0.03 0.02 0.07 !dilution factor\n"
output_string += 'skip {:.1f} {:.1f} {:.1f}\n'.format(feh_lims[0],
feh_step,
feh_lims[1])
output_string += 'He 0.04 0.005 0.06 ! Individual abundance\n'
output_string += '0.0 {:.0f}\n'.format(resolution)
output_string += '{}\n{}\n'.format(self.abundance_table, self.model_dir)
output_string += '2 1 !atmosphere model vmicro and mass\n'
output_string += 'ST ! model atmosphere chemical composition flag\n'
dx = self.data.x[1] - self.data.x[0]
output_string += '1 {:.5f} fit\n'.format(dx)
output_string += 'data_sets/{}.txt\n'.format(self.output_basename)
output_string += '0.5 0.99 0.0 adjust ! RV determination stuff\n'
xmin, xmax = self.data.x[0]-1, self.data.x[-1]+1
output_string += '{:.1f} {:.1f}\n'.format(xmin, xmax)
outfilename = '{}.inp'.format(self.output_basename)
with open(outfilename, 'w') as outfile:
outfile.write(output_string)
return outfilename
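# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original module). The
# FITS path and grid limits below are placeholders; GSSP itself, its model
# library and abundance tables must already be installed at the paths
# configured at the top of this file.
if __name__ == '__main__':
    fitter = GSSP_Fitter('spectra/example_flattened.fits')
    best = fitter.fit(teff_lims=(9000, 15000), teff_step=500,
                      logg_lims=(3.5, 4.5), logg_step=0.25,
                      feh_lims=(-0.5, 0.5), feh_step=0.25,
                      vsini_lims=(50, 250), vsini_step=25,
                      R=80000, ncores=4, refine=True)
    print(best)  # pd.Series with the best-fit Teff, log(g), [Fe/H], vsini, ...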
| mit | -6,591,718,950,300,025,000 | 43.523385 | 97 | 0.516182 | false | 3.486397 | false | false | false |
JuBra/GEMEditor | GEMEditor/database/ui/MetaboliteEntryDisplayWidget.py | 1 | 8059 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\MetaboliteEntryDisplayWidget.ui'
#
# Created by: PyQt5 UI code generator 5.8.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MetaboliteEntryDisplayWidget(object):
def setupUi(self, MetaboliteEntryDisplayWidget):
MetaboliteEntryDisplayWidget.setObjectName("MetaboliteEntryDisplayWidget")
MetaboliteEntryDisplayWidget.resize(333, 465)
self.formLayout = QtWidgets.QFormLayout(MetaboliteEntryDisplayWidget)
self.formLayout.setObjectName("formLayout")
self.label = QtWidgets.QLabel(MetaboliteEntryDisplayWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName("label")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label)
self.label_name = QtWidgets.QLabel(MetaboliteEntryDisplayWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_name.sizePolicy().hasHeightForWidth())
self.label_name.setSizePolicy(sizePolicy)
self.label_name.setText("")
self.label_name.setWordWrap(True)
self.label_name.setObjectName("label_name")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.label_name)
self.label_4 = QtWidgets.QLabel(MetaboliteEntryDisplayWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_4.sizePolicy().hasHeightForWidth())
self.label_4.setSizePolicy(sizePolicy)
self.label_4.setObjectName("label_4")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_4)
self.label_formula = QtWidgets.QLabel(MetaboliteEntryDisplayWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_formula.sizePolicy().hasHeightForWidth())
self.label_formula.setSizePolicy(sizePolicy)
self.label_formula.setText("")
self.label_formula.setObjectName("label_formula")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.label_formula)
self.label_2 = QtWidgets.QLabel(MetaboliteEntryDisplayWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2)
self.label_charge = QtWidgets.QLabel(MetaboliteEntryDisplayWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_charge.sizePolicy().hasHeightForWidth())
self.label_charge.setSizePolicy(sizePolicy)
self.label_charge.setText("")
self.label_charge.setObjectName("label_charge")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.label_charge)
self.label_3 = QtWidgets.QLabel(MetaboliteEntryDisplayWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_3.sizePolicy().hasHeightForWidth())
self.label_3.setSizePolicy(sizePolicy)
self.label_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_3)
self.list_synonyms = QtWidgets.QListWidget(MetaboliteEntryDisplayWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.list_synonyms.sizePolicy().hasHeightForWidth())
self.list_synonyms.setSizePolicy(sizePolicy)
self.list_synonyms.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.list_synonyms.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.list_synonyms.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.list_synonyms.setLayoutMode(QtWidgets.QListView.SinglePass)
self.list_synonyms.setObjectName("list_synonyms")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.list_synonyms)
self.label_5 = QtWidgets.QLabel(MetaboliteEntryDisplayWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_5.sizePolicy().hasHeightForWidth())
self.label_5.setSizePolicy(sizePolicy)
self.label_5.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.label_5.setObjectName("label_5")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_5)
self.table_identifiers = AnnotationTableWidget(MetaboliteEntryDisplayWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.table_identifiers.sizePolicy().hasHeightForWidth())
self.table_identifiers.setSizePolicy(sizePolicy)
self.table_identifiers.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.table_identifiers.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.table_identifiers.setWordWrap(False)
self.table_identifiers.setObjectName("table_identifiers")
self.table_identifiers.setColumnCount(0)
self.table_identifiers.setRowCount(0)
self.table_identifiers.horizontalHeader().setStretchLastSection(True)
self.table_identifiers.verticalHeader().setVisible(False)
self.table_identifiers.verticalHeader().setHighlightSections(False)
self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.table_identifiers)
self.retranslateUi(MetaboliteEntryDisplayWidget)
QtCore.QMetaObject.connectSlotsByName(MetaboliteEntryDisplayWidget)
def retranslateUi(self, MetaboliteEntryDisplayWidget):
_translate = QtCore.QCoreApplication.translate
MetaboliteEntryDisplayWidget.setWindowTitle(_translate("MetaboliteEntryDisplayWidget", "Form"))
self.label.setText(_translate("MetaboliteEntryDisplayWidget", "Name:"))
self.label_4.setText(_translate("MetaboliteEntryDisplayWidget", "Formula:"))
self.label_2.setText(_translate("MetaboliteEntryDisplayWidget", "Charge:"))
self.label_3.setText(_translate("MetaboliteEntryDisplayWidget", "Synonyms:"))
self.label_5.setText(_translate("MetaboliteEntryDisplayWidget", "Identifier:"))
from GEMEditor.base.widgets import AnnotationTableWidget
| gpl-3.0 | 1,646,370,832,036,293,600 | 61.960938 | 106 | 0.755056 | false | 4.057905 | false | false | false |
highlander12rus/whatsupmoscow.ru | demon/main.py | 1 | 3580 | # -*- coding: utf-8 -*-
__author__ = 'meanwhile'
import ssl
import time
import socket
import sys
import logging
import vkontakte
import ProvaderStorage
import Constants
import FileWriter
import ProccessingResponce
import daemon
class VkParserDemon(daemon.Daemon):
def run(self):
#read code for method vk.executin from file
codeFromFile = ''
with open(Constants.Constants.getFileCodeExecute(), 'r') as f:
codeFromFile = f.read()
#read access token from file
access_tokens = [];
with open(Constants.Constants.getFileAccessToken(), 'r') as f:
access_tokens = [token.strip() for token in f]
isValidToken = False;
for acces_token in access_tokens:
try:
vk = vkontakte.API(token=acces_token)
vk.getServerTime() # check that the connection actually works
isValidToken = True
break
except vkontakte.VKError, e:
logging.error("vkontakte.VKError ")
except ssl.SSLError, e: #The handshake operation timed out
logging.error("ssl error")
time.sleep(1)
access_tokens.append(acces_token)
if (isValidToken):
storage = ProvaderStorage.ProvaderStorage()
lastTime = vk.getServerTime()
emptyLastTime = 0;
while True:
try:
time.sleep(Constants.Constants.getTimeOutInSec())
codeSending = codeFromFile.replace('%time_replace%', str(lastTime))
json = vk.execute(code=codeSending, timeout=10)
logging.debug("vk_json responce ", json)
fileName = Constants.Constants.getDirHomeScript() + str(time.strftime("%d-%m-%Y")) + ".vkr" #vk raw
file = FileWriter.FileWriterBinary(fileName)
process = ProccessingResponce.ProccessingResponce(storage, file)
process.jsonParse(json)
if json['max_time'] > 0:
lastTime = json['max_time'] + 1
else:
logging.debug("empty json= ", json)
logging.debug("lastTime= ", lastTime)
logging.debug("complidet proccessing")
except ssl.SSLError, e:
logging.error("ssl error")
except socket.timeout, e:
logging.error("socket.timeout")
except vkontakte.VKError, e:
logging.error("vkontakte.VKError")
except AttributeError, e:
logging.error("AttributeError")
else:
#TODO: send emails warning that the tokens are not correct
logging.error("token incorrect")
if __name__ == "__main__":
logging.basicConfig(format=u'%(filename)s[LINE:%(lineno)d]# %(levelname)-8s [%(asctime)s] %(message)s',
level=logging.ERROR)
daemon = VkParserDemon('/tmp/daemon-example.pid', stdout='/var/log/vk_parser/stdout.log',
stderr='/var/log/vk_parser/error.log')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
| apache-2.0 | 35,188,145,789,785,108 | 34.58 | 119 | 0.540472 | false | 4.052392 | false | false | false |
rmac75/mboxparser | mbox.py | 1 | 3399 | #!/usr/bin/python2
#--------------------------------
#Takes in mbox, spits out csv with email info and basic geolocation, plus other header fields.
#--------------------------------
#This product includes GeoLite2 data created by MaxMind, available from
#<a href="http://www.maxmind.com">http://www.maxmind.com</a>.
import mailbox
import sys
import csv
import re
from os import path
import pprint
import argparse
import geoip2.database
import geoip2.errors
import pygeoip
import email.utils
from email.utils import getaddresses
def get_iprecord(ip):
try:
geo = reader.city(ip)
org = reader2.org_by_addr(ip)
except (geoip2.errors.AddressNotFoundError, ValueError):
return None,None,None
if geo.city.name:
cityname=geo.city.name.encode('ascii','ignore')
else:
cityname=geo.city.name
return geo.country.iso_code, cityname, org
def main():
# first some sanity tests on the command-line arguments
#sys.argv = ['mbox_to_mysql','list1.mbox','mailman','lists',] # !@!@! APS here for testing purposes only - comment out for live run
parser = argparse.ArgumentParser(description='Parse mbox file')
parser.add_argument('mbox', help='mbox file to parse')
parser.add_argument('outfile', help='output csv file')
args = parser.parse_args()
if not path.isfile(args.mbox):
parser.error("the file %s does not exist"%args.mbox)
mbox = args.mbox
outfile = args.outfile
ipPattern = re.compile('\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
global reader
reader = geoip2.database.Reader('geo/GeoLite2-City.mmdb')
global reader2
reader2 = pygeoip.GeoIP('geo/GeoIPOrg.dat')
f = open(outfile, 'wt')
try:
writer = csv.writer(f)
writer.writerow( ('Date','From','From Email','Return-Path Email','To','To Email','Recipients','X-To','Subject','Received-Last','Org','City', 'Country','X-IP','X-Org', 'X-City', 'X-Country','X-Mailer'))
for message in mailbox.mbox(mbox):
From = str(message['From'])
fname,femail = email.utils.parseaddr(From)
#print fname
Return = str(message['Return-Path'])
rname,remail = email.utils.parseaddr(Return)
#print remail
To = str(message['To'])
tname,temail = email.utils.parseaddr(To)
tos = message.get_all('to', [])
ccs = message.get_all('cc', [])
resent_tos = message.get_all('resent-to', [])
resent_ccs = message.get_all('resent-cc', [])
all_recipients = getaddresses(tos + ccs + resent_tos + resent_ccs)
XTo = str(message['X-Apparently-To'])
#findIP = re.findall(ipPattern,s)
Date = str(message['Date'])
Subject = str(message['Subject'])
Received = re.findall(ipPattern,str(message['Received']))
if Received:
#print Received[-1]
country, city, org = get_iprecord(Received[-1])
#print get_iprecord(Received[-1])
#print org
else:
Received = "None"
XIP = message['X-Originating-IP']
if XIP:
XIP = str(XIP).strip('[]')
#print ("XIP: %s." % XIP)
Xcountry, Xcity, Xorg = get_iprecord(XIP)
else:
XIP = "None"
Xcountry = "None"
Xcity = "None"
Xorg = "None"
XMailer = str(message['X-Mailer'])
#Attachment = message.get_filename()
#Body = str(message['Body'])
writer.writerow((Date,fname,femail,remail,tname,temail,all_recipients,XTo,Subject,Received[-1],org,city,country,XIP,Xorg,Xcity,Xcountry,XMailer))
finally:
f.close()
#print open(sys.argv[1], 'rt').read()
if __name__ == '__main__':
main()
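# Example invocation (a sketch; file names are placeholders). The GeoLite2-City
# and GeoIPOrg databases must exist under ./geo as hard-coded in get_iprecord():
#   python2 mbox.py archive.mbox parsed_headers.csv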
| gpl-2.0 | -8,993,837,695,608,072,000 | 28.556522 | 204 | 0.657546 | false | 2.978966 | false | false | false |
enriquecoronadozu/HMPy | src/borrar/modificar/hmpy.py | 1 | 6228 | #!/usr/bin/env python
"""@See preprocessed data
"""
from numpy import*
import matplotlib.pyplot as plt
from GestureModel import*
from Creator import*
from Classifier import*
def plotResults(gr_points,gr_sig, b_points,b_sig,name_model):
from scipy import linalg
import matplotlib.pyplot as plt
gr_points = gr_points.transpose()
b_points = b_points.transpose()
gr_sigma = []
b_sigma = []
n,m = gr_points.shape
maximum = zeros((m))
minimum = zeros((m))
x = arange(0,m,1)
for i in range(m):
gr_sigma.append(gr_sig[i*3:i*3+3])
b_sigma.append(b_sig[i*3:i*3+3])
for i in range(m):
sigma = 3.*linalg.sqrtm(gr_sigma[i])
maximum[i] = gr_points[0,i]+ sigma[0,0];
minimum[i] = gr_points[0,i]- sigma[0,0];
fig2 = plt.figure()
import matplotlib.pyplot as plt
plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 )
plt.plot(x, gr_points[0])
plt.savefig(name_model+ "_gravity_x_axis.png")
for i in range(m):
sigma = 3.*linalg.sqrtm(gr_sigma[i])
maximum[i] = gr_points[1,i]+ sigma[1,1];
minimum[i] = gr_points[1,i]- sigma[1,1];
fig3 = plt.figure()
plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 )
plt.plot(x, gr_points[1])
plt.savefig(name_model+ "_gravity_y_axis.png")
for i in range(m):
sigma = 3.*linalg.sqrtm(gr_sigma[i])
maximum[i] = gr_points[2,i]+ sigma[2,2];
minimum[i] = gr_points[2,i]- sigma[2,2];
fig3 = plt.figure()
import matplotlib.pyplot as plt
plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 )
plt.plot(x, gr_points[2])
plt.savefig(name_model+ "_gravity_z_axis.png")
for i in range(m):
sigma = 3.*linalg.sqrtm(b_sigma[i])
maximum[i] = b_points[0,i]+ sigma[0,0];
minimum[i] = b_points[0,i]- sigma[0,0];
fig4 = plt.figure()
import matplotlib.pyplot as plt
plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 )
plt.plot(x, b_points[0])
plt.savefig(name_model+ "_body_x_axis.png")
for i in range(m):
sigma = 3.*linalg.sqrtm(b_sigma[i])
maximum[i] = b_points[1,i]+ sigma[1,1];
minimum[i] = b_points[1,i]- sigma[1,1];
fig5 = plt.figure()
import matplotlib.pyplot as plt
plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 )
plt.plot(x, b_points[1])
plt.savefig(name_model+ "_body_axis.png")
for i in range(m):
sigma = 3.*linalg.sqrtm(b_sigma[i])
maximum[i] = b_points[2,i]+ sigma[2,2];
minimum[i] = b_points[2,i]- sigma[2,2];
fig6 = plt.figure()
import matplotlib.pyplot as plt
plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 )
plt.plot(x, b_points[2])
plt.savefig(name_model+ "_body_z_axis.png")
#NOTE: Add path
def newModel(name,files):
g = Creator()
#Read the data
g.ReadFiles(files,[])
g.CreateDatasets_Acc()
g.ObtainNumberOfCluster()
gravity = g.gravity
K_gravity = g.K_gravity
body = g.body
K_body = g.K_body
# 2) define the number of points to be used in GMR
# (current settings allow for CONSTANT SPACING only)
numPoints = amax(gravity[0,:]);
scaling_factor = 10/10;
numGMRPoints = math.ceil(numPoints*scaling_factor);
# 3) perform Gaussian Mixture Modelling and Regression to retrieve the
# expected curve and associated covariance matrices for each feature
gr_points, gr_sigma = g.GetExpected(gravity,K_gravity,numGMRPoints)
b_points, b_sigma = g.GetExpected(body,K_body,numGMRPoints)
savetxt(name+"MuGravity.txt", gr_points,fmt='%.12f')
savetxt(name+"SigmaGravity.txt", gr_sigma,fmt='%.12f')
savetxt(name+"MuBody.txt", b_points,fmt='%.12f')
savetxt(name+"SigmaBody.txt", b_sigma,fmt='%.12f')
def loadModel(file_name, th=1, plot=True):
#Load files
gr_points = loadtxt(file_name+"MuGravity.txt")
gr_sigma = loadtxt(file_name+"SigmaGravity.txt")
b_points = loadtxt(file_name+"MuBody.txt")
b_sigma = loadtxt(file_name+"SigmaBody.txt")
#Add model
gm = GestureModel()
gm.addModel("gravity",gr_points, gr_sigma,th)
gm.addModel("body",b_points, b_sigma,th)
if plot == True:
plotResults(gr_points,gr_sigma, b_points,b_sigma,file_name)
return gm
name_models = ['A','B','S1','S2']
num_samples = [10,14,9,10]
th = [25,20,10,65]
create_models = False
list_files = []
#Create a list of the list of files for each model
print "Defining files"
i = 0
for name in name_models:
files = []
for k in range(1,num_samples[i]+1):
files.append('Models/' + name + '/data/mod('+ str(k) + ').txt')
list_files.append(files)
i = i + 1
#Create the models and save the list of files used to calculate the weights
if(create_models == True):
print "Creating models"
i = 0
for model in name_models:
print list_files[i]
newModel(model,list_files[i])
i = i + 1
list_models = []
print "Loading models"
#Load the models
for j in range(len(name_models)):
#For the moment don't pass True if there are more than 2 models in Ubuntu
gm = loadModel(name_models[j],th[j],False)
list_models.append(gm)
print "Calculating weigths"
#Used to calculate the weights
v0 = Classifier()
for j in range(len(name_models)):
print "\nFor model " + name_models[j] + ":"
w_g, w_b = v0.calculateW(list_files[j],list_models[j])
list_models[j].addWeight("gravity",w_g)
list_models[j].addWeight("body",w_b)
print "\n Init classifers"
l_class = []
for j in range(len(name_models)):
l_class.append(Classifier())
print "Give the model to each classifier"
for j in range(len(name_models)):
l_class[j].classify(list_models[j])
print "Validation"
sfile = "validation/mix3.txt"
import matplotlib.pyplot as plt
fig = plt.figure()
for j in range(len(name_models)):
poss = l_class[j].validate_from_file(sfile, ',')
m,n = poss.shape
x = arange(0,m,1)
plt.plot(x, poss,'o',label= name_models[j])
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0.)
plt.savefig("result.png")
print "Finish ..."
| gpl-3.0 | -8,411,053,304,711,118,000 | 25.278481 | 77 | 0.617213 | false | 2.855571 | false | false | false |
SportySpice/Collections | src/file/File.py | 1 | 3600 | import xbmc
import xbmcvfs
import Folder
import urllib
import urlparse
NAME_QUERY = 'fileName'
FOLDER_NAME_QUERY = 'folderName'
FOLDER_PATH_QUERY = 'folderPath'
class File(object):
def __init__(self, name, folder):
self.name = name
self.folder = folder
self.path = folder.fullpath
self.fullpath = folder.fullpath + '/' + name
if '.' in name:
self.soleName, self.extension = name.split('.', 1)
else:
self.soleName = name
self.extension = None
self._pathTranslated = None
self._fullpathTranslated = None
def exists(self):
return xbmcvfs.exists(self.fullpath)
def delete(self):
xbmcvfs.delete(self.fullpath)
def deleteIfExists(self):
if self.exists():
self.delete()
def pathTranslated(self):
return self.folder.fullpathTranslated()
def fullpathTranslated(self):
if self._fullpathTranslated is None:
self._fullpathTranslated = xbmc.translatePath(self.fullpath)
return self._fullpathTranslated
def fileHandler(self, write=False):
if write:
permission = 'w'
else:
permission = 'r'
fullpath = self.fullpathTranslated()
return xbmcvfs.File(fullpath, permission)
def contents(self):
fh = self.fileHandler();
contents = fh.read()
fh.close()
return contents
def lines(self):
contents = self.contents()
return contents.split('\n')
def write(self, contentsStr):
fh = self.fileHandler(write=True)
fh.write(contentsStr)
fh.close()
def encodedQuery(self):
query = urllib.urlencode({NAME_QUERY: self.name,
FOLDER_NAME_QUERY: self.folder.name,
FOLDER_PATH_QUERY: self.folder.path
})
return query
def dumpObject(self, dumpObject):
import dill as pickle
with open(self.fullpathTranslated(), 'wb') as f:
pickle.dump(dumpObject, f)
def loadObject(self):
import dill as pickle
with open(self.fullpathTranslated(),'rb') as f:
loadedObject = pickle.load(f)
return loadedObject
def fromQuery(query):
parsedQuery = urlparse.parse_qs(query)
name = parsedQuery[NAME_QUERY][0]
folderName = parsedQuery[FOLDER_NAME_QUERY][0]
folderPath = parsedQuery[FOLDER_PATH_QUERY][0]
folder = Folder.Folder(folderName, folderPath)
newFile = File(name, folder)
return newFile
def fromFullpath(fullpath):
folderPath, folderName, fileName = fullpath.rsplit('/', 2)
folder = Folder.Folder(folderName, folderPath)
newFile = File(fileName, folder)
return newFile
def fromNameAndDir(fileName, dirPath):
folder = Folder.fromFullpath(dirPath)
newFile = File(fileName, folder)
return newFile
def fromInvalidNameAndDir(originalName, dirPath):
import utils
name = utils.createValidName(originalName)
return fromNameAndDir(name, dirPath)
def loadObjectFromFP(fullpath):
dumpFile = fromFullpath(fullpath)
return dumpFile.loadObject() | gpl-2.0 | 3,374,206,890,298,877,400 | 21.36646 | 72 | 0.556667 | false | 4.417178 | false | false | false |
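# ---------------------------------------------------------------------------
# Illustrative round-trip sketch (not part of the original module): a File can
# be serialised into a URL query with encodedQuery() and rebuilt from it with
# fromQuery(). The special:// path below is a placeholder, and this only runs
# inside Kodi/XBMC, where the xbmc/xbmcvfs modules imported above are available.
if __name__ == '__main__':
    original = fromNameAndDir('collection.db', 'special://profile/addon_data/collections')
    query = original.encodedQuery()
    restored = fromQuery(query)
    print restored.fullpath == original.fullpath  # expected: True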
ZhangJun-GitHub/Cycle | dialogs.py | 1 | 20748 | #====================================================
# Cycle - calendar for women
# Distributed under GNU Public License
# Original author: Oleg S. Gints
# Maintainer: Matt Molyneaux ([email protected])
# Home page: http://moggers.co.uk/cgit/cycle.git/about
#===================================================
import os
import wx
import wx.html
import cPickle
from cal_year import cycle , Val
from save_load import Load_Cycle, get_f_name, set_color_default
from set_dir import *
#---------------------------------------------------------------------------
class Settings_Dlg(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self, parent, -1, _('Settings'), wx.DefaultPosition)
self.Centre(wx.BOTH)
#======================
box = wx.BoxSizer(wx.VERTICAL)
b1 = wx.StaticBoxSizer(wx.StaticBox(self, -1, _('Length of cycle')), wx.VERTICAL)
i = wx.NewId()
self.cb1 = wx.CheckBox(self, i, _(' by average'), style=wx.NO_BORDER)
b1.Add(self.cb1, 0, wx.ALL, 5)
self.Bind(wx.EVT_CHECKBOX, self.By_Average, id=i)
self.cb1.SetValue(cycle.by_average)
b2 = wx.BoxSizer(wx.HORIZONTAL)
i = wx.NewId()
self.sc = wx.SpinCtrl(self, i, "", size=wx.Size(50, -1))
self.sc.SetRange(21, 35)
self.sc.SetValue(cycle.period)
self.sc.Enable(not self.cb1.GetValue())
b2.Add(self.sc, 0)
b2.Add(wx.StaticText(self, -1, _(' days in cycle')), 0)
b1.Add(b2, 0, wx.ALL, 5)
box.Add(b1, 0, wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, 10)
#======================
self.rb = wx.RadioBox(self, -1, _('Display'),
choices = [_('fertile days'), _('none')],
majorDimension=1, style=wx.RA_SPECIFY_COLS)
box.Add(self.rb, 0, wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, 10)
self.rb.SetSelection(cycle.disp)
#======================
self.rb1 = wx.RadioBox(self, -1, _('First week day'),
choices=[_('monday'), _('sunday')],
majorDimension=1, style=wx.RA_SPECIFY_COLS)
box.Add(self.rb1, 0, wx.EXPAND | wx.ALL, 10)
self.rb1.SetSelection(cycle.first_week_day)
#======================
i = wx.NewId()
txt1 = _('Colours')
txt2 = _('Change password')
w1, h = self.GetTextExtent(txt1)
w2, h = self.GetTextExtent(txt2)
w = max(w1, w2)
box.Add(wx.Button(self, i, txt1, size=wx.Size(w+10, -1)), 0, wx.ALIGN_CENTER)
self.Bind(wx.EVT_BUTTON, self.OnColours, id=i)
#======================
i = wx.NewId()
box.Add(wx.Button(self, i, txt2, size=wx.Size(w + 10, -1)), 0, wx.TOP | wx.ALIGN_CENTER, 10)
self.Bind(wx.EVT_BUTTON, self.OnChangePasswd, id=i)
#======================
but_box = wx.BoxSizer(wx.HORIZONTAL)
i = wx.NewId()
but_box.Add(wx.Button(self, i, _('Ok')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnOk, id=i)
i = wx.NewId()
but_box.Add(wx.Button(self, i, _('Cancel')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnCancel, id=i)
box.Add(but_box, 0, wx.ALIGN_CENTER)
self.SetAutoLayout(True)
self.SetSizer(box)
box.Fit(self)
def By_Average(self, event):
if event.Checked():
self.sc.Enable(False)
else:
self.sc.Enable(True)
def OnOk(self, event):
if not 21 <= self.sc.GetValue() <= 35:
dlg = wx.MessageDialog(self, _('Period of cycle is invalid!'),
_('Error!'), wx.OK | wx.ICON_ERROR )
dlg.ShowModal()
dlg.Destroy()
return
cycle.period = self.sc.GetValue()
cycle.by_average = self.cb1.GetValue()
cycle.disp = self.rb.GetSelection()
cycle.first_week_day = self.rb1.GetSelection()
self.EndModal(wx.ID_OK)
def OnCancel(self, event):
self.EndModal(wx.ID_CANCEL)
def OnChangePasswd(self, event):
dlg = Ask_Passwd_Dlg(self)
dlg.ShowModal()
dlg.Destroy()
def OnColours(self, event):
dlg = Colours_Dlg(self)
dlg.ShowModal()
dlg.Destroy()
#---------------------------------------------------------------------------
class Ask_Passwd_Dlg(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self, parent, -1, _('Password'))
#======================
box = wx.BoxSizer(wx.VERTICAL)
box.Add(wx.StaticText(self, -1, _('Enter your password')), 0,
wx.ALIGN_CENTER|wx.TOP|wx.LEFT|wx.RIGHT, 10)
self.pass1 = wx.TextCtrl(self, -1, "", wx.Point(10, 30),
size=(130, -1), style=wx.TE_PASSWORD)
box.Add(self.pass1, 0, wx.ALIGN_CENTER | wx.ALL, 10)
box.Add(wx.StaticText(self, -1, _('Once more...')), 0,
wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, 10)
self.pass2 = wx.TextCtrl(self, -1, "", wx.Point(10, 80),
size=(130, -1), style=wx.TE_PASSWORD)
box.Add(self.pass2, 0, wx.ALIGN_CENTER|wx.ALL, 10)
b1 = wx.BoxSizer(wx.HORIZONTAL)
i = wx.NewId()
b1.Add(wx.Button(self, i, _('Ok')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnOk, id=i)
i = wx.NewId()
b1.Add(wx.Button(self, i, _('Cancel')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnCancel, id=i)
self.pass1.SetFocus()
box.Add(b1, 0, wx.ALIGN_CENTER)
self.SetAutoLayout(True)
self.SetSizer(box)
box.Fit(self)
def OnOk(self, event):
err = ""
if self.pass1.GetValue() == "" or self.pass2.GetValue() == "":
err = _('Password must be not EMPTY!')
if self.pass1.GetValue() != self.pass2.GetValue():
err = _('Entering password don\'t match!')
if err != "":
dlg = wx.MessageDialog(self, err,
_('Error!'), wx.OK | wx.ICON_ERROR )
dlg.ShowModal()
dlg.Destroy()
return
cycle.passwd = self.pass1.GetValue()
self.EndModal(wx.ID_OK)
def OnCancel(self, event):
self.EndModal(wx.ID_CANCEL)
#---------------------------------------------------------------------------
def get_users():
#Get list of users
magic_str = 'UserName='
users = [] #array of (user, file) name
p, f_name = get_f_name()
if os.path.exists(p):
files = os.listdir(p)
for f in files:
fd = open(os.path.join(p, f), "rb")
try:
data = cPickle.loads(fd.read())
except (cPickle.UnpicklingError, ImportError, AttributeError, EOFError, IndexError):
fd.seek(0)
data = fd.read(len(magic_str))
if 'username' in data:
users.append((data['username'], f))
elif data == magic_str:
data = fd.read()
n = data.find("===") #find end string
if n is not -1:
users.append((cPickle.loads(data[:n]), f))
else: #old format
users.append((f, f))
users.sort()
return users
#---------------------------------------------------------------------------
class Login_Dlg(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self, parent, -1, _('Login'))
self.name = ""
self.file = ""
box = wx.BoxSizer(wx.VERTICAL)
#Get list of users
self.users = get_users()
# p, f_name = get_f_name()
# if os.path.exists(p):
# users = os.listdir(p)
# else:
# users = [_('empty')]
# users.sort()
#======== List users ==============
i = wx.NewId()
self.il = wx.ImageList(16, 16, True)
bmp = wx.Bitmap(os.path.join(bitmaps_dir, 'smiles.bmp'), wx.BITMAP_TYPE_BMP)
mask = wx.Mask(bmp, wx.WHITE)
bmp.SetMask(mask)
idx1 = self.il.Add(bmp)
self.list = wx.ListCtrl(self, i, size = wx.Size(200, 200),
style=wx.LC_REPORT|wx.SUNKEN_BORDER|wx.LC_SINGLE_SEL)
self.list.SetImageList(self.il, wx.IMAGE_LIST_SMALL)
self.list.InsertColumn(0, _('Your name'))
for k in range(len(self.users)):
self.list.InsertImageStringItem(k, self.users[k][0], idx1)
self.list.SetColumnWidth(0, 180)
self.list.SetItemState(0, wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED)
self.name = self.users[0][0]
self.file = self.users[0][1]
self.list.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected, self.list)
self.list.Bind(wx.EVT_LIST_KEY_DOWN, self.OnKeyDown, self.list)
box.Add(self.list, 0, wx.ALL, 10)
#========= Add user =============
i = wx.NewId()
box.Add(wx.Button(self, i, _('Add user')), 0, wx.ALIGN_CENTER)
self.Bind(wx.EVT_BUTTON, self.OnAdd, id=i)
#========= Ok - Cancel =============
b1 = wx.BoxSizer(wx.HORIZONTAL)
i = wx.NewId()
b1.Add(wx.Button(self, i, _('Ok')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnOk, id=i)
i = wx.NewId()
b1.Add(wx.Button(self, i, _('Cancel')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnCancel, id=i)
box.Add(b1, 0, wx.ALIGN_CENTER)
self.SetAutoLayout(True)
self.SetSizer(box)
box.Fit(self)
self.list.SetFocus()
def OnItemSelected(self, event):
self.name = self.users[event.GetIndex()][0] #self.list.GetItemText(event.GetIndex())
self.file = self.users[event.GetIndex()][1]
def OnKeyDown(self, event):
if event.GetKeyCode() == ord(" ") or event.GetKeyCode() == wx.WXK_RETURN:
self.OnOk()
else:
event.Skip()
def OnAdd(self, event=None):
if ask_name(self):
self.EndModal(wx.ID_OK)
def OnOk(self, event=None):
        dlg = wx.TextEntryDialog(self, self.name + _(', enter your password:'), _('Password'), '',
style=wx.OK | wx.CANCEL | wx.TE_PASSWORD)
while dlg.ShowModal() == wx.ID_OK:
cycle.passwd = dlg.GetValue()
cycle.name = self.name
cycle.file = self.file
if Load_Cycle(cycle.name, cycle.passwd, cycle.file):
dlg.Destroy()
self.EndModal(wx.ID_OK)
return
else:
dlg2 = wx.MessageDialog(self, _('Password is invalid!'),
_('Error!'), wx.OK | wx.ICON_ERROR )
dlg2.ShowModal()
dlg2.Destroy()
dlg.Destroy()
def OnCancel(self, event):
self.EndModal(wx.ID_CANCEL)
#-------------------------------------------------------
def first_login():
#Get list of users
users = get_users()
if users != []:
return 'not_first' #user(s) already exists
if ask_name():
return 'first'
else:
return 'bad_login'
#-------------------------------------------------------
def get_new_file_name():
#create filename for user
while True:
random_chars = "".join(chr(random.randint(0,255)) for i in xrange(4))
random_chars = base64.urlsafe_b64encode(random_chars)
p, random_chars = get_f_name(random_chars)
if not os.path.isfile(random_chars):
return random_chars
#-------------------------------------------------------
def ask_name(parent=None):
# nobody, it is first login
wx.MessageBox(
_("This program is not a reliable contraceptive method.\n"
"Neither does it help to prevent sexually transmitted diseases\n"
"like HIV/AIDS.\n\nIt is just an electronic means of keeping track\n"
"of some of your medical data and extracting some statistical\n"
"conclusions from them. You cannot consider this program as a\n"
"substitute for your gynecologist in any way."))
    dlg = wx.TextEntryDialog(parent, _('Enter your name:'), _('New user'), '',
style=wx.OK | wx.CANCEL)
while dlg.ShowModal() == wx.ID_OK:
name = dlg.GetValue()
if name != "":
users = get_users()
exists = False
for i in users:
if name == i[0]:
exists = True
break
if not exists:
d = Ask_Passwd_Dlg(parent)
if d.ShowModal() == wx.ID_OK:
cycle.file = get_new_file_name()
cycle.name = name
d.Destroy()
dlg.Destroy()
#self.EndModal(wx.ID_OK)
set_color_default()
return True
else:
d.Destroy()
continue
else:
err = name + _(' - already exists!')
else:
            err = _('Name must not be EMPTY')
d2 = wx.MessageDialog(dlg, err, _('Error!'), wx.OK | wx.ICON_ERROR)
d2.ShowModal()
d2.Destroy()
dlg.Destroy()
return False
#---------------------------------------------------------------------------
class Legend_Dlg(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self, parent, -1, _('Legend'))
#======================
box = wx.BoxSizer(wx.VERTICAL)
self._add(box, _('today'), wx.NullColour, wx.SIMPLE_BORDER)
self._add(box, _('begin of cycle'), cycle.colour_set['begin'])
self._add(box, _('prognosis of cycle begin'), cycle.colour_set['prog begin'])
self._add(box, _('conception'), cycle.colour_set['conception'])
self._add(box, _('fertile'), cycle.colour_set['fertile'])
self._add(box, _('ovulation, birth'), cycle.colour_set['ovule'])
self._add(box, _('1-st tablet'), cycle.colour_set['1-st tablet'])
self._add(box, _('tablets no. 22-28 or pause'), cycle.colour_set['pause'])
self._add(box, _('next 1-st tablet'), cycle.colour_set['next 1-st tablet'])
i = wx.NewId()
box.Add(wx.Button(self, i, _('Ok')), 0, wx.ALIGN_CENTER|wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnOk, id=i)
self.SetAutoLayout(True)
self.SetSizer(box)
box.Fit(self)
def _add(self, box, txt, col, st=0):
b = wx.BoxSizer(wx.HORIZONTAL)
w = wx.Window(self, -1, size=wx.Size(15, 15), style=st)
w.SetBackgroundColour(col)
b.Add(w, 0, wx.LEFT|wx.RIGHT, 10)
b.Add(wx.StaticText(self, -1, txt), 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 10)
box.Add(b, 0, wx.TOP, 10)
def OnOk(self, event):
self.EndModal(wx.ID_OK)
#---------------------------------------------------------------------------
class Note_Dlg(wx.Dialog):
def __init__(self, parent, title="", txt=""):
wx.Dialog.__init__(self, parent, -1, title)
self.CentreOnParent(wx.BOTH)
#======================
box = wx.BoxSizer(wx.VERTICAL)
self.txt = wx.TextCtrl(self, -1, txt,
size=(-1, 100), style=wx.TE_MULTILINE)
box.Add( self.txt, 0,
wx.EXPAND|wx.ALIGN_CENTER|wx.TOP|wx.LEFT|wx.RIGHT, 10)
b1 = wx.BoxSizer(wx.HORIZONTAL)
i = wx.NewId()
b1.Add(wx.Button(self, i, _('Ok')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnOk, id=i)
i = wx.NewId()
b1.Add(wx.Button(self, i, _('Cancel')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnCancel, id=i)
i = wx.NewId()
b1.Add(wx.Button(self, i, _('Remove')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnRemove, id=i)
box.Add(b1, 0, wx.ALIGN_CENTER)
self.SetAutoLayout(True)
self.SetSizer(box)
box.Fit(self)
self.txt.SetFocus()
def OnOk(self, event):
self.EndModal(wx.ID_OK)
def OnCancel(self, event):
self.EndModal(wx.ID_CANCEL)
def OnRemove(self, event):
self.EndModal(False)
def Get_Txt(self):
return self.txt.GetValue()
#---------------------------------------------------------------------------
class MyHtmlWindow(wx.html.HtmlWindow):
def __init__(self, parent, id, pos = wx.DefaultPosition, size=wx.DefaultSize):
wx.html.HtmlWindow.__init__(self, parent, id, pos, size)
if "gtk2" in wx.PlatformInfo:
self.SetStandardFonts()
def OnLinkClicked(self, linkinfo):
pass
#---------------------------------------------------------------------------
class Help_Dlg(wx.Dialog):
def __init__(self, parent, title="", txt=""):
wx.Dialog.__init__(self, parent, -1, title)
self.CentreOnParent(wx.BOTH)
#======================
box = wx.BoxSizer(wx.VERTICAL)
self.html = MyHtmlWindow(self, -1, size=(500, 350))
self.html.SetPage(txt)
box.Add(self.html, 0, wx.ALIGN_CENTER|wx.TOP|wx.LEFT|wx.RIGHT, 10)
i = wx.NewId()
box.Add(wx.Button(self, i, _('Ok')), 0, wx.ALIGN_CENTER|wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnOk, id=i)
self.SetAutoLayout(True)
self.SetSizer(box)
box.Fit(self)
def OnOk(self, event):
self.EndModal(wx.ID_OK)
#---------------------------------------------------------------------------
class Colours_Dlg(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self, parent, -1, _('Colours settings'))
self.col_set = cycle.colour_set.copy()
self.col_id = cycle.colour_set.keys()
self.data = wx.ColourData()
self.data.SetChooseFull(True)
self.buttons = {}
#======================
box = wx.BoxSizer(wx.VERTICAL)
self._add(box, _('begin of cycle'), 'begin')
self._add(box, _('prognosis of cycle begin'), 'prog begin')
self._add(box, _('conception'), 'conception')
self._add(box, _('fertile'), 'fertile')
self._add(box, _('ovulation, birth'), 'ovule')
self._add(box, _('1-st tablet'), '1-st tablet')
self._add(box, _('tablets no. 22-28 or pause'), 'pause')
self._add(box, _('next 1-st tablet'), 'next 1-st tablet')
b1 = wx.BoxSizer(wx.HORIZONTAL)
i = wx.NewId()
b1.Add(wx.Button(self, i, _('Ok')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnOk, id=i)
i = wx.NewId()
b1.Add(wx.Button(self, i, _('By default')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnDefault, id=i)
i = wx.NewId()
b1.Add(wx.Button(self, i, _('Cancel')), 0, wx.ALL, 10)
self.Bind(wx.EVT_BUTTON, self.OnCancel, id=i)
box.Add(b1, 0, wx.ALIGN_CENTER)
self.SetAutoLayout(True)
self.SetSizer(box)
box.Fit(self)
def _add(self, box, txt, col):
b = wx.BoxSizer(wx.HORIZONTAL)
i = self.col_id.index(col)
bt = wx.Button(self, i, "", size=wx.Size(15, 15))
self.Bind(wx.EVT_BUTTON, self.get_colour, id=i)
bt.SetBackgroundColour(self.col_set[col])
self.buttons.update({i:bt})
b.Add(bt, 0, wx.LEFT|wx.RIGHT, 10)
b.Add(wx.StaticText(self, -1, txt), 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 10)
box.Add(b, 0, wx.TOP, 10)
def get_colour(self, event):
c = self.col_set[ self.col_id[event.GetId()] ]
self.data.SetColour(c)
dlg = wx.ColourDialog(self, self.data)
if dlg.ShowModal() == wx.ID_OK:
self.data = dlg.GetColourData()
c = self.data.GetColour()
self.buttons[event.GetId()].SetBackgroundColour(c)
self.col_set[self.col_id[event.GetId()]] = c
def OnOk(self, event):
cycle.colour_set = self.col_set.copy()
Val.Cal.Draw_Mark()
self.EndModal(wx.ID_OK)
def OnDefault(self, event):
self.col_set = {'begin':wx.NamedColour('RED'),
'prog begin':wx.NamedColour('PINK'),
'conception':wx.NamedColour('MAGENTA'),
'fertile':wx.NamedColour('GREEN YELLOW'),
'ovule':wx.NamedColour('SPRING GREEN'),
'1-st tablet':wx.NamedColour('GOLD'),
'pause':wx.NamedColour('LIGHT BLUE'),
'next 1-st tablet':wx.NamedColour('PINK')}
for item in self.col_id:
self.buttons[self.col_id.index(item)].SetBackgroundColour(self.col_set[item])
def OnCancel(self, event):
self.EndModal(wx.ID_CANCEL)
#---------------------------------------------------------------------------
| gpl-2.0 | 7,211,397,491,379,076,000 | 35.852575 | 100 | 0.502699 | false | 3.445939 | false | false | false |
rolobio/sshm | sshm/main.py | 1 | 5215 | #! /usr/bin/env python3
"""
This module allows the console to use SSHM's functionality.
This module should only be run by the console!
"""
from __future__ import print_function
import sys
try: # pragma: no cover version specific
from lib import sshm
except ImportError: # pragma: no cover version specific
from sshm.lib import sshm
__all__ = ['main']
def get_argparse_args(args=None):
"""
Get the arguments passed to this script when it was run.
@param args: A list of arguments passed in the console.
@type args: list
@returns: A tuple containing (args, command, extra_args)
@rtype: tuple
"""
try: # pragma: no cover
from _info import __version__, __long_description__
except ImportError: # pragma: no cover
from sshm._info import __version__, __long_description__
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__long_description__)
parser.add_argument('servers', nargs='+')
parser.add_argument('command')
parser.add_argument('-s', '--sorted-output', action='store_true', default=False,
help='Sort the output by the URI of each instance. This will wait for all instances to finish before showing any output!')
parser.add_argument('-p', '--strip-whitespace', action='store_true', default=False,
help='Remove any whitespace surrounding the output of each instance.')
parser.add_argument('-d', '--disable-formatting', action='store_true', default=False,
help='Disable command formatting.')
parser.add_argument('-u', '--quiet', action='store_true', default=False,
help="Hide SSHM's server information on output (this implies sorted).")
parser.add_argument('-w', '--workers', type=int, default=20,
help="Limit the amount of concurrent SSH connections.")
parser.add_argument('--version', action='version', version='%(prog)s '+__version__)
args, extra_args = parser.parse_known_args(args=args)
# Move any servers that start with a - to extra_args
new_servers = []
for i in args.servers:
if i.startswith('-'):
extra_args.append(i)
else:
new_servers.append(i)
args.servers = new_servers
    # If the command starts with a -, replace it with the last server and
# move the command to extra_args.
if args.command.startswith('-'):
extra_args.append(args.command)
args.command = args.servers.pop(-1)
if args.quiet:
args.sorted_output = True
return (args, args.command, extra_args)
def _print_handling_newlines(uri, return_code, to_print, header='', strip_whitespace=False, quiet=False, file=sys.stdout):
"""
Print "to_print" to "file" with the formatting needed to represent it's data
properly.
"""
if strip_whitespace:
to_print = to_print.strip()
if to_print.count('\n') == 0:
sep = ' '
else:
sep = '\n'
output_str = 'sshm: {header}{uri}({return_code}):{sep}{to_print}'
if quiet:
output_str = '{to_print}'
print(output_str.format(header=header,
uri=uri,
return_code=return_code,
sep=sep,
to_print=to_print), file=file)
def main():
"""
Run SSHM using console provided arguments.
This should only be run using a console!
"""
import select
args, command, extra_arguments = get_argparse_args()
    # Only provide stdin if there is data
    r_list, _, _ = select.select([sys.stdin], [], [], 0)
if r_list:
stdin = r_list[0]
else:
stdin = None
# Perform the command on each server, print the results to stdout.
results = sshm(args.servers, command, extra_arguments, stdin, args.disable_formatting, args.workers)
# If a sorted output is requested, gather all results before output.
if args.sorted_output:
results = list(results)
results = sorted(results, key=lambda x: x['uri'])
exit_code = 0
for result in results:
exit_code = exit_code or result.get('return_code')
if result.get('stdout') != None:
_print_handling_newlines(result['uri'],
result['return_code'],
result['stdout'],
strip_whitespace=args.strip_whitespace,
quiet=args.quiet,
)
if result.get('stderr'):
_print_handling_newlines(result['uri'],
result.get('return_code', ''),
result['stderr'],
'Error: ',
strip_whitespace=args.strip_whitespace,
quiet=args.quiet,
file=sys.stderr,
)
if result.get('traceback'):
            _print_handling_newlines(result['uri'],
                    result.get('return_code', ''),
                    result['traceback'],
                    'Traceback: ',
strip_whitespace=args.strip_whitespace,
quiet=args.quiet,
file=sys.stderr,
)
# Exit with non-zero when there is a failure
sys.exit(exit_code)
if __name__ == '__main__':
main()
| gpl-2.0 | 6,128,735,719,583,437,000 | 33.536424 | 135 | 0.595781 | false | 4.116022 | false | false | false |
jmathai/elodie | elodie/tests/config_test.py | 1 | 3912 | from __future__ import absolute_import
# Project imports
import os
import sys
import unittest
from mock import patch
from tempfile import gettempdir
sys.path.insert(0, os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))))
from elodie import constants
from elodie.config import load_config, load_plugin_config
@patch('elodie.config.config_file', '%s/config.ini-singleton-success' % gettempdir())
def test_load_config_singleton_success():
with open('%s/config.ini-singleton-success' % gettempdir(), 'w') as f:
f.write("""
[MapQuest]
key=your-api-key-goes-here
prefer_english_names=False
""")
if hasattr(load_config, 'config'):
del load_config.config
config = load_config()
assert config['MapQuest']['key'] == 'your-api-key-goes-here', config.get('MapQuest', 'key')
config.set('MapQuest', 'key', 'new-value')
config = load_config()
if hasattr(load_config, 'config'):
del load_config.config
assert config['MapQuest']['key'] == 'new-value', config.get('MapQuest', 'key')
@patch('elodie.config.config_file', '%s/config.ini-does-not-exist' % gettempdir())
def test_load_config_singleton_no_file():
if hasattr(load_config, 'config'):
del load_config.config
config = load_config()
if hasattr(load_config, 'config'):
del load_config.config
assert config == {}, config
@patch('elodie.config.config_file', '%s/config.ini-load-plugin-config-unset-backwards-compat' % gettempdir())
def test_load_plugin_config_unset_backwards_compat():
with open('%s/config.ini-load-plugin-config-unset-backwards-compat' % gettempdir(), 'w') as f:
f.write("""
""")
if hasattr(load_config, 'config'):
del load_config.config
plugins = load_plugin_config()
if hasattr(load_config, 'config'):
del load_config.config
assert plugins == [], plugins
@patch('elodie.config.config_file', '%s/config.ini-load-plugin-config-exists-not-set' % gettempdir())
def test_load_plugin_config_exists_not_set():
with open('%s/config.ini-load-plugin-config-exists-not-set' % gettempdir(), 'w') as f:
f.write("""
[Plugins]
""")
if hasattr(load_config, 'config'):
del load_config.config
plugins = load_plugin_config()
if hasattr(load_config, 'config'):
del load_config.config
assert plugins == [], plugins
@patch('elodie.config.config_file', '%s/config.ini-load-plugin-config-one' % gettempdir())
def test_load_plugin_config_one():
with open('%s/config.ini-load-plugin-config-one' % gettempdir(), 'w') as f:
f.write("""
[Plugins]
plugins=Dummy
""")
if hasattr(load_config, 'config'):
del load_config.config
plugins = load_plugin_config()
if hasattr(load_config, 'config'):
del load_config.config
assert plugins == ['Dummy'], plugins
@patch('elodie.config.config_file', '%s/config.ini-load-plugin-config-one-with-invalid' % gettempdir())
def test_load_plugin_config_one_with_invalid():
    with open('%s/config.ini-load-plugin-config-one-with-invalid' % gettempdir(), 'w') as f:
f.write("""
[Plugins]
plugins=DNE
""")
if hasattr(load_config, 'config'):
del load_config.config
plugins = load_plugin_config()
if hasattr(load_config, 'config'):
del load_config.config
assert plugins == [], plugins
@patch('elodie.config.config_file', '%s/config.ini-load-plugin-config-many' % gettempdir())
def test_load_plugin_config_many():
with open('%s/config.ini-load-plugin-config-many' % gettempdir(), 'w') as f:
f.write("""
[Plugins]
plugins=GooglePhotos,Dummy
""")
if hasattr(load_config, 'config'):
del load_config.config
plugins = load_plugin_config()
if hasattr(load_config, 'config'):
del load_config.config
assert plugins == ['GooglePhotos','Dummy'], plugins
| apache-2.0 | 2,237,664,676,632,507,000 | 28.862595 | 114 | 0.652607 | false | 3.369509 | true | false | false |
dparaujo/projeto | app_inscricoes/questionarios/migrations/0002_auto_20170220_2126.py | 1 | 1224 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-02-21 00:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('questionarios', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='tblquestionarioingresopsid',
name='pessoa',
),
migrations.RemoveField(
model_name='tblquestionariosocioeconomico',
name='pessoa',
),
migrations.AlterField(
model_name='tblquestionarioingresopsid',
name='fez_quantos_curso_psid',
field=models.IntegerField(choices=[(0, 'Nenhum Curso'), (1, 'Um Curso'), (3, 'Dois Cursos'), (4, 'Tr\xeas Cursos'), (5, 'Quatro Cursos'), (6, 'Fiz mais que quatro cursos')], verbose_name='Quantos curso voc\xea fez no PSID?'),
),
migrations.AlterField(
model_name='tblquestionariosocioeconomico',
name='cor_raca',
field=models.IntegerField(choices=[(0, 'Branca'), (1, 'Negro'), (3, 'Pardo'), (4, 'Amarela'), (5, 'Ind\xedgena'), (6, 'N\xe3o Declara')], verbose_name='Cor/Ra\xe7a'),
),
]
| gpl-3.0 | -3,572,128,225,930,525,000 | 36.090909 | 237 | 0.588235 | false | 3.326087 | false | false | false |
crespyl/pcre2 | maint/MultiStage2.py | 1 | 23077 | #! /usr/bin/python
# Multistage table builder
# (c) Peter Kankowski, 2008
##############################################################################
# This script was submitted to the PCRE project by Peter Kankowski as part of
# the upgrading of Unicode property support. The new code speeds up property
# matching many times. The script is for the use of PCRE maintainers, to
# generate the pcre_ucd.c file that contains a digested form of the Unicode
# data tables.
#
# The script has now been upgraded to Python 3 for PCRE2, and should be run in
# the maint subdirectory, using the command
#
# [python3] ./MultiStage2.py >../src/pcre2_ucd.c
#
# It requires four Unicode data tables, DerivedGeneralCategory.txt,
# GraphemeBreakProperty.txt, Scripts.txt, and CaseFolding.txt, to be in the
# Unicode.tables subdirectory. The first of these is found in the "extracted"
# subdirectory of the Unicode database (UCD) on the Unicode web site; the
# second is in the "auxiliary" subdirectory; the other two are directly in the
# UCD directory.
#
# Minor modifications made to this script:
# Added #! line at start
# Removed tabs
# Made it work with Python 2.4 by rewriting two statements that needed 2.5
# Consequent code tidy
# Adjusted data file names to take from the Unicode.tables directory
# Adjusted global table names by prefixing _pcre_.
# Commented out stuff relating to the casefolding table, which isn't used;
# removed completely in 2012.
# Corrected size calculation
# Add #ifndef SUPPORT_UCP to use dummy tables when no UCP support is needed.
# Update for PCRE2: name changes, and SUPPORT_UCP is abolished.
#
# Major modifications made to this script:
# Added code to add a grapheme break property field to records.
#
# Added code to search for sets of more than two characters that must match
# each other caselessly. A new table is output containing these sets, and
# offsets into the table are added to the main output records. This new
# code scans CaseFolding.txt instead of UnicodeData.txt.
#
# Update for Python3:
# . Processed with 2to3, but that didn't fix everything
# . Changed string.strip to str.strip
# . Added encoding='utf-8' to the open() call
# . Inserted 'int' before blocksize/ELEMS_PER_LINE because an int is
# required and the result of the division is a float
#
# The main tables generated by this script are used by macros defined in
# pcre2_internal.h. They look up Unicode character properties using short
# sequences of code that contains no branches, which makes for greater speed.
#
# Conceptually, there is a table of records (of type ucd_record), containing a
# script number, character type, grapheme break type, offset to caseless
# matching set, and offset to the character's other case for every character.
# However, a real table covering all Unicode characters would be far too big.
# It can be efficiently compressed by observing that many characters have the
# same record, and many blocks of characters (taking 128 characters in a block)
# have the same set of records as other blocks. This leads to a 2-stage lookup
# process.
#
# This script constructs four tables. The ucd_caseless_sets table contains
# lists of characters that all match each other caselessly. Each list is
# in order, and is terminated by NOTACHAR (0xffffffff), which is larger than
# any valid character. The first list is empty; this is used for characters
# that are not part of any list.
#
# The ucd_records table contains one instance of every unique record that is
# required. The ucd_stage1 table is indexed by a character's block number, and
# yields what is in effect a "virtual" block number. The ucd_stage2 table is a
# table of "virtual" blocks; each block is indexed by the offset of a character
# within its own block, and the result is the offset of the required record.
#
# Example: lowercase "a" (U+0061) is in block 0
# lookup 0 in stage1 table yields 0
# lookup 97 in the first table in stage2 yields 16
# record 17 is { 33, 5, 11, 0, -32 }
# 33 = ucp_Latin => Latin script
# 5 = ucp_Ll => Lower case letter
# 11 = ucp_gbOther => Grapheme break property "Other"
# 0 => not part of a caseless set
# -32 => Other case is U+0041
#
# Almost all lowercase latin characters resolve to the same record. One or two
# are different because they are part of a multi-character caseless set (for
# example, k, K and the Kelvin symbol are such a set).
#
# Example: hiragana letter A (U+3042) is in block 96 (0x60)
# lookup 96 in stage1 table yields 88
# lookup 66 in the 88th table in stage2 yields 467
# record 470 is { 26, 7, 11, 0, 0 }
# 26 = ucp_Hiragana => Hiragana script
# 7 = ucp_Lo => Other letter
# 11 = ucp_gbOther => Grapheme break property "Other"
# 0 => not part of a caseless set
# 0 => No other case
#
# In these examples, no other blocks resolve to the same "virtual" block, as it
# happens, but plenty of other blocks do share "virtual" blocks.
#
# There is a fourth table, maintained by hand, which translates from the
# individual character types such as ucp_Cc to the general types like ucp_C.
#
# Philip Hazel, 03 July 2008
#
# 01-March-2010: Updated list of scripts for Unicode 5.2.0
# 30-April-2011: Updated list of scripts for Unicode 6.0.0
# July-2012: Updated list of scripts for Unicode 6.1.0
# 20-August-2012: Added scan of GraphemeBreakProperty.txt and added a new
# field in the record to hold the value. Luckily, the
# structure had a hole in it, so the resulting table is
# not much bigger than before.
# 18-September-2012: Added code for multiple caseless sets. This uses the
# final hole in the structure.
# 30-September-2012: Added RegionalIndicator break property from Unicode 6.2.0
# 13-May-2014: Updated for PCRE2
# 03-June-2014: Updated for Python 3
# 20-June-2014: Updated for Unicode 7.0.0
# 12-August-2014: Updated to put Unicode version into the file
##############################################################################
import re
import string
import sys
MAX_UNICODE = 0x110000
NOTACHAR = 0xffffffff
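
# Illustrative sketch (added for readers of this script, not used by it): the
# branch-free lookup described in the header comment is performed by C macros
# in pcre2_internal.h over the stage1/stage2/records tables printed below.
# In Python the same two-stage lookup would look roughly like this:
def _illustrative_lookup(c, stage1, stage2, records, block_size):
  """Return the ucd_record data for code point c via the two-stage tables."""
  block, offset = divmod(c, block_size)   # block number and position within it
  virtual_block = stage1[block]           # stage 1: block -> "virtual" block
  record_index = stage2[virtual_block * block_size + offset]
  return records[record_index]            # (script, type, break prop, caseless set, other case)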
# Parse a line of Scripts.txt, GraphemeBreakProperty.txt or DerivedGeneralCategory.txt
def make_get_names(enum):
return lambda chardata: enum.index(chardata[1])
# Parse a line of CaseFolding.txt
def get_other_case(chardata):
if chardata[1] == 'C' or chardata[1] == 'S':
return int(chardata[2], 16) - int(chardata[0], 16)
return 0
# Read the whole table in memory, setting/checking the Unicode version
def read_table(file_name, get_value, default_value):
global unicode_version
f = re.match(r'^[^/]+/([^.]+)\.txt$', file_name)
file_base = f.group(1)
version_pat = r"^# " + re.escape(file_base) + r"-(\d+\.\d+\.\d+)\.txt$"
file = open(file_name, 'r', encoding='utf-8')
f = re.match(version_pat, file.readline())
version = f.group(1)
if unicode_version == "":
unicode_version = version
elif unicode_version != version:
print("WARNING: Unicode version differs in %s", file_name, file=sys.stderr)
table = [default_value] * MAX_UNICODE
for line in file:
line = re.sub(r'#.*', '', line)
chardata = list(map(str.strip, line.split(';')))
if len(chardata) <= 1:
continue
value = get_value(chardata)
m = re.match(r'([0-9a-fA-F]+)(\.\.([0-9a-fA-F]+))?$', chardata[0])
char = int(m.group(1), 16)
if m.group(3) is None:
last = char
else:
last = int(m.group(3), 16)
for i in range(char, last + 1):
# It is important not to overwrite a previously set
# value because in the CaseFolding file there are lines
# to be ignored (returning the default value of 0)
# which often come after a line which has already set
# data.
if table[i] == default_value:
table[i] = value
file.close()
return table
# Get the smallest possible C language type for the values
def get_type_size(table):
type_size = [("uint8_t", 1), ("uint16_t", 2), ("uint32_t", 4),
("signed char", 1), ("pcre_int16", 2), ("pcre_int32", 4)]
limits = [(0, 255), (0, 65535), (0, 4294967295),
(-128, 127), (-32768, 32767), (-2147483648, 2147483647)]
minval = min(table)
maxval = max(table)
for num, (minlimit, maxlimit) in enumerate(limits):
if minlimit <= minval and maxval <= maxlimit:
return type_size[num]
else:
raise OverflowError("Too large to fit into C types")
def get_tables_size(*tables):
total_size = 0
for table in tables:
type, size = get_type_size(table)
total_size += size * len(table)
return total_size
# Compress the table into the two stages
def compress_table(table, block_size):
blocks = {} # Dictionary for finding identical blocks
stage1 = [] # Stage 1 table contains block numbers (indices into stage 2 table)
stage2 = [] # Stage 2 table contains the blocks with property values
table = tuple(table)
for i in range(0, len(table), block_size):
block = table[i:i+block_size]
start = blocks.get(block)
if start is None:
# Allocate a new block
      start = len(stage2) // block_size
stage2 += block
blocks[block] = start
stage1.append(start)
return stage1, stage2
# Print a table
def print_table(table, table_name, block_size = None):
type, size = get_type_size(table)
ELEMS_PER_LINE = 16
s = "const %s %s[] = { /* %d bytes" % (type, table_name, size * len(table))
if block_size:
s += ", block = %d" % block_size
print(s + " */")
table = tuple(table)
if block_size is None:
fmt = "%3d," * ELEMS_PER_LINE + " /* U+%04X */"
    mult = MAX_UNICODE // len(table)
for i in range(0, len(table), ELEMS_PER_LINE):
print(fmt % (table[i:i+ELEMS_PER_LINE] + (i * mult,)))
else:
if block_size > ELEMS_PER_LINE:
el = ELEMS_PER_LINE
else:
el = block_size
fmt = "%3d," * el + "\n"
if block_size > ELEMS_PER_LINE:
fmt = fmt * int(block_size / ELEMS_PER_LINE)
for i in range(0, len(table), block_size):
print(("/* block %d */\n" + fmt) % ((i / block_size,) + table[i:i+block_size]))
print("};\n")
# Extract the unique combinations of properties into records
def combine_tables(*tables):
records = {}
index = []
for t in zip(*tables):
i = records.get(t)
if i is None:
i = records[t] = len(records)
index.append(i)
return index, records
def get_record_size_struct(records):
size = 0
structure = '/* When recompiling tables with a new Unicode version, please check the\n' + \
'types in this structure definition from pcre2_internal.h (the actual\n' + \
'field names will be different):\n\ntypedef struct {\n'
for i in range(len(records[0])):
record_slice = [record[i] for record in records]
slice_type, slice_size = get_type_size(record_slice)
# add padding: round up to the nearest power of slice_size
size = (size + slice_size - 1) & -slice_size
size += slice_size
structure += '%s property_%d;\n' % (slice_type, i)
# round up to the first item of the next structure in array
record_slice = [record[0] for record in records]
slice_type, slice_size = get_type_size(record_slice)
size = (size + slice_size - 1) & -slice_size
structure += '} ucd_record;\n*/\n\n'
return size, structure
def test_record_size():
tests = [ \
( [(3,), (6,), (6,), (1,)], 1 ), \
( [(300,), (600,), (600,), (100,)], 2 ), \
( [(25, 3), (6, 6), (34, 6), (68, 1)], 2 ), \
( [(300, 3), (6, 6), (340, 6), (690, 1)], 4 ), \
( [(3, 300), (6, 6), (6, 340), (1, 690)], 4 ), \
( [(300, 300), (6, 6), (6, 340), (1, 690)], 4 ), \
( [(3, 100000), (6, 6), (6, 123456), (1, 690)], 8 ), \
( [(100000, 300), (6, 6), (123456, 6), (1, 690)], 8 ), \
]
for test in tests:
size, struct = get_record_size_struct(test[0])
assert(size == test[1])
#print struct
def print_records(records, record_size):
print('const ucd_record PRIV(ucd_records)[] = { ' + \
'/* %d bytes, record size %d */' % (len(records) * record_size, record_size))
records = list(zip(list(records.keys()), list(records.values())))
records.sort(key = lambda x: x[1])
for i, record in enumerate(records):
print((' {' + '%6d, ' * len(record[0]) + '}, /* %3d */') % (record[0] + (i,)))
print('};\n')
script_names = ['Arabic', 'Armenian', 'Bengali', 'Bopomofo', 'Braille', 'Buginese', 'Buhid', 'Canadian_Aboriginal', \
'Cherokee', 'Common', 'Coptic', 'Cypriot', 'Cyrillic', 'Deseret', 'Devanagari', 'Ethiopic', 'Georgian', \
'Glagolitic', 'Gothic', 'Greek', 'Gujarati', 'Gurmukhi', 'Han', 'Hangul', 'Hanunoo', 'Hebrew', 'Hiragana', \
'Inherited', 'Kannada', 'Katakana', 'Kharoshthi', 'Khmer', 'Lao', 'Latin', 'Limbu', 'Linear_B', 'Malayalam', \
'Mongolian', 'Myanmar', 'New_Tai_Lue', 'Ogham', 'Old_Italic', 'Old_Persian', 'Oriya', 'Osmanya', 'Runic', \
'Shavian', 'Sinhala', 'Syloti_Nagri', 'Syriac', 'Tagalog', 'Tagbanwa', 'Tai_Le', 'Tamil', 'Telugu', 'Thaana', \
'Thai', 'Tibetan', 'Tifinagh', 'Ugaritic', 'Yi', \
# New for Unicode 5.0
'Balinese', 'Cuneiform', 'Nko', 'Phags_Pa', 'Phoenician', \
# New for Unicode 5.1
'Carian', 'Cham', 'Kayah_Li', 'Lepcha', 'Lycian', 'Lydian', 'Ol_Chiki', 'Rejang', 'Saurashtra', 'Sundanese', 'Vai', \
# New for Unicode 5.2
'Avestan', 'Bamum', 'Egyptian_Hieroglyphs', 'Imperial_Aramaic', \
'Inscriptional_Pahlavi', 'Inscriptional_Parthian', \
'Javanese', 'Kaithi', 'Lisu', 'Meetei_Mayek', \
'Old_South_Arabian', 'Old_Turkic', 'Samaritan', 'Tai_Tham', 'Tai_Viet', \
# New for Unicode 6.0.0
'Batak', 'Brahmi', 'Mandaic', \
# New for Unicode 6.1.0
'Chakma', 'Meroitic_Cursive', 'Meroitic_Hieroglyphs', 'Miao', 'Sharada', 'Sora_Sompeng', 'Takri',
# New for Unicode 7.0.0
'Bassa_Vah', 'Caucasian_Albanian', 'Duployan', 'Elbasan', 'Grantha', 'Khojki', 'Khudawadi',
'Linear_A', 'Mahajani', 'Manichaean', 'Mende_Kikakui', 'Modi', 'Mro', 'Nabataean',
'Old_North_Arabian', 'Old_Permic', 'Pahawh_Hmong', 'Palmyrene', 'Psalter_Pahlavi',
'Pau_Cin_Hau', 'Siddham', 'Tirhuta', 'Warang_Citi'
]
category_names = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu',
'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps',
'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs' ]
break_property_names = ['CR', 'LF', 'Control', 'Extend', 'Prepend',
'SpacingMark', 'L', 'V', 'T', 'LV', 'LVT', 'Regional_Indicator', 'Other' ]
test_record_size()
unicode_version = ""
script = read_table('Unicode.tables/Scripts.txt', make_get_names(script_names), script_names.index('Common'))
category = read_table('Unicode.tables/DerivedGeneralCategory.txt', make_get_names(category_names), category_names.index('Cn'))
break_props = read_table('Unicode.tables/GraphemeBreakProperty.txt', make_get_names(break_property_names), break_property_names.index('Other'))
other_case = read_table('Unicode.tables/CaseFolding.txt', get_other_case, 0)
# This block of code was added by PH in September 2012. I am not a Python
# programmer, so the style is probably dreadful, but it does the job. It scans
# the other_case table to find sets of more than two characters that must all
# match each other caselessly. Later in this script a table of these sets is
# written out. However, we have to do this work here in order to compute the
# offsets in the table that are inserted into the main table.
# The CaseFolding.txt file lists pairs, but the common logic for reading data
# sets only one value, so first we go through the table and set "return"
# offsets for those that are not already set.
for c in range(0x10ffff):
if other_case[c] != 0 and other_case[c + other_case[c]] == 0:
other_case[c + other_case[c]] = -other_case[c]
# Now scan again and create equivalence sets.
sets = []
for c in range(0x10ffff):
o = c + other_case[c]
# Trigger when this character's other case does not point back here. We
# now have three characters that are case-equivalent.
if other_case[o] != -other_case[c]:
t = o + other_case[o]
# Scan the existing sets to see if any of the three characters are already
# part of a set. If so, unite the existing set with the new set.
appended = 0
for s in sets:
found = 0
for x in s:
if x == c or x == o or x == t:
found = 1
# Add new characters to an existing set
if found:
found = 0
for y in [c, o, t]:
for x in s:
if x == y:
found = 1
if not found:
s.append(y)
appended = 1
# If we have not added to an existing set, create a new one.
if not appended:
sets.append([c, o, t])
# End of loop looking for caseless sets.
# Now scan the sets and set appropriate offsets for the characters.
caseless_offsets = [0] * MAX_UNICODE
offset = 1
for s in sets:
for x in s:
caseless_offsets[x] = offset
offset += len(s) + 1
# End of block of code for creating offsets for caseless matching sets.
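
# Worked example (illustrative only): suppose the first set found above were
# k (U+006B), K (U+004B) and the Kelvin sign (U+212A). Each of those three
# characters would get caseless_offsets[x] = 1, and the emitted
# ucd_caseless_sets table would begin
#   NOTACHAR, 0x004b, 0x006b, 0x212a, NOTACHAR, ...
# i.e. the offset points at the first member of the set and the set is
# terminated by NOTACHAR, as described in the header comment. The actual
# position of any particular set depends on the order in which the scan
# above encounters it.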
# Combine the tables
table, records = combine_tables(script, category, break_props,
caseless_offsets, other_case)
record_size, record_struct = get_record_size_struct(list(records.keys()))
# Find the optimum block size for the two-stage table
min_size = sys.maxsize
for block_size in [2 ** i for i in range(5,10)]:
size = len(records) * record_size
stage1, stage2 = compress_table(table, block_size)
size += get_tables_size(stage1, stage2)
#print "/* block size %5d => %5d bytes */" % (block_size, size)
if size < min_size:
min_size = size
min_stage1, min_stage2 = stage1, stage2
min_block_size = block_size
print("/* This module is generated by the maint/MultiStage2.py script.")
print("Do not modify it by hand. Instead modify the script and run it")
print("to regenerate this code.")
print()
print("As well as being part of the PCRE2 library, this module is #included")
print("by the pcre2test program, which redefines the PRIV macro to change")
print("table names from _pcre2_xxx to xxxx, thereby avoiding name clashes")
print("with the library. At present, just one of these tables is actually")
print("needed. */")
print()
print("#ifndef PCRE2_PCRE2TEST")
print()
print("#ifdef HAVE_CONFIG_H")
print("#include \"config.h\"")
print("#endif")
print()
print("#include \"pcre2_internal.h\"")
print()
print("#endif /* PCRE2_PCRE2TEST */")
print()
print("/* Unicode character database. */")
print("/* This file was autogenerated by the MultiStage2.py script. */")
print("/* Total size: %d bytes, block size: %d. */" % (min_size, min_block_size))
print()
print("/* The tables herein are needed only when UCP support is built,")
print("and in PCRE2 that happens automatically with UTF support.")
print("This module should not be referenced otherwise, so")
print("it should not matter whether it is compiled or not. However")
print("a comment was received about space saving - maybe the guy linked")
print("all the modules rather than using a library - so we include a")
print("condition to cut out the tables when not needed. But don't leave")
print("a totally empty module because some compilers barf at that.")
print("Instead, just supply small dummy tables. */")
print()
print("#ifndef SUPPORT_UNICODE")
print("const ucd_record PRIV(ucd_records)[] = {{0,0,0,0,0 }};")
print("const uint8_t PRIV(ucd_stage1)[] = {0};")
print("const uint16_t PRIV(ucd_stage2)[] = {0};")
print("const uint32_t PRIV(ucd_caseless_sets)[] = {0};")
print("#else")
print()
print("const char *PRIV(unicode_version) = \"{}\";".format(unicode_version))
print()
print(record_struct)
# --- Added by PH: output the table of caseless character sets ---
print("const uint32_t PRIV(ucd_caseless_sets)[] = {")
print(" NOTACHAR,")
for s in sets:
s = sorted(s)
for x in s:
print(' 0x%04x,' % x, end=' ')
print(' NOTACHAR,')
print('};')
print()
# ------
print("/* When #included in pcre2test, we don't need this large table. */")
print()
print("#ifndef PCRE2_PCRE2TEST")
print()
print_records(records, record_size)
print_table(min_stage1, 'PRIV(ucd_stage1)')
print_table(min_stage2, 'PRIV(ucd_stage2)', min_block_size)
print("#if UCD_BLOCK_SIZE != %d" % min_block_size)
print("#error Please correct UCD_BLOCK_SIZE in pcre2_internal.h")
print("#endif")
print("#endif /* SUPPORT_UNICODE */")
print()
print("#endif /* PCRE2_PCRE2TEST */")
"""
# Three-stage tables:
# Find the optimum block size for 3-stage table
min_size = sys.maxint
for stage3_block in [2 ** i for i in range(2,6)]:
stage_i, stage3 = compress_table(table, stage3_block)
for stage2_block in [2 ** i for i in range(5,10)]:
size = len(records) * 4
stage1, stage2 = compress_table(stage_i, stage2_block)
size += get_tables_size(stage1, stage2, stage3)
# print "/* %5d / %3d => %5d bytes */" % (stage2_block, stage3_block, size)
if size < min_size:
min_size = size
min_stage1, min_stage2, min_stage3 = stage1, stage2, stage3
min_stage2_block, min_stage3_block = stage2_block, stage3_block
print "/* Total size: %d bytes" % min_size */
print_records(records)
print_table(min_stage1, 'ucd_stage1')
print_table(min_stage2, 'ucd_stage2', min_stage2_block)
print_table(min_stage3, 'ucd_stage3', min_stage3_block)
"""
| gpl-3.0 | 2,237,033,308,509,130,800 | 41.894052 | 143 | 0.604325 | false | 3.357143 | true | false | false |
leebird/legonlp | annotation/align.py | 1 | 2062 | # -*- coding: utf-8 -*-
import os
import sys
import re
import codecs
from alignment import Alignment,Hirschberg
from readers import AnnParser
from writers import AnnWriter
writer = AnnWriter()
def get_phrase(text):
p = re.compile(ur'[a-zA-Z]+|[0-9]+|\s+|[.,;!\(\)]+')
lista = []
pre = 0
for m in p.finditer(text):
start = m.start()
end = m.end()
if pre < start:
lista.append(text[pre:start])
lista.append(text[start:end])
pre = end
return lista
for root,_,files in os.walk('input'):
for f in files:
if not f.endswith('.txt'):
continue
pmid = f[:-4]
print pmid
alter = os.path.join(root,pmid+'.txt')
alterFile = codecs.open(alter,'r','utf-8')
alterText = alterFile.read().strip()
alterFile.close()
reader = AnnParser(root,pmid+'.ann')
annotation = reader.parse()
if len(annotation['T']) == 0:
writer.write('output',pmid+'.ann',annotation)
continue
gold = os.path.join('output',pmid+'.txt')
goldFile = codecs.open(gold,'r','utf-8')
goldText = goldFile.read().strip()
goldFile.close()
entities = annotation['T']
goldPhrases = get_phrase(goldText)
alterPhrases = get_phrase(alterText)
h = Hirschberg(goldPhrases,alterPhrases)
#h = Hirschberg(list(goldText),list(alterText))
alignGold,alignAlter = h.align()
#print ''.join(alignGold)
#print ''.join(alignAlter)
alter2gold = h.map_alignment(''.join(alignGold),''.join(alignAlter))
for k,e in entities.iteritems():
start = int(e.start)
end = int(e.end)
e.start = alter2gold[start]
if alter2gold[end] - alter2gold[end-1] > 1:
e.end = alter2gold[end-1]+1
else:
e.end = alter2gold[end]
e.text = goldText[e.start:e.end]
writer.write('output',pmid+'.ann',annotation)
| gpl-2.0 | -4,057,926,957,120,265,700 | 27.638889 | 76 | 0.548497 | false | 3.425249 | false | false | false |
cloudburst/libheap | libheap/pydbg/pygdbpython.py | 1 | 4894 | import sys
from functools import wraps
from libheap.frontend.printutils import print_error
try:
import gdb
except ImportError:
print("Not running inside of GDB, exiting...")
sys.exit()
def gdb_is_running(f):
"decorator to make sure gdb is running"
@wraps(f)
def _gdb_is_running(*args, **kwargs):
if (gdb.selected_thread() is not None):
return f(*args, **kwargs)
else:
print_error("GDB is not running.")
return _gdb_is_running
class pygdbpython:
def __init__(self):
self.inferior = None
@gdb_is_running
def execute(self, cmd, to_string=True):
return gdb.execute(cmd, to_string=to_string)
def format_address(self, value):
"""Helper for printing gdb.Value on both python 2 and 3
"""
try:
ret = int(value)
except gdb.error:
# python2 error: Cannot convert value to int.
# value.cast(gdb.lookup_type("unsigned long"))
ret = int(str(value).split(' ')[0], 16)
return ret
@gdb_is_running
def get_heap_address(self, mp=None):
"""Read heap address from glibc's mp_ structure if available,
otherwise fall back to /proc/self/maps which is unreliable.
"""
start, end = None, None
if mp is not None:
from libheap.ptmalloc.malloc_par import malloc_par
if isinstance(mp, malloc_par):
start = mp.sbrk_base
else:
print_error("Please specify a valid malloc_par variable")
# XXX: add end from arena(s).system_mem ?
else:
pid, task_id, thread_id = gdb.selected_thread().ptid
maps_file = "/proc/%d/task/%d/maps"
maps_data = open(maps_file % (pid, task_id)).readlines()
for line in maps_data:
if any(x.strip() == '[heap]' for x in line.split(' ')):
heap_range = line.split(' ')[0]
start, end = [int(h, 16) for h in heap_range.split('-')]
break
return start, end
@gdb_is_running
def get_arch(self):
cmd = self.execute("maintenance info sections ?")
return cmd.strip().split()[-1:]
def get_inferior(self):
try:
if self.inferior is None:
if len(gdb.inferiors()) == 0:
print_error("No gdb inferior could be found.")
return -1
else:
self.inferior = gdb.inferiors()[0]
return self.inferior
else:
return self.inferior
except AttributeError:
print_error("This gdb's python support is too old.")
sys.exit()
@gdb_is_running
def get_size_sz(self):
try:
_machine = self.get_arch()[0]
except IndexError:
_machine = ""
SIZE_SZ = 0
print_error("Retrieving SIZE_SZ failed.")
except TypeError: # gdb is not running
_machine = ""
SIZE_SZ = 0
print_error("Retrieving SIZE_SZ failed.")
if "elf64" in _machine:
SIZE_SZ = 8
elif "elf32" in _machine:
SIZE_SZ = 4
else:
SIZE_SZ = 0
print_error("Retrieving SIZE_SZ failed.")
return SIZE_SZ
@gdb_is_running
def read_memory(self, address, length):
if self.inferior is None:
self.inferior = self.get_inferior()
return self.inferior.read_memory(address, length)
@gdb_is_running
def read_variable(self, variable=None):
if variable is None:
print_error("Please specify a variable to read")
return None
try:
return gdb.selected_frame().read_var(variable)
except RuntimeError:
# No idea why this works but sometimes the frame is not selected
# print_error("No gdb frame is currently selected.\n")
try:
return gdb.selected_frame().read_var(variable)
except RuntimeError:
# variable was not found
# print_error("wrong here!")
return None
except ValueError:
# variable was not found
return None
@gdb_is_running
def string_to_argv(self, arg=None):
if arg is not None:
return gdb.string_to_argv(arg)
@gdb_is_running
def write_memory(self, address, buf, length=None):
if self.inferior is None:
self.inferior = self.get_inferior()
try:
if length is None:
self.inferior.write_memory(address, buf)
else:
self.inferior.write_memory(address, buf, length)
except MemoryError:
print_error("GDB inferior write_memory error")
| mit | -2,558,321,440,610,970,000 | 29.397516 | 76 | 0.53964 | false | 4.109152 | false | false | false |
Panos512/inspire-next | inspirehep/modules/records/receivers.py | 1 | 12132 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014, 2015, 2016 CERN.
#
# INSPIRE is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Pre-record receivers."""
from flask import current_app
from invenio_indexer.signals import before_record_index
from invenio_records.signals import (
before_record_insert,
before_record_update,
)
from inspirehep.utils.date import create_valid_date
from inspirehep.dojson.utils import get_recid_from_ref, classify_field
import six
from .signals import after_record_enhanced
@before_record_index.connect
def enhance_record(sender, json, *args, **kwargs):
"""Runs all the record enhancers and fires the after_record_enhanced signals
to allow receivers work with a fully populated record."""
populate_inspire_subjects(sender, json, *args, **kwargs)
populate_inspire_document_type(sender, json, *args, **kwargs)
match_valid_experiments(sender, json, *args, **kwargs)
dates_validator(sender, json, *args, **kwargs)
add_recids_and_validate(sender, json, *args, **kwargs)
after_record_enhanced.send(json)
@before_record_insert.connect
@before_record_update.connect
def normalize_field_categories(sender, *args, **kwargs):
"""Normalize field_categories."""
for idx, field in enumerate(sender.get('field_categories', [])):
if field.get('scheme') == "INSPIRE" or '_scheme' in field or '_term' in field:
# Already normalized form
continue
original_term = field.get('term')
normalized_term = classify_field(original_term)
scheme = 'INSPIRE' if normalized_term else None
original_scheme = field.get('scheme')
if isinstance(original_scheme, (list, tuple)):
original_scheme = original_scheme[0]
updated_field = {
'_scheme': original_scheme,
'scheme': scheme,
'_term': original_term,
'term': normalized_term,
}
source = field.get('source')
if source:
if 'automatically' in source:
source = 'INSPIRE'
updated_field['source'] = source
sender['field_categories'][idx].update(updated_field)
def populate_inspire_subjects(recid, json, *args, **kwargs):
"""
Populate a json record before indexing it to elastic.
Adds a field for faceting INSPIRE subjects
"""
inspire_subjects = [
s['term'] for s in json.get('field_categories', [])
if s.get('scheme', '') == 'INSPIRE' and s.get('term')
]
json['facet_inspire_subjects'] = inspire_subjects
def populate_inspire_document_type(recid, json, *args, **kwargs):
""" Populates a json record before indexing it to elastic.
Adds a field for faceting INSPIRE document type
"""
inspire_doc_type = []
if 'collections' in json:
for element in json.get('collections', []):
if 'primary' in element and element.get('primary', ''):
if element['primary'].lower() == 'published':
inspire_doc_type.append('peer reviewed')
break
elif element['primary'].lower() == 'thesis':
inspire_doc_type.append(element['primary'].lower())
break
elif element['primary'].lower() == 'book':
inspire_doc_type.append(element['primary'].lower())
break
elif element['primary'].lower() == 'bookchapter':
inspire_doc_type.append('book chapter')
break
elif element['primary'].lower() == 'proceedings':
inspire_doc_type.append(element['primary'].lower())
break
elif element['primary'].lower() == 'conferencepaper':
inspire_doc_type.append('conference paper')
break
elif element['primary'].lower() == 'note':
inspire_doc_type.append('note')
break
elif element['primary'].lower() == 'report':
inspire_doc_type.append(element['primary'].lower())
break
elif element['primary'].lower() == 'activityreport':
inspire_doc_type.append('activity report')
break
complete_pub_info = []
if not inspire_doc_type:
for field in json.get('publication_info', []):
for k, v in field.iteritems():
complete_pub_info.append(k)
        if ('page_start' not in complete_pub_info and
                'artid' not in complete_pub_info):
inspire_doc_type.append('preprint')
inspire_doc_type.extend([s['primary'].lower() for s in
json.get('collections', []) if 'primary'
in s and s['primary'] is not None and
s['primary'].lower() in
('review', 'lectures')])
json['facet_inspire_doc_type'] = inspire_doc_type
def match_valid_experiments(recid, json, *args, **kwargs):
"""Matches misspelled experiment names with valid experiments.
Tries to match with valid experiments by matching lowercased and
whitespace-free versions of known experiments.
"""
experiments = json.get("accelerator_experiments")
if experiments:
for exp in experiments:
# FIXME: These lists are temporary. We should have a list of experiment names
# that is generated from the current state of our data.
from .experiment_list import EXPERIMENTS_NAMES as experiments_list_original, experiments_list
facet_experiments_list = []
experiments = exp.get("experiment")
if experiments:
if type(experiments) is not list:
experiments = [experiments]
for experiment in experiments:
experiment = experiment.lower()
experiment = experiment.replace(' ', '')
try:
# Check if normalized form of experiment is in the list of
# valid experiments
x = experiments_list.index(experiment)
facet_experiment = experiments_list_original[x]
except ValueError:
# If the experiment cannot be matched it is considered
# valid
facet_experiment = exp.get("experiment")
facet_experiments_list.append(facet_experiment)
exp.update({"facet_experiment": [facet_experiments_list]})
def dates_validator(recid, json, *args, **kwargs):
"""Find and assign the correct dates in a record."""
dates_to_check = ['opening_date', 'closing_date', 'deadline_date']
for date_key in dates_to_check:
if date_key in json:
valid_date = create_valid_date(json[date_key])
if valid_date != json[date_key]:
current_app.logger.warning(
                    'MALFORMED: {0} value in {1}: {2}'.format(
date_key, recid, json[date_key]
)
)
json[date_key] = valid_date
def references_validator(recid, json, *args, **kwargs):
"""Find and assign the correct references in a record."""
for ref in json.get('references', []):
if ref.get('recid') and not six.text_type(ref.get('recid')).isdigit():
# Bad recid! Remove.
current_app.logger.warning(
'MALFORMED: recid value found in references of {0}: {1}'.format(recid, ref.get('recid')))
del ref['recid']
def populate_recid_from_ref(recid, json, *args, **kwargs):
"""Extracts recids from all reference fields and adds them to ES.
For every field that has as a value a reference object to another record,
add a sibling after extracting the record id. e.g.
{"record": {"$ref": "http://x/y/2}}
is transformed to:
{"record": {"$ref": "http://x/y/2},
"recid": 2}
Siblings are renamed using the following scheme:
Remove "record" occurrences and append _recid without doubling or
prepending underscores to the original name.
For every known list of object references add a new list with the
corresponding recids. e.g.
{"records": [{"$ref": "http://x/y/1"}, {"$ref": "http://x/y/2"}]}
is transformed to:
{"records": [{"$ref": "http://x/y/1"}, {"$ref": "http://x/y/2"}]
"recids": [1, 2]}
"""
list_ref_fields_translations = {
'deleted_records': 'deleted_recids'
}
def _recusive_find_refs(json_root):
if isinstance(json_root, list):
items = enumerate(json_root)
elif isinstance(json_root, dict):
# Note that items have to be generated before altering the dict.
# In this case, iteritems might break during iteration.
items = json_root.items()
else:
items = []
for key, value in items:
if (isinstance(json_root, dict) and isinstance(value, dict) and
'$ref' in value):
# Append '_recid' and remove 'record' from the key name.
key_basename = key.replace('record', '').rstrip('_')
new_key = '{}_recid'.format(key_basename).lstrip('_')
json_root[new_key] = get_recid_from_ref(value)
elif (isinstance(json_root, dict) and isinstance(value, list) and
key in list_ref_fields_translations):
new_list = [get_recid_from_ref(v) for v in value]
new_key = list_ref_fields_translations[key]
json_root[new_key] = new_list
else:
_recusive_find_refs(value)
_recusive_find_refs(json)
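

# Illustrative example (hypothetical field names): following the renaming
# scheme described in the docstring above, a record such as
#     {"related_record": {"$ref": "http://x/y/42"},
#      "deleted_records": [{"$ref": "http://x/y/7"}, {"$ref": "http://x/y/8"}]}
# would be enriched in place with the siblings
#     {"related_recid": 42, "deleted_recids": [7, 8]}
# "related_record" is only an example key; any key whose value is a $ref
# object is handled the same way, while list-valued fields are translated
# only if they appear in list_ref_fields_translations.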
def add_recids_and_validate(recid, json, *args, **kwargs):
"""Ensure that recids are generated before being validated."""
populate_recid_from_ref(recid, json, *args, **kwargs)
references_validator(recid, json, *args, **kwargs)
| gpl-2.0 | -3,472,888,303,116,580,000 | 40.406143 | 105 | 0.585889 | false | 4.143443 | false | false | false |
so-sure/tagged-route53 | tagged-route53.py | 1 | 10149 | #!/usr/bin/python
import requests
import boto3
import argparse
class Dns(object):
# Default constructor of the class.
def __init__(self):
self.ec2_client = boto3.client('ec2')
self.dns_client = boto3.client('route53')
self.role = None
self.env = None
self.instance_id = None
self.instances = None
self.indexes = None
self.instance_count = None
self.hostname = None
self.ip = None
self.use_public_ip = None
self.domain = None
self.set_tag_name = True
self.set_dns_registration = True
self.force_dns_registration = False
self.tag_env = None
self.tag_role = None
self.tag_index = None
self.name = None
self.update_dns = True
self.quiet = False
self.update_index = True
def current_instance(self):
response = requests.get('http://169.254.169.254/latest/meta-data/instance-id')
self.instance_id = response.text
if not self.quiet:
print 'Instance: %s' % (self.instance_id)
def current_public_ip(self):
response = self.ec2_client.describe_instances(InstanceIds=[self.instance_id])
instances = response['Reservations']
self.ip = instances[0]['Instances'][0]['PublicIpAddress']
if not self.quiet:
print 'IP: %s' % (self.ip)
def current_private_ip(self):
response = self.ec2_client.describe_instances(InstanceIds=[self.instance_id])
instances = response['Reservations']
self.ip = instances[0]['Instances'][0]['PrivateIpAddress']
if not self.quiet:
print 'IP: %s' % (self.ip)
def current_role_env(self):
if self.instance_id is None:
self.current_instance()
response = self.ec2_client.describe_instances(InstanceIds=[self.instance_id])
instances = response['Reservations']
# Only 1 instance
tags = instances[0]['Instances'][0]['Tags']
for tag in tags:
if self.env is None and tag['Key'] == self.tag_env:
self.env = tag['Value']
elif self.role is None and tag['Key'] == self.tag_role:
self.role = tag['Value']
if not self.quiet:
print 'Env: %s Role: %s' % (self.env, self.role)
def get_instance_ids(self):
if self.env is None or self.role is None:
self.current_role_env()
filters = [
{ 'Name':'tag:%s' % (self.tag_env), 'Values':[self.env]},
{ 'Name':'tag:%s' % (self.tag_role), 'Values':[self.role]}
]
response = self.ec2_client.describe_instances(Filters=filters)
instances = response['Reservations']
if not self.quiet:
print 'Checking tags'
self.instances = {}
self.indexes = []
for instance in instances:
index = -1
if instance['Instances'][0]['State']['Name'] == 'running':
instance_id = instance['Instances'][0]['InstanceId']
tags = instance['Instances'][0]['Tags']
for tag in tags:
if tag['Key'] == self.tag_index:
index = tag['Value']
self.indexes.append(index)
self.instances[instance_id] = int(index)
def get_instance_count(self):
if self.instances is None:
self.get_instance_ids()
# the current instance will be in the list, but as we want to start at 1, that's good
self.instance_count = len(self.instances)
if not self.quiet:
print 'Instance count: %d' % (self.instance_count)
if self.instances.has_key(self.instance_id) and self.instances[self.instance_id] >= 0:
self.instance_count = self.instances[self.instance_id]
if not self.quiet:
print 'Index is already set %s' % (self.instance_count)
self.update_dns = False
self.update_index = False
if self.instance_count < 1:
raise Exception('Instance count must be 1 or more')
if not self.quiet:
print self.indexes
if self.update_index:
# May be replacing a previous server
for i in range(1, self.instance_count + 2):
if str(i) not in self.indexes:
self.instance_count = i
break
if not self.quiet:
print 'Using index: %d' % (self.instance_count)
if self.update_index:
self.ec2_client.create_tags(
Resources=[self.instance_id],
Tags=[{'Key': self.tag_index, 'Value': str(self.instance_count) }]
)
if self.set_tag_name:
name = '%s-%s-%d' % (self.env, self.role, self.instance_count)
if not self.quiet:
print 'Setting instance name: %s' % (name)
self.ec2_client.create_tags(
Resources=[self.instance_id],
Tags=[{'Key': 'Name', 'Value': name }]
)
def get_hostname(self):
if self.instance_count is None:
self.get_instance_count()
if self.name is None:
self.hostname = '%s-%d.%s.%s' % (self.role, self.instance_count, self.env, self.domain)
else:
self.hostname = "%s.%s" % (self.name, self.domain)
if not self.quiet:
print 'Hostname: %s' % (self.hostname)
else:
print self.hostname
def run_update_all(self):
self.get_instance_ids()
if not self.quiet:
print self.instances
for instance_id in self.instances.keys():
if not self.quiet:
print 'Updating instance %s' % (instance_id)
self.instance_id = instance_id
self.run_update_dns()
self.indexes.append(str(self.instance_count))
self.hostname = None
self.ip = None
self.instance_count = None
self.update_dns = True
def run_update_dns(self):
if self.hostname is None:
self.get_hostname()
if not self.update_dns and not self.force_dns_registration:
if not self.quiet:
print 'Skipping dns update as server already exists'
return
if not self.set_dns_registration:
if not self.quiet:
print 'Skipping dns registration as per request'
return
if self.ip is None:
if self.use_public_ip:
self.current_public_ip()
else:
self.current_private_ip()
response = self.dns_client.list_hosted_zones_by_name(
DNSName=self.domain
)
zone_id = response['HostedZones'][0]['Id'].replace('/hostedzone/', '')
response = self.dns_client.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch={
'Changes': [
{
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': self.hostname,
'Type': 'A',
'TTL': 60,
'ResourceRecords': [
{
'Value': self.ip
},
]
}
},
]
}
)
if not self.quiet:
print response
def main(self):
parser = argparse.ArgumentParser(description='Update route 53 dns based on server tags')
parser.add_argument('domain', help='Domain name')
parser.add_argument('--skip-tag-name', action='store_true', default=False, help='Skip setting the tag name')
        parser.add_argument('--skip-dns-registration', action='store_true', default=False, help='If set, only display the dns entry and do not run any dns updates')
        parser.add_argument('--force-dns-registration', action='store_true', default=False, help='If set, update dns even when the index tag already exists on the instance')
parser.add_argument('--quiet', action='store_true', default=False, help='If set, only output the hostname')
parser.add_argument('--tag-role', default='role', help='Role tag name (default: %(default)s)')
parser.add_argument('--tag-env', default='env', help='Environment tag name (default: %(default)s)')
parser.add_argument('--tag-index', default='index', help='Index tag name (default: %(default)s)')
parser.add_argument('--public-ip', action='store_true', default=False, help='Use public ip instead of private ip')
parser.add_argument('--name', default=None, help='Ignore tags and just set name')
parser.add_argument('--role', default=None, help='Ignore tags and use given role')
parser.add_argument('--env', default=None, help='Ignore tags and use given env')
parser.add_argument('--instance-id', default=None, help='If given, use instance id given rather than local instance')
parser.add_argument('--all-tags', action='store_true', default=False, help='If given, run for all instances that match tags for role/env. Can be used with --role and/or --env.')
args = parser.parse_args()
self.domain = args.domain
self.set_tag_name = not args.skip_tag_name
self.set_dns_registration = not args.skip_dns_registration
self.force_dns_registration = args.force_dns_registration
self.quiet = args.quiet
self.tag_env = args.tag_env
self.tag_role = args.tag_role
self.role = args.role
self.env = args.env
self.tag_index = args.tag_index
self.name = args.name
self.use_public_ip = args.public_ip
self.instance_id = args.instance_id
if args.all_tags:
self.run_update_all()
else:
self.run_update_dns()
if __name__ == '__main__':
launcher = Dns()
launcher.main()
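# --- Usage sketch (added; not part of the original script) ---
# Assuming AWS credentials with ec2/route53 access are available to boto3, typical
# invocations look like:
#   ./tagged-route53.py example.com                      # register <role>-<index>.<env>.example.com
#   ./tagged-route53.py example.com --public-ip          # use the instance's public IP
#   ./tagged-route53.py example.com --name api           # register api.example.com instead
#   ./tagged-route53.py example.com --all-tags --role web --env prod   # update every matching instance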
| apache-2.0 | -2,124,829,672,126,975,700 | 38.034615 | 185 | 0.549611 | false | 4.09399 | false | false | false |
diegojromerolopez/djanban | src/djanban/apps/password_reseter/email_sender.py | 1 | 1645 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from crequest.middleware import CrequestMiddleware
from django.core.mail import send_mail
from django.template.loader import get_template
from django.conf import settings
from django.urls import reverse
def send_password_request_link(password_request, user):
current_request = CrequestMiddleware.get_request()
absolute_reset_password_url = current_request.build_absolute_uri(
reverse('password_reseter:reset_password', args=(password_request.uuid,))
)
replacements = {"user": user, "absolute_reset_password_url": absolute_reset_password_url}
txt_message = get_template('password_reseter/emails/request_password_reset.txt').render(replacements)
html_message = get_template('password_reseter/emails/request_password_reset.html').render(replacements)
subject = "Djanban :: Request password reset"
return send_mail(subject, txt_message, settings.EMAIL_HOST_USER, recipient_list=[user.email],
fail_silently=False, html_message=html_message)
# The password has been reset successfully
def send_password_reset_successfully_email(user):
replacements = {"user": user}
txt_message = get_template('password_reseter/emails/password_reset_successfully.txt').render(replacements)
html_message = get_template('password_reseter/emails/password_reset_successfully.html').render(replacements)
subject = "Djanban :: Password reset successfully"
return send_mail(subject, txt_message, settings.EMAIL_HOST_USER, recipient_list=[user.email],
fail_silently=False, html_message=html_message)
| mit | -3,453,166,151,156,797,000 | 43.459459 | 112 | 0.743465 | false | 3.888889 | false | false | false |
seanxwzhang/LeetCode | 148 Sort List/solution.py | 1 | 1263 | #! /usr/bin/env python
# Sort a linked list in O(n log n) time using constant space complexity.
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
# a merge sort implementation
class Solution(object):
def sortList(self, head):
if not head or not head.next:
return head
fast, slow = head.next, head
while fast and fast.next:
fast = fast.next.next
slow = slow.next
# now slow reaches the middle of the list
mid = slow.next
slow.next = None
sorted_head = self.sortList(head)
sorted_mid = self.sortList(mid)
# now two sub lists are sorted, sort them in O(n)
dummyNode = ListNode(0)
track = dummyNode
while sorted_head and sorted_mid:
if sorted_head.val < sorted_mid.val:
track.next = sorted_head
sorted_head = sorted_head.next
else:
track.next = sorted_mid
sorted_mid = sorted_mid.next
track = track.next
if sorted_head: track.next = sorted_head
if sorted_mid: track.next = sorted_mid
return dummyNode.next
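
# --- Quick self-check (added sketch; LeetCode supplies ListNode, so it is re-declared
# here only to make the file runnable on its own) ---
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None


def _build(values):
    # Build a singly linked list from a Python list.
    head = None
    for v in reversed(values):
        node = ListNode(v)
        node.next = head
        head = node
    return head


def _collect(head):
    # Flatten a linked list back into a Python list.
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out


if __name__ == '__main__':
    assert _collect(Solution().sortList(_build([4, 2, 1, 3]))) == [1, 2, 3, 4]
    assert _collect(Solution().sortList(_build([]))) == []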
| mit | 6,358,962,998,563,687,000 | 33.162162 | 72 | 0.568488 | false | 3.946875 | false | false | false |
crazcalm/chat-server | server.py | 1 | 11418 | import help_text
import asyncio
import argparse
import logging
from random import randint
clients = []
class SimpleChatClientProtocol(asyncio.Protocol):
"""
This class is the heart of the Chat Server. For each client that
connects to the server, an instance of this class is created. These
instances are saved in a global list.
"""
def __init__(self, name):
self.chatroom_name = name
def _send_msg(self, client, msg, format=True):
"""
This method sends messages clients to other clients
in the chatroom.
Args:
client (SimpleChatClientProtocol): A chat server client
msg (str): message to be sent
"""
if format:
client.transport.write("{}: {}\n".format(self.name,
msg).encode())
else:
client.transport.write("{}\n".format(msg).encode())
def _send_to_self(self, msg, client=False):
"""
This method sends messages to self. Typically used for
help dialogs and other interactions that are meant only
for this client.
Args:
msg (str): message to be sent
"""
if client:
self.transport.write("CLIENT**: {}".format(msg).encode())
else:
self.transport.write("{}\n".format(msg).encode())
def _unique_name(self, name):
"""
This method checks to see if the name that was passed
in as a parameter is unique among the names of the
clients in the chatroom.
Args:
name (str): a potential name
Return:
            bool: True if the name is not used by any other connected client.
"""
logging.debug("Is the name {} unique?".format(name))
result = True
for client in clients:
logging.debug("Checking against: {}".format(client.name))
if name == client.name and self != client:
result = False
break
logging.debug("unique: {}".format(result))
return result
def connection_made(self, transport):
"""
This method designates what will happen when a client
makes a connection to the server.
Args:
transport (socket): The incoming socket from the client
"""
self.transport = transport
self.peername = transport.get_extra_info("peername")
self.name = "No Name"
while not self._unique_name(self.name):
self.name += str(randint(0, 9))
self.description = "None"
logging.info("connection_made: {}".format(self.peername).encode())
clients.append(self)
self._send_to_self("Welcome to {}!".format(self.chatroom_name))
self._send_to_self("To see the options available to you type `/help`")
self._send_to_self("Your username name is: {}".format(self.name))
self.send_to_everyone("<--- {} joined the room".format(self.name),
format=False)
def send_to_everyone(self, msg, format=True):
"""
This method sends a message to everyone in the chatroom.
Args:
msg (str): The message to be sent
"""
for client in clients:
self._send_msg(client, msg, format=format)
def find_client_by_name(self, name):
"""
This method attempts to find a client that has a
name that matches the name passed into the method.
If the client is found, a reference to that client
is returned. If the client is not found, then a None
object is returned.
Args:
name (str): The name used in the search
Returns:
            SimpleChatClientProtocol or None: the matching client, or None if no client has that name.
"""
found = None
for client in clients:
if client.name.strip() == name:
found = client
break
return found
def send_to_list_of_people(self, people, msg):
"""
This method sends a message to a list of people.
Args:
people (list): list of clients
msg (str): The message to be sent
"""
# Currently not used. If I dediced to add groups
# to the app, then I will use this method.
for client in people:
self._send_msg(client, msg)
def data_received(self, data):
"""
This method is in charge of receiving the data that
has been sent from the client. The rules for how
this data is dealt with exist here.
Args:
data (byte): The data received over the socket connection
"""
msg = data.decode().strip()
logging.debug("data_received: {}".format(msg))
if msg == "/disconnect":
self.send_to_everyone("---> {} left the room".format(self.name),
format=False)
self.transport.close()
logging.info("command: /quit")
elif msg == "/whoami":
logging.info("command: /whoami")
self._send_to_self("You are {}\n".format(self.name))
self._send_to_self("Description: {}\n".format(
self.description))
elif msg == "/people":
logging.info("command: /people")
people = [client for client in clients if client != self]
if not people:
self._send_to_self("****No one else is in the room....*****")
for index, client in enumerate(people):
self._send_to_self("{}: {}\n".format(index, client.name))
elif msg == "/chatroom":
logging.info("command: /chatroom")
self._send_to_self("Chatroom name: {}".format(
self.chatroom_name))
elif msg == "/help":
logging.info("command: /help")
self._send_to_self("{}".format(help_text.HELP_GENERAL))
elif msg.startswith("/whois "):
if len(msg.split(' ')) >= 2:
command, name = msg.split(' ', 1)
logging.info("command: {}\Args: {}".format(
command, name))
found = self.find_client_by_name(name.strip())
if found:
self._send_to_self('Name: {}\nDescription: {}'.format(
found.name, found.description))
else:
self._send_to_self("I don't know")
else:
self._send_to_self(help_text.HELP_WHOIS)
elif msg.startswith("/msg "):
            if len(msg.split(' ')) >= 2 and ',' in msg:
args = msg.split(' ', 1)[1]
name, direct_msg = args.split(',', 1)
logging.info("command: /msg-{}, {}".format(name, direct_msg))
found = self.find_client_by_name(name.strip())
if found:
direct_msg = ''.join(direct_msg.strip())
self._send_msg(found, "*{}".format(direct_msg))
self._send_to_self('msg sent')
else:
logging.debug("Not Found: {}".format(name))
self._send_to_self('Could not find {}'.format(name))
else:
self._send_to_self(help_text.HELP_MSG)
elif msg.startswith("/help "):
command_args = msg.split(' ')[:2]
logging.info("command: {}".format(command_args))
error_msg = "{} is not a valid command".format(command_args[1])
msg = help_text.HELP_DICT.get(command_args[1], error_msg)
self._send_to_self(msg)
elif msg.startswith("/set "):
command_args = msg.strip().split(' ')
logging.info("command: {}\n".format(command_args))
key, value = None, None
if len(command_args) >= 3 and\
command_args[1] in ['name', 'description']:
key, *value = command_args[1:]
if key == 'name':
name = ' '.join(value)
if self._unique_name(name):
logging.debug('setting name to {}'.format(value))
self.name = name
self._send_to_self("Name: {}".format(self.name))
else:
self._send_to_self(
"The name you selected is all ready in use."
"\nPlease select another name.")
elif key == 'description':
logging.debug('setting description to {}'.format(value))
self.description = ' '.join(value)
self._send_to_self("Description: {}".format(
self.description))
else:
self._send_to_self(help_text.HELP_SET)
elif msg.startswith("/CLIENT**: USER LIST"):
logging.debug("/CLIENT**: USER LIST")
user_list = [client.name for client in clients]
self._send_to_self(",".join(user_list), client=True)
else:
self.send_to_everyone(msg)
def connection_lost(self, ex):
"""
This method fires when the connections between
the client and server is lost.
Args:
            ex (Exception or None): the exception that caused the connection to close, or None for a clean close.
"""
logging.info("connection_lost: {}".format(self.peername))
clients.remove(self)
def cli_parser():
"""
This function contains the logic for the command line
parser.
"""
chat_server = argparse.ArgumentParser(
description=help_text.CLI.get('description'),
epilog=help_text.CLI.get('epilog'))
chat_server.add_argument(
"--host",
type=str,
default="localhost",
help=help_text.CLI.get('host'))
chat_server.add_argument(
"--port",
type=int,
default=3333,
help=help_text.CLI.get('port'))
chat_server.add_argument(
"--name",
type=str,
default="Chat Room",
help=help_text.CLI.get('name'))
return chat_server
def run_server(host, port, name):
"""
This function is charge of running the server.
Args:
host (str): host name/ip address
port (int): port to which the app will run on
name (str): the name of the chatroom
"""
logging.info("starting up..")
print("Server running on {}:{}".format(host, port))
host = "127.0.0.1" if host == "localhost" else host
loop = asyncio.get_event_loop()
coro = loop.create_server(lambda: SimpleChatClientProtocol(name),
port=port, host=host)
server = loop.run_until_complete(coro)
for socket in server.sockets:
logging.info("serving on {}".format(socket.getsockname()))
loop.run_forever()
def main():
"""
This function contains the logic for the logger
and is in charge of running this application.
"""
logging.basicConfig(
filename="server_log",
filemode="w",
level=logging.DEBUG,
format='%(asctime)s--%(levelname)a--%(funcName)s--%(name)s:%(message)s'
)
cli_args = cli_parser().parse_args()
run_server(cli_args.host, cli_args.port, cli_args.name)
if __name__ == '__main__':
    main()
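# --- Manual test sketch (added; assumes default settings and a netcat client) ---
# Start the server:
#   python3 server.py --host localhost --port 3333 --name "Test Room"
# Then connect one or more clients from other terminals, for example:
#   nc localhost 3333
# and exercise the commands handled in data_received() above, e.g.
#   /help, /set name Alice, /people, /whois Bob, /msg Bob, hello, /disconnect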
| mit | 894,940,125,265,857,700 | 32.881306 | 79 | 0.532055 | false | 4.287646 | false | false | false |
jonwright/ImageD11 | ImageD11/sparseframe.py | 1 | 10124 |
from __future__ import print_function, division
import time, sys
import h5py, scipy.sparse, numpy as np, pylab as pl
from ImageD11 import cImageD11
# see also sandbox/harvest_pixels.py
NAMES = {
"filename" : "original filename used to create a sparse frame",
"intensity" : "corrected pixel values",
"nlabel": "Number of unique labels for an image labelling",
"threshold" : "Cut off used for thresholding",
}
class sparse_frame( object ):
"""
Indices / shape mapping
"""
def __init__(self, row, col, shape, itype=np.uint16, pixels=None):
""" row = slow direction
col = fast direction
shape = size of full image
itype = the integer type to store the indices
our c codes currently use unsigned short...
nnz is implicit as len(row)==len(col)
pixels = numpy arrays in a dict to name them
throw in a ary.attrs if you want to save some
"""
self.check( row, col, shape, itype )
self.shape = shape
self.row = np.asarray(row, dtype = itype )
self.col = np.asarray(col, dtype = itype )
self.nnz = len(self.row)
# Things we could have using those indices:
# raw pixel intensities
# corrected intensities
# smoothed pixel intensities
# labelling via different algorithms
self.pixels = {}
self.meta = {}
if pixels is not None:
for name, val in pixels.items():
assert len(val) == self.nnz
self.pixels[name] = val
def check(self, row, col, shape, itype):
""" Ensure the index data makes sense and fits """
lo = np.iinfo(itype).min
hi = np.iinfo(itype).max
assert len(shape) == 2
assert shape[0] >= lo and shape[0] < hi
assert shape[1] >= lo and shape[1] < hi
assert np.min(row) >= lo and np.max(row) < hi
assert np.min(col) >= lo and np.max(col) < hi
assert len(row) == len(col)
def is_sorted(self):
""" Tests whether the data are sorted into slow/fast order
rows are slow direction
columns are fast """
# TODO: non uint16 cases
assert self.row.dtype == np.uint16 and \
cImageD11.sparse_is_sorted( self.row, self.col ) == 0
def to_dense(self, data=None, out=None):
""" returns the full 2D image
data = name in self.pixels or 1D array matching self.nnz
Does not handle repeated indices
e.g. obj.to_dense( obj.pixels['raw_intensity'] )
"""
if data in self.pixels:
data = self.pixels[data] # give back this array
else:
ks = list( self.pixels.keys() )
if len(ks)==1:
data = self.pixels[ks[0]] # default for only one
else:
data = np.ones( self.nnz, np.bool ) # give a mask
if out is None:
out = np.zeros( self.shape, data.dtype )
else:
assert out.shape == self.shape
assert len(data) == self.nnz
adr = self.row.astype(np.intp) * self.shape[1] + self.col
out.flat[adr] = data
return out
def mask( self, msk ):
""" returns a subset of itself """
spf = sparse_frame( self.row[msk],
self.col[msk],
self.shape, self.row.dtype )
for name, px in self.pixels.items():
if name in self.meta:
m = self.meta[name].copy()
else:
m = None
spf.set_pixels( name, px[msk], meta = m )
return spf
def set_pixels( self, name, values, meta=None ):
""" Named arrays sharing these labels """
assert len(values) == self.nnz
self.pixels[name] = values
if meta is not None:
self.meta[name] = meta
def sort_by( self, name ):
""" Not sure when you would do this. For sorting
by a peak labelling to get pixels per peak """
assert name in self.pixels
order = np.argsort( self.pixels[name] )
        self.reorder( order )
def sort( self ):
""" Puts you into slow / fast looping order """
order = np.lexsort( ( self.col, self.row ) )
        self.reorder( order )
def reorder( self, order ):
""" Put the pixels into a different order (in place) """
assert len(order) == self.nnz
self.row[:] = self.row[order]
self.col[:] = self.col[order]
for name, px in self.pixels.items():
px[:] = px[order]
def threshold(self, threshold, name='intensity'):
"""
returns a new sparse frame with pixels > threshold
"""
return self.mask( self.pixels[name] > threshold )
def to_hdf_group( frame, group ):
""" Save a 2D sparse frame to a hdf group
Makes 1 single frame per group
"""
itype = np.dtype( frame.row.dtype )
meta = { "itype" : itype.name,
"shape0" : frame.shape[0],
"shape1" : frame.shape[1] }
for name, value in meta.items():
group.attrs[name] = value
opts = { "compression": "lzf",
"shuffle" : True,
}
#opts = {}
group.require_dataset( "row", shape=(frame.nnz,),
dtype=itype, **opts )
group.require_dataset( "col", shape=(frame.nnz,),
dtype=itype, **opts )
group['row'][:] = frame.row
group['col'][:] = frame.col
for pxname, px in frame.pixels.items():
group.require_dataset( pxname, shape=(frame.nnz,),
dtype=px.dtype,
**opts )
group[pxname][:] = px
        if pxname in frame.meta:
            for key, value in frame.meta[pxname].items():
                group[pxname].attrs[key] = value
def from_data_mask( mask, data, header ):
"""
Create a sparse from a dense array
"""
assert mask.shape == data.shape
# using uint16 here - perhaps make this general in the future
# ... but not for now
assert data.shape[0] < pow(2,16)-1
assert data.shape[1] < pow(2,16)-1
nnz = (mask>0).sum()
tmp = np.empty( data.shape[0],'i') # tmp hold px per row cumsums
row = np.empty( nnz, np.uint16 )
col = np.empty( nnz, np.uint16 )
cImageD11.mask_to_coo( mask, row, col, tmp )
intensity = data[ mask > 0 ]
# intensity.attrs = dict(header) # FIXME USE xarray ?
spf = sparse_frame( row, col, data.shape, itype=np.uint16 )
spf.set_pixels( "intensity" , intensity, dict( header ) )
return spf
def from_hdf_group( group ):
itype = np.dtype( group.attrs['itype'] )
shape = group.attrs['shape0'], group.attrs['shape1']
row = group['row'][:] # read it
col = group['col'][:]
spf = sparse_frame( row, col, shape, itype=itype )
for pxname in list(group):
if pxname in ["row", "col"]:
continue
data = group[pxname][:]
header = dict( group[pxname].attrs )
spf.set_pixels( pxname, data, header )
return spf
def sparse_moments( frame, intensity_name, labels_name ):
""" We rely on a labelling array carrying nlabel metadata (==labels.data.max())"""
nl = frame.meta[ labels_name ][ "nlabel" ]
return cImageD11.sparse_blob2Dproperties(
frame.pixels[intensity_name],
frame.row,
frame.col,
frame.pixels[labels_name],
nl )
def overlaps(frame1, labels1, frame2, labels2):
"""
figures out which label of self matches which label of other
Assumes the zero label does not exist (background)
Returns sparse array of:
label in self (row)
label in other (col)
number of shared pixels (data)
"""
ki = np.empty( frame1.nnz, 'i' )
kj = np.empty( frame2.nnz, 'i' )
npx = cImageD11.sparse_overlaps( frame1.row, frame1.col, ki,
frame2.row, frame2.col, kj)
# self.data and other.data filled during init
row = frame1.pixels[labels1][ ki[:npx] ] # my labels
col = frame2.pixels[labels2][ kj[:npx] ] # your labels
ect = np.empty( npx, 'i') # ect = counts of overlaps
tj = np.empty( npx, 'i') # tj = temporary for sorting
n1 = frame1.meta[labels1][ "nlabel" ]
n2 = frame2.meta[labels2][ "nlabel" ]
tmp = np.empty( max(n1, n2)+1, 'i') # for histogram
nedge = cImageD11.compress_duplicates( row, col, ect, tj, tmp )
# overwrites row/col in place : ignore the zero label (hope it is not there)
crow = row[:nedge]-1
ccol = col[:nedge]-1
cdata = ect[:nedge]
cedges = scipy.sparse.coo_matrix( ( cdata, (crow, ccol)), shape=(n1, n2) )
# really?
return cedges
def sparse_connected_pixels( frame,
label_name="connectedpixels",
data_name="intensity",
threshold=None ):
"""
frame = a sparse frame
label_name = the array to save labels to in that frame
data_name = an array in that frame
threshold = float value or take data.threshold
"""
labels = np.zeros( frame.nnz, "i" )
if threshold is None:
threshold = frame.meta[data_name]["threshold"]
nlabel = cImageD11.sparse_connectedpixels(
frame.pixels[data_name], frame.row, frame.col,
threshold, labels )
frame.set_pixels( label_name, labels, { 'nlabel' : nlabel } )
return nlabel
def sparse_localmax( frame,
label_name="localmax",
data_name = "intensity" ):
labels = np.zeros( frame.nnz, "i" )
vmx = np.zeros( frame.nnz, np.float32 )
imx = np.zeros( frame.nnz, 'i')
nlabel = cImageD11.sparse_localmaxlabel(
frame.pixels[data_name], frame.row, frame.col,
vmx, imx, labels )
frame.set_pixels( label_name, labels, { "nlabel" : nlabel } )
return nlabel
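
# --- Minimal usage sketch (added; assumes the compiled cImageD11 extension is importable) ---
# Builds a sparse frame from a small dense image and labels its connected pixels,
# using only the helpers defined above.
def _sparseframe_demo():
    dense = np.zeros((32, 32), np.float32)
    dense[5:8, 5:8] = 10.0           # one small blob
    dense[20:22, 12:15] = 7.0        # another blob
    msk = (dense > 1.0).astype(np.uint8)
    spf = from_data_mask(msk, dense, {"threshold": 1.0})
    nlabel = sparse_connected_pixels(spf, threshold=1.0)
    moments = sparse_moments(spf, "intensity", "connectedpixels")
    return nlabel, moments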
| gpl-2.0 | 6,007,533,981,669,690,000 | 34.398601 | 86 | 0.55077 | false | 3.664133 | false | false | false |
nksheridan/elephantAI | test_Deter_as_Server_and_Play_Audio.py | 1 | 1414 | # DETER DEVICE
# this is test code for putting the deter device into server mode, and getting a message via bluetooth from the detection device, and
# then going ahead and playing scare sounds. You need to determine your MAC address. It is for the server in this case, so the MAC address
# of the deter device. You also need to pair the deter device with the detection device via Bluetooth prior to using this. You can do
# that from the Bluetooth icon in the Raspian GUI.
import socket
import time
import os
import random
hostMACaddress = 'xxx'
port = 9
backlog = 1
size = 1024
s = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM)
s.bind((hostMACaddress, port))
s.listen(backlog)
print("We are waiting for a message from the detection device to arrive via bluetooth!")
try:
    client, address = s.accept()
    data = client.recv(size)
    if data:
        print(data)
        client.send(data)
        # echo back
except:
    print("closing the socket")
    client.close()
    s.close()

message = str(data)
# convert the data received to a string
print(message)

if message == "b'yes_audio'":
    print("play scare sounds now")
    time.sleep(3)
    scare_sounds = ['aplay bees1.wav', 'aplay bees2.wav', 'aplay bees3.wav']
    i = 0
    while i < 10:
        i = i + 1
        to_play = random.choice(scare_sounds)
        print(to_play)
        os.system(to_play)
    print("Finished scare. Now can message detection device, and await another message from it")
| mit | 6,251,672,956,260,858,000 | 27.857143 | 138 | 0.737624 | false | 3.128319 | false | false | false |
tyler-cromwell/Acid | client.py | 1 | 2439 | #!/usr/bin/python3
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
The MIT License (MIT)
Copyright (c) 2016 Tyler Cromwell
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
import getopt
import readline
import socket
import sys
"""
Readline settings
"""
readline.parse_and_bind('tab: complete')
"""
Connection settings
"""
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_ip = '10.0.0.20'
client_port = 8888
try:
opts, args = getopt.getopt(sys.argv[1:], 'i:p:', ['ipaddress=', 'port='])
""" Process command line arguments """
for o, a in opts:
if o == '-i' or o == '--ipaddress':
client_ip = a
elif o == '-p' or o == '--port':
client_port = int(a)
""" One-time send """
if len(sys.argv) > 1:
message = ''
for i in range(1, len(sys.argv)):
message += sys.argv[i]
if i < (len(sys.argv)-1):
message += ' '
client.sendto(message.encode('utf-8'), (client_ip, client_port))
""" Loop for message """
while len(sys.argv) >= 1:
user_input = input('UDP> ')
if user_input == 'quit' or user_input == 'exit':
break
client.sendto(user_input.encode('utf-8'), (client_ip, client_port))
except EOFError:
print()
except KeyboardInterrupt:
print()
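# --- Example invocations (added sketch) ---
#   ./client.py                       # interactive "UDP> " prompt to 10.0.0.20:8888
#   ./client.py -i 127.0.0.1 -p 9999  # point at a different listener
#   ./client.py hello world           # send "hello world" once, then open the prompt
# Type "quit" or "exit" (or Ctrl-D/Ctrl-C) to leave the prompt.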
| mit | -3,025,028,006,687,815,700 | 30.675325 | 80 | 0.621976 | false | 3.763889 | false | false | false |
metwit/django-fulmine | fulmine/forms.py | 1 | 2406 | from django import forms
from django.core.exceptions import ValidationError
from fulmine.models import parse_scope
class SeparatedValuesField(forms.CharField):
def __init__(self, *args, **kwargs):
self.separator = kwargs.pop('separator', ' ')
super(SeparatedValuesField, self).__init__(*args, **kwargs)
def clean(self, value):
if not value:
return []
return value.split(self.separator)
class AuthorizationForm(forms.Form):
response_type = forms.ChoiceField(
choices=[('code', 'code'), ('token', 'token')])
client_id = forms.CharField()
redirect_uri = forms.CharField(required=False)
scope = SeparatedValuesField(required=False)
state = forms.CharField(required=False)
def clean_scope(self):
scope = self.cleaned_data['scope']
return parse_scope(scope)
def clean_scope(form):
scope = form.cleaned_data['scope']
return parse_scope(scope)
def make_token_form(grant_type, required_fields=[], optional_fields=[],
django_fields={}):
class_dict = dict()
for field_name in optional_fields:
if field_name == 'scope':
field = SeparatedValuesField(required=False)
else:
field = forms.CharField(required=False)
class_dict[field_name] = field
for field_name in required_fields:
if field_name == 'scope':
field = SeparatedValuesField(required=True)
else:
field = forms.CharField(required=True)
class_dict[field_name] = field
for field_name, field in django_fields.iteritems():
class_dict[field_name] = field
class_dict['clean_scope'] = clean_scope
cls = type('%sTokenForm' % grant_type,
(forms.Form, ),
class_dict
)
return cls
AuthorizationCodeTokenForm = make_token_form('authorization_code',
required_fields=[
'code',
],
optional_fields=[
'redirect_uri',
'client_id',
'scope',
]
)
PasswordTokenForm = make_token_form('password',
required_fields=[
'username',
'password',
'scope',
]
)
ClientCredentialsTokenForm = make_token_form('client_credentials',
required_fields=['scope'],
)
RefreshTokenTokenForm = make_token_form('refresh_token',
required_fields=['refresh_token'],
optional_fields=['scope']
)
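# --- Usage sketch (added; hypothetical view-side code, not part of this module) ---
# A token endpoint could validate a grant with the generated form classes, e.g.:
#
#   form = PasswordTokenForm({
#       'username': 'alice',        # hypothetical credentials
#       'password': 'secret',
#       'scope': 'read write',      # split by SeparatedValuesField, then parse_scope()
#   })
#   if form.is_valid():
#       scope = form.cleaned_data['scope']
#   else:
#       errors = form.errors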
| bsd-3-clause | -1,758,042,214,765,443,600 | 24.870968 | 71 | 0.618869 | false | 4.003328 | false | false | false |
gitcoinco/web | app/marketing/management/commands/no_applicants_email.py | 1 | 1922 | '''
Copyright (C) 2021 Gitcoin Core
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db.models import Q
from django.utils import timezone
from dashboard.models import Bounty
from marketing.mails import no_applicant_reminder
class Command(BaseCommand):
help = 'sends reminder emails to funders whose bounties have 0 applications'
def handle(self, *args, **options):
if settings.DEBUG:
print("not active in non prod environments")
return
start_time_3_days = timezone.now() - timezone.timedelta(hours=24 * 3)
end_time_3_days = timezone.now() - timezone.timedelta(hours=24 * 4)
start_time_7_days = timezone.now() - timezone.timedelta(hours=24 * 7)
end_time_7_days = timezone.now() - timezone.timedelta(hours=24 * 8)
bounties = Bounty.objects.current().filter(
(Q(created_on__range=[end_time_3_days, start_time_3_days]) | Q(created_on__range=[end_time_7_days, start_time_7_days])),
idx_status='open',
network='mainnet'
)
for bounty in [b for b in bounties if b.no_of_applicants == 0]:
no_applicant_reminder(bounty.bounty_owner_email, bounty)
| agpl-3.0 | 4,106,764,754,830,823,400 | 39.041667 | 132 | 0.689386 | false | 3.875 | false | false | false |
karlwithak/nowradio | nowradio/stationInfoUpdater.py | 1 | 1896 | import requests
import ourUtils
from dbManager import Queries, get_connection
# This program goes through the list of stations in the db and updates information such as
# current listeners, max listeners, peak listeners, status(up or not)
def worker(id_url_list, connection):
cur = connection.cursor()
for id_ip in id_url_list:
url = "http://" + id_ip[1] + '/7.html'
try:
response = requests.get(url, headers=ourUtils.request_header, timeout=2)
except requests.ConnectionError:
print("connection error: " + url)
cur.execute(Queries.set_station_down, (id_ip[0],))
except requests.Timeout:
print("timeout error : " + url)
cur.execute(Queries.set_station_down, (id_ip[0],))
except Exception:
print("unknown error : " + url)
cur.execute(Queries.set_station_down, (id_ip[0],))
else:
if response.status_code in (200, 304) \
and response.text.count(",") >= 6 \
and len(response.text) < 2048:
info = response.text.split(",")
data = {
                    'is_up': info[1] == "1",  # streamstatus field; bool() of a non-empty string is always True
'peak': info[2],
'max': info[3],
'active': info[4],
'id': id_ip[0]
}
cur.execute(Queries.update_station_by_id, data)
else:
print("bad response: " + url)
cur.execute(Queries.set_station_down, (id_ip[0],))
cur.close()
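# Note (added): a Shoutcast-style "/7.html" page normally returns one CSV line such as
#   12,1,45,100,10,128,Some Artist - Some Song
# commonly read as currentlisteners,streamstatus,peaklisteners,maxlisteners,
# uniquelisteners,bitrate,songtitle. The worker above uses streamstatus (info[1]),
# peak (info[2]), max (info[3]) and active listeners (info[4]); this mapping is
# inferred from the code rather than taken from the project's own documentation.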
def main():
conn = get_connection()
if conn is None:
exit("could not connect to db")
id_url_list = ourUtils.db_quick_query(conn, Queries.get_all_ips)
ourUtils.multi_thread_runner(id_url_list, worker, conn)
conn.commit()
conn.close()
if __name__ == '__main__':
main()
| mit | 3,592,722,863,100,787,000 | 34.111111 | 90 | 0.533228 | false | 3.792 | false | false | false |
scavallero/mydomus | auth.py | 1 | 3792 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MyDomus - Polling Service
# Copyright (c) 2016 Salvatore Cavallero ([email protected])
# https://github.com/scavallero/mydomus
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import time
import json
import hashlib
import logging
import httpapp
import os
#########################################################################
# Module setup
########################################################################
logger = logging.getLogger("Mydomus")
user = {}
def verifyUser(usr,pswd):
res = False
if usr in user.keys():
if user[usr]['password'] == pswd:
res = True
return res
def verifyToken(token):
res = False
usr = ""
for item in user.keys():
if 'token' in user[item].keys():
if user[item]['token'] == token:
res = True
usr = item
return res,usr
def decodeUrlToken(url):
fields = url.split('/')
token = fields[-1]
del fields[-1]
new_url = ''
for item in fields:
if item != '':
new_url = new_url + '/'+item
if new_url == '':
new_url = '/'
res,usr = verifyToken(token)
if res:
return new_url
else:
return None
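# Example (added sketch): for a user whose token is "abc123",
#   decodeUrlToken('/status/device/abc123')  ->  '/status/device'
#   decodeUrlToken('/abc123')                ->  '/'
# while an unknown token makes the function return None.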
def load():
global user
logger.info("Start loading user authorization")
CWD = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(CWD,"user.conf")) as data_file:
try:
user = json.load(data_file)
except ValueError: # includes simplejson.decoder.JSONDecodeError
logger.critical('json decoding failure user.conf')
for item in user.keys():
h = hashlib.sha224(item+user[item]['password']).hexdigest()
p = hashlib.md5(user[item]['password']).hexdigest()
user[item]['token'] = h
user[item]['password'] = p
logger.info('User: %s - %s' % (item,h))
### ADDED API ###
@httpapp.addurl('/verify/')
def url_verify(p,m):
global user
fields = p.split('/')
if len(fields) == 4:
if fields[2] in user.keys():
if fields[3] == user[fields[2]]['password']:
return '{"status":"ok","token":"%s"}' % user[fields[2]]['token']
else:
return '{"status":"error","reason":"wrong password"}'
else:
return '{"status":"error","reason":"user unknown"}'
else:
return '{"status":"error","reason":"missing user or password"}'
@httpapp.addurl('/checktoken/')
def url_checktoken(p,m):
global user
fields = p.split('/')
if len(fields) == 3:
token = fields[2]
res,usr = verifyToken(token)
if res:
return '{"status":"ok","user":"%s"}' % usr
else:
return '{"status":"error","reason":"wrong token"}'
else:
return '{"status":"error","reason":"missing token"}'
logger.info("User authorization loaded")
| gpl-3.0 | -3,929,618,149,276,053,000 | 26.882353 | 86 | 0.531909 | false | 4.07304 | false | false | false |
Stanford-Online/edx-analytics-data-api | analytics_data_api/v0/views/__init__.py | 1 | 10029 | from itertools import groupby
from django.db import models
from django.db.models import Q
from django.utils import timezone
from rest_framework import generics, serializers
from opaque_keys.edx.keys import CourseKey
from analytics_data_api.v0.exceptions import CourseNotSpecifiedError
from analytics_data_api.v0.views.utils import (
raise_404_if_none,
split_query_argument,
validate_course_id
)
class CourseViewMixin(object):
"""
Captures the course_id from the url and validates it.
"""
course_id = None
def get(self, request, *args, **kwargs):
self.course_id = self.kwargs.get('course_id', request.query_params.get('course_id', None))
if not self.course_id:
raise CourseNotSpecifiedError()
validate_course_id(self.course_id)
return super(CourseViewMixin, self).get(request, *args, **kwargs)
class PaginatedHeadersMixin(object):
"""
If the response is paginated, then augment it with this response header:
* Link: list of next and previous pagination URLs, e.g.
<next_url>; rel="next", <previous_url>; rel="prev"
Format follows the github API convention:
https://developer.github.com/guides/traversing-with-pagination/
Useful with PaginatedCsvRenderer, so that previous/next links aren't lost when returning CSV data.
"""
# TODO: When we upgrade to Django REST API v3.1, define a custom DEFAULT_PAGINATION_CLASS
# instead of using this mechanism:
# http://www.django-rest-framework.org/api-guide/pagination/#header-based-pagination
def get(self, request, *args, **kwargs):
"""
Stores pagination links in a response header.
"""
response = super(PaginatedHeadersMixin, self).get(request, args, kwargs)
link = self.get_paginated_links(response.data)
if link:
response['Link'] = link
return response
@staticmethod
def get_paginated_links(data):
"""
Returns the links string.
"""
# Un-paginated data is returned as a list, not a dict.
next_url = None
prev_url = None
if isinstance(data, dict):
next_url = data.get('next')
prev_url = data.get('previous')
if next_url is not None and prev_url is not None:
link = '<{next_url}>; rel="next", <{prev_url}>; rel="prev"'
elif next_url is not None:
link = '<{next_url}>; rel="next"'
elif prev_url is not None:
link = '<{prev_url}>; rel="prev"'
else:
link = ''
return link.format(next_url=next_url, prev_url=prev_url)
class CsvViewMixin(object):
"""
Augments a text/csv response with this header:
* Content-Disposition: allows the client to download the response as a file attachment.
"""
# Default filename slug for CSV download files
filename_slug = 'report'
def get_csv_filename(self):
"""
Returns the filename for the CSV download.
"""
course_key = CourseKey.from_string(self.course_id)
course_id = u'-'.join([course_key.org, course_key.course, course_key.run])
now = timezone.now().replace(microsecond=0)
return u'{0}--{1}--{2}.csv'.format(course_id, now.isoformat(), self.filename_slug)
def finalize_response(self, request, response, *args, **kwargs):
"""
Append Content-Disposition header to CSV requests.
"""
if request.META.get('HTTP_ACCEPT') == u'text/csv':
response['Content-Disposition'] = u'attachment; filename={}'.format(self.get_csv_filename())
return super(CsvViewMixin, self).finalize_response(request, response, *args, **kwargs)
class APIListView(generics.ListAPIView):
"""
An abstract view to store common code for views that return a list of data.
**Example Requests**
GET /api/v0/some_endpoint/
Returns full list of serialized models with all default fields.
GET /api/v0/some_endpoint/?ids={id_1},{id_2}
Returns list of serialized models with IDs that match an ID in the given
`ids` query parameter with all default fields.
GET /api/v0/some_endpoint/?ids={id_1},{id_2}&fields={some_field_1},{some_field_2}
Returns list of serialized models with IDs that match an ID in the given
`ids` query parameter with only the fields in the given `fields` query parameter.
GET /api/v0/some_endpoint/?ids={id_1},{id_2}&exclude={some_field_1},{some_field_2}
Returns list of serialized models with IDs that match an ID in the given
`ids` query parameter with all fields except those in the given `exclude` query
parameter.
POST /api/v0/some_endpoint/
{
"ids": [
"{id_1}",
"{id_2}",
...
"{id_200}"
],
"fields": [
"{some_field_1}",
"{some_field_2}"
]
}
**Response Values**
Since this is an abstract class, this view just returns an empty list.
**Parameters**
This view supports filtering the results by a given list of IDs. It also supports
explicitly specifying the fields to include in each result with `fields` as well of
the fields to exclude with `exclude`.
For GET requests, these parameters are passed in the query string.
For POST requests, these parameters are passed as a JSON dict in the request body.
ids -- The comma-separated list of identifiers for which results are filtered to.
For example, 'edX/DemoX/Demo_Course,course-v1:edX+DemoX+Demo_2016'. Default is to
return all courses.
fields -- The comma-separated fields to return in the response.
For example, 'course_id,created'. Default is to return all fields.
exclude -- The comma-separated fields to exclude in the response.
For example, 'course_id,created'. Default is to not exclude any fields.
**Notes**
* GET is usable when the number of IDs is relatively low
* POST is required when the number of course IDs would cause the URL to be too long.
* POST functions the same as GET here. It does not modify any state.
"""
ids = None
fields = None
exclude = None
always_exclude = []
model_id_field = 'id'
ids_param = 'ids'
def get_serializer(self, *args, **kwargs):
kwargs.update({
'context': self.get_serializer_context(),
'fields': self.fields,
'exclude': self.exclude
})
return self.get_serializer_class()(*args, **kwargs)
def get(self, request, *args, **kwargs):
query_params = self.request.query_params
self.fields = split_query_argument(query_params.get('fields'))
exclude = split_query_argument(query_params.get('exclude'))
self.exclude = self.always_exclude + (exclude if exclude else [])
self.ids = split_query_argument(query_params.get(self.ids_param))
self.verify_ids()
return super(APIListView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
# self.request.data is a QueryDict. For keys with singleton lists as values,
# QueryDicts return the singleton element of the list instead of the list itself,
# which is undesirable. So, we convert to a normal dict.
request_data_dict = dict(request.data)
self.fields = request_data_dict.get('fields')
exclude = request_data_dict.get('exclude')
self.exclude = self.always_exclude + (exclude if exclude else [])
self.ids = request_data_dict.get(self.ids_param)
self.verify_ids()
return super(APIListView, self).get(request, *args, **kwargs)
def verify_ids(self):
"""
Optionally raise an exception if any of the IDs set as self.ids are invalid.
By default, no verification is done.
Subclasses can override this if they wish to perform verification.
"""
pass
def base_field_dict(self, item_id):
"""Default result with fields pre-populated to default values."""
field_dict = {
self.model_id_field: item_id,
}
return field_dict
def update_field_dict_from_model(self, model, base_field_dict=None, field_list=None):
field_list = (field_list if field_list else
[f.name for f in self.model._meta.get_fields()]) # pylint: disable=protected-access
field_dict = base_field_dict if base_field_dict else {}
field_dict.update({field: getattr(model, field) for field in field_list})
return field_dict
def postprocess_field_dict(self, field_dict):
"""Applies some business logic to final result without access to any data from the original model."""
return field_dict
def group_by_id(self, queryset):
"""Return results aggregated by a distinct ID."""
aggregate_field_dict = []
for item_id, model_group in groupby(queryset, lambda x: (getattr(x, self.model_id_field))):
field_dict = self.base_field_dict(item_id)
for model in model_group:
field_dict = self.update_field_dict_from_model(model, base_field_dict=field_dict)
field_dict = self.postprocess_field_dict(field_dict)
aggregate_field_dict.append(field_dict)
return aggregate_field_dict
def get_query(self):
return reduce(lambda q, item_id: q | Q(id=item_id), self.ids, Q())
@raise_404_if_none
def get_queryset(self):
if self.ids:
queryset = self.model.objects.filter(self.get_query())
else:
queryset = self.model.objects.all()
field_dict = self.group_by_id(queryset)
# Django-rest-framework will serialize this dictionary to a JSON response
return field_dict
| agpl-3.0 | 8,010,458,603,060,841,000 | 36.561798 | 109 | 0.626683 | false | 4.001995 | false | false | false |
vvoZokk/dnn | dnn_project/generate_protos.py | 1 | 4557 | #!/usr/bin/env python
import os
import argparse
import re
from collections import defaultdict
import sys
KNOWN_TYPES = {
"double" : "double",
"int" : "int32",
"size_t" : "uint32",
"float" : "float",
"string" : "string",
"bool" : "bool",
"complex<double>" : "double",
"pair<string, size_t>" : "TStringToUintPair",
"pair<size_t, size_t>" : "TUintToUintPair",
}
VECTOR_RE = re.compile("(?:vector|ActVector)+<(.*)>")
def generateProtos(all_structures, package, dst, imports):
for fname, structures in all_structures.iteritems():
dst_file = fname.split(".")[0] + ".proto"
with open(os.path.join(dst, dst_file), 'w') as f_ptr:
f_ptr.write("package %s;\n" % package)
f_ptr.write("\n")
for imp in imports:
f_ptr.write("import \"{}\";\n".format(imp))
f_ptr.write("\n")
for s in structures:
f_ptr.write("message %s {\n" % s['name'])
i = 1
for f in s['fields']:
if KNOWN_TYPES.get(f[0]) is None:
m = VECTOR_RE.match(f[0])
if m is None:
raise Exception("Can't match {}".format(f[0]))
f_ptr.write(" repeated %s %s = %s;\n" % (KNOWN_TYPES[ m.group(1) ], f[1], str(i)))
if m.group(1).startswith("complex"):
f_ptr.write(" repeated %s %s = %s;\n" % (KNOWN_TYPES[ m.group(1) ], f[1] + "_imag", str(i+1)))
i += 1
else:
f_ptr.write(" required %s %s = %s;\n" % (KNOWN_TYPES[ f[0] ], f[1], str(i)))
i += 1
f_ptr.write("}\n")
f_ptr.write("\n")
def parseSources(src):
structures = defaultdict(list)
for root, dirs, files in os.walk(src):
for f in files:
af = os.path.join(root, f)
generate_proto = False
if af.endswith(".cpp") or af.endswith(".h"):
for l in open(af):
l = l.strip()
l = l.split("//")[0]
if "@GENERATE_PROTO@" in l:
generate_proto = True
struct = {}
curly_counter = 0
continue
if generate_proto:
curly_counter += l.count("{")
curly_counter -= l.count("}")
if len(struct) == 0:
m = re.match("[\W]*(?:class|struct)[\W]+([^ ]+)", l)
if not m:
raise Exception("Can't parse GENERATE_PROTO class or struct")
struct['name'] = m.group(1)
struct['fields'] = []
else:
m = re.match(
"(%s)[\W]+(?!__)([^ ]*);[\W]*$" % "|".join(
KNOWN_TYPES.keys() + [ "(?:vector|ActVector)+<{}>".format(t) for t in KNOWN_TYPES.keys() ]
),
l
)
if m and curly_counter == 1:
struct['fields'].append( (m.group(1), m.group(2)) )
continue
if len(struct) > 0 and curly_counter == 0:
generate_proto = False
structures[f].append(struct)
return structures
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--source-path", help="Path to the sources",
type=str, required=True)
parser.add_argument("-d", "--dest-path", help="Path where to store .proto",
type=str, required=True)
parser.add_argument("-p", "--package", help="Package name, default : %(default)s",
type=str, required=False, default="Protos")
parser.add_argument("-i", "--imports", help="Put imports to all messages (separated by ;)",
type=str, required=False, default=None)
args = parser.parse_args()
structures = parseSources(args.source_path)
imports = []
if args.imports:
imports = [ v.strip() for v in args.imports.split(";") if v.strip() ]
generateProtos(structures, args.package, args.dest_path, imports)
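# --- Example (added sketch) ---
# A C++ struct preceded by a line containing @GENERATE_PROTO@ (note that parseSources
# strips "//" comments first, so the marker has to survive that, e.g. inside /* ... */),
# such as:
#   /* @GENERATE_PROTO@ */
#   struct TNeuronState {
#       double membrane;
#       vector<double> weights;
#   };
# would be written to TNeuronState's .proto file roughly as:
#   message TNeuronState {
#       required double membrane = 1;
#       repeated double weights = 2;
#   }
# (exact field numbering follows the counter logic in generateProtos above).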
| mit | 8,567,659,111,309,439,000 | 43.242718 | 126 | 0.432521 | false | 4.025618 | false | false | false |
yasserglez/pytiger2c | packages/pytiger2c/dot.py | 1 | 2786 | # -*- coding: utf-8 -*-
"""
Clases utilizadas en la generación de un archivo Graphviz DOT con el
árbol de sintáxis abstracta creado a partir de un programa Tiger.
"""
class DotGenerator(object):
"""
Clase utilizada para la generación de grafos en formato Graphviz DOT.
"""
def __init__(self):
"""
Esta clase es utilizada en la generación de código Graphivz DOT
a partir de un árbol de sintáxis abstracta de un programa Tiger.
"""
self._nodes = []
self._edges = []
self._num_nodes = 0
def add_node(self, label):
"""
Añade un nuevo nodo al grafo actualmente en creación.
@type label: C{str}
@param label: Nombre del nodo que se quiere añadir.
@rtype: C{str}
@return: Identificador del nuevo nodo añadido. Este identificador
puede ser utilizado para crear nuevas aristas, utilizando
el método C{add_edge} de esta misma clase, que tengan
este nodo como uno de los extremos.
"""
self._num_nodes += 1
name = 'node{number}'.format(number=self._num_nodes)
code = '{name} [label="{label}"];'.format(name=name, label=label)
self._nodes.append(code)
return name
def add_edge(self, from_node, to_node):
"""
Añade una arista no dirigida al grafo actualmente en creación.
@type from_node: C{str}
@param from_node: Cadena de caracteres que identifica un nodo
extremo de la arista.
@type to_node: C{str}
@param to_node: Cadena de caracteres que identifica un nodo
extremo de la arista.
"""
template = '{from_node} -- {to_node};'
code = template.format(from_node=from_node, to_node=to_node)
self._edges.append(code)
def write(self, output_fd):
"""
Escribe el código Graphviz DOT en un descriptor de fichero.
@type output_fd: C{file}
@param output_fd: Descriptor de fichero donde se debe escribir el
código Graphviz DOT resultante de la traducción del programa
Tiger descrito por el árbol de sintáxis abstracta.
"""
indent = ' ' * 4
output_fd.write('graph AST {\n')
output_fd.write(indent)
output_fd.write('node [shape=record];\n\n')
for node_code in self._nodes:
output_fd.write(indent)
output_fd.write(node_code)
output_fd.write('\n')
output_fd.write('\n')
for edge_code in self._edges:
output_fd.write(indent)
output_fd.write(edge_code)
output_fd.write('\n')
output_fd.write('}\n')
| mit | -712,239,282,687,048,000 | 33.575 | 74 | 0.574476 | false | 3.414815 | false | false | false |
Arzaroth/virtualnetwork | network/mapParser.py | 1 | 4727 | #!/usr/bin/python3.3 -O
from pyrser import grammar,meta
from pyrser.directives import ignore
from network import Host, Router
import sys
def insensitiveCase(s):
return "[" + " ".join("['" + "'|'".join(x) + "']" for x in map((lambda each: [each.lower(), each.upper()]), s)) + "]"
class MapParser(grammar.Grammar):
entry = "Map"
grammar = """
Map = [#init_map(_) @ignore("null") [[[Hosts:h #add_host(_, h)] | [Routers:r #add_router(_, r)]] eol*]+
#link_hosts(_) eof]
Hosts = [#init_host(_) '[' ws {host} ws ']' eol+ [[Name | Ip | TTL | Route]:r #add_fhost(_, r)]+]
Routers = [#init_router(_) '[' ws {router} ws ']' eol+ [[Name | Ip | TTL | Route]:r #add_frouter(_, r)]+]
Name = [ws {name} ws '=' ws id:i #ret_f(_, "id", i) ws eol+]
Ip = [ws {ip} ws '=' ws cidraddr:c #ret_f(_, "ip", c) ws eol+]
TTL = [ws {ttl} ws '=' ws num:n #ret_f(_, "ttl", n) ws eol+]
Route = [ws {route} ws '=' ws [[{default}:c ws id:i #ret_f(_, "route", c, i)]
| [cidraddr:c ws id:i #ret_f(_, "route", c, i)]] ws eol+]
cidraddr = [num '.' num '.' num '.' num '/' num]
ws = [[' ' | '\r' | '\t']*]
""".format(host = insensitiveCase("Host"),
router = insensitiveCase("Router"),
route = insensitiveCase("Route"),
ip = insensitiveCase("IP"),
ttl = insensitiveCase("TTL"),
name = insensitiveCase("Name"),
default = insensitiveCase("Default"),
internet = insensitiveCase("Internet"))
@meta.hook(MapParser)
def init_map(self, ast):
ast.network = {}
ast.routes = {}
return True
@meta.hook(MapParser)
def init_host(self, ast):
self.init_map(ast)
ast.network["route"] = []
return True
@meta.hook(MapParser)
def init_router(self, ast):
self.init_host(ast)
ast.network["ips"] = []
return True
@meta.hook(MapParser)
def link_hosts(self, ast):
for k,v in ast.routes.items():
for x in v:
if x[1] not in ast.network:
raise Exception("Unknown host ({0}) for {1} route.".format(x[1], k))
ast.network[k].addRoute(ast.network[x[1]], x[0])
return True
def base_add(ast, h):
if "name" not in h.network:
raise Exception("Missing name field for given host:\n{0}".format(self.value(h)))
if h.network["name"] in ast.network:
raise Exception("Redefinion of {0}.".format(h.network["name"]))
ast.routes[h.network["name"]] = h.network["route"][::]
@meta.hook(MapParser)
def add_host(self, ast, h):
base_add(ast, h)
if "ip" not in h.network:
raise Exception("Missing ip field for given host:\n{0}".format(self.value(h)))
if "ttl" in h.network:
ast.network[h.network["name"]] = Host(h.network["name"],
h.network["ip"], h.network["ttl"])
else:
ast.network[h.network["name"]] = Host(h.network["name"],
h.network["ip"])
return True
@meta.hook(MapParser)
def add_router(self, ast, h):
base_add(ast, h)
if not h.network["ips"]:
raise Exception("Missing ip field for given host")
if "ttl" in h.network:
ast.network[h.network["name"]] = Router(h.network["name"],
*h.network["ips"], ttl = h.network["ttl"])
else:
ast.network[h.network["name"]] = Router(h.network["name"],
*h.network["ips"])
return True
@meta.hook(MapParser)
def ret_f(self, ast, *args):
ast.retvals = [args[0]]
ast.retvals.extend([self.value(x) for x in args[1:]])
return True
@meta.hook(MapParser)
def add_fhost(self, ast, r):
def reg_name(ast, name):
ast.network["name"] = name[0]
def reg_ip(ast, ip):
ast.network["ip"] = ip[0]
def reg_ttl(ast, ttl):
ast.network["ttl"] = ttl[0]
def reg_route(ast, route):
ast.network["route"].append(route)
fmap = {'id' : reg_name,
'ip' : reg_ip,
'ttl' : reg_ttl,
'route' : reg_route}
if r.retvals[0] in fmap:
fmap[r.retvals[0]](ast, r.retvals[1:])
return True
@meta.hook(MapParser)
def add_frouter(self, ast, r):
def reg_name(ast, name):
ast.network["name"] = name[0]
def reg_ip(ast, ip):
ast.network["ips"].append(ip[0])
def reg_ttl(ast, ttl):
ast.network["ttl"] = ttl[0]
def reg_route(ast, route):
ast.network["route"].append(route)
fmap = {'id' : reg_name,
'ip' : reg_ip,
'ttl' : reg_ttl,
'route' : reg_route}
if r.retvals[0] in fmap:
fmap[r.retvals[0]](ast, r.retvals[1:])
return True
| gpl-3.0 | 5,205,902,679,381,287,000 | 31.826389 | 121 | 0.533108 | false | 3.136695 | false | false | false |
qma/pants | src/python/pants/backend/jvm/tasks/resources_task.py | 1 | 3335 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from abc import abstractmethod
from pants.backend.core.tasks.task import Task
from pants.option.custom_types import list_option
class ResourcesTask(Task):
"""A base class for tasks that process or create resource files.
This base assumes that resources targets or targets that generate resources are independent from
each other and can be processed in isolation in any order.
"""
@classmethod
def product_types(cls):
return ['runtime_classpath']
@classmethod
def register_options(cls, register):
super(ResourcesTask, cls).register_options(register)
register('--confs', advanced=True, type=list_option, default=['default'],
help='Prepare resources for these Ivy confs.')
@classmethod
def prepare(cls, options, round_manager):
round_manager.require_data('compile_classpath')
@property
def cache_target_dirs(self):
return True
def execute(self):
# Tracked and returned for use in tests.
# TODO: Rewrite those tests. execute() is not supposed to return anything.
processed_targets = []
compile_classpath = self.context.products.get_data('compile_classpath')
runtime_classpath = self.context.products.get_data('runtime_classpath', compile_classpath.copy)
all_relevant_resources_targets = self.find_all_relevant_resources_targets()
if not all_relevant_resources_targets:
return processed_targets
with self.invalidated(targets=all_relevant_resources_targets,
fingerprint_strategy=self.create_invalidation_strategy(),
invalidate_dependents=False,
topological_order=False) as invalidation:
for vt in invalidation.all_vts:
# Register the target's chroot in the products.
for conf in self.get_options().confs:
runtime_classpath.add_for_target(vt.target, [(conf, vt.results_dir)])
# And if it was invalid, generate the resources to the chroot.
if not vt.valid:
self.prepare_resources(vt.target, vt.results_dir)
processed_targets.append(vt.target)
vt.update()
return processed_targets
@abstractmethod
def find_all_relevant_resources_targets(self):
"""Returns an iterable over all the relevant resources targets in the context."""
def create_invalidation_strategy(self):
"""Creates a custom fingerprint strategy for determining invalid resources targets.
:returns: A custom fingerprint strategy to use for determining invalid targets, or `None` to
use the standard target payload.
:rtype: :class:`pants.base.fingerprint_strategy.FingerprintStrategy`
"""
return None
@abstractmethod
def prepare_resources(self, target, chroot):
"""Prepares the resources associated with `target` in the given `chroot`.
:param target: The target to prepare resource files for.
:type target: :class:`pants.build_graph.target.Target`
:param string chroot: An existing, clean chroot dir to generate `target`'s resources to.
"""
| apache-2.0 | 8,281,013,611,547,203,000 | 37.333333 | 99 | 0.704648 | false | 4.382392 | false | false | false |
opennode/waldur-mastermind | src/waldur_core/structure/managers.py | 1 | 4844 | from django.db import models
from waldur_core.core.managers import GenericKeyMixin
def get_permission_subquery(permissions, user):
subquery = models.Q()
for entity in ('customer', 'project'):
path = getattr(permissions, '%s_path' % entity, None)
if not path:
continue
if path == 'self':
prefix = 'permissions__'
else:
prefix = path + '__permissions__'
kwargs = {prefix + 'user': user, prefix + 'is_active': True}
subquery |= models.Q(**kwargs)
build_query = getattr(permissions, 'build_query', None)
if build_query:
subquery |= build_query(user)
return subquery
def filter_queryset_for_user(queryset, user):
if user is None or user.is_staff or user.is_support:
return queryset
try:
permissions = queryset.model.Permissions
except AttributeError:
return queryset
subquery = get_permission_subquery(permissions, user)
if not subquery:
return queryset
return queryset.filter(subquery).distinct()
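

# Illustrative usage sketch (the model and request objects are placeholders, not
# part of this module): staff and support users receive the queryset unchanged,
# everyone else only sees objects reachable through their active permissions.
#
#     visible_projects = filter_queryset_for_user(Project.objects.all(), request.user)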
class StructureQueryset(models.QuerySet):
""" Provides additional filtering by customer or project (based on permission definition).
Example:
.. code-block:: python
Instance.objects.filter(project=12)
Droplet.objects.filter(
customer__name__startswith='A',
state=Droplet.States.ONLINE)
Droplet.objects.filter(Q(customer__name='Alice') | Q(customer__name='Bob'))
"""
def exclude(self, *args, **kwargs):
return super(StructureQueryset, self).exclude(
*[self._patch_query_argument(a) for a in args],
**self._filter_by_custom_fields(**kwargs)
)
def filter(self, *args, **kwargs):
return super(StructureQueryset, self).filter(
*[self._patch_query_argument(a) for a in args],
**self._filter_by_custom_fields(**kwargs)
)
def _patch_query_argument(self, arg):
# patch Q() objects if passed and add support of custom fields
if isinstance(arg, models.Q):
children = []
for opt in arg.children:
if isinstance(opt, models.Q):
children.append(self._patch_query_argument(opt))
else:
args = self._filter_by_custom_fields(**dict([opt]))
children.append(tuple(args.items())[0])
arg.children = children
return arg
def _filter_by_custom_fields(self, **kwargs):
# traverse over filter arguments in search of custom fields
args = {}
fields = [f.name for f in self.model._meta.get_fields()]
for field, val in kwargs.items():
base_field = field.split('__')[0]
if base_field in fields:
args.update(**{field: val})
elif base_field in ('customer', 'project'):
args.update(self._filter_by_permission_fields(base_field, field, val))
else:
args.update(**{field: val})
return args
def _filter_by_permission_fields(self, name, field, value):
# handle fields connected via permissions relations
extra = '__'.join(field.split('__')[1:]) if '__' in field else None
try:
# look for the target field path in Permissions class,
path = getattr(self.model.Permissions, '%s_path' % name)
except AttributeError:
# fallback to FieldError if it's missed
return {field: value}
else:
if path == 'self':
if extra:
return {extra: value}
else:
return {
'pk': value.pk if isinstance(value, models.Model) else value
}
else:
if extra:
path += '__' + extra
return {path: value}
StructureManager = models.Manager.from_queryset(StructureQueryset)
class ServiceSettingsManager(GenericKeyMixin, models.Manager):
""" Allows to filter and get service settings by generic key """
def get_available_models(self):
""" Return list of models that are acceptable """
from waldur_core.structure.models import BaseResource
return BaseResource.get_all_models()
class SharedServiceSettingsManager(ServiceSettingsManager):
def get_queryset(self):
return (
super(SharedServiceSettingsManager, self).get_queryset().filter(shared=True)
)
class PrivateServiceSettingsManager(ServiceSettingsManager):
def get_queryset(self):
return (
super(PrivateServiceSettingsManager, self)
.get_queryset()
.filter(shared=False)
)
| mit | 4,239,172,662,321,143,000 | 31.293333 | 94 | 0.579067 | false | 4.510242 | false | false | false |
mkocka/galaxytea | modeling/domcek/plots.py | 1 | 4294 | import matplotlib.pyplot as plt
from numpy import *
###List of variables
# r_in                [10**10 cm]        inner radius
# r_out [10**10 cm] outer radius
# step [10**10 cm] step of plot
# alfa [] parameter of accretion
# M_16 [10**16 g.s**(-1)] accretion flow
# m_1 [solar mass] mass of compact object
# R_hv [10**10 cm] radius of compact object
# R_10 [10**10 cm] distance from compact object
# f numerical factor
###List of computed parameters
# Surface density [g.cm**(-2)] (sigma)
# Height [cm] (H)
# Density [g.cm**(-3)] (rho)
# Central disc temeprature [K] (T_c)
# Opacity [] (tau)
# viscosity [cm**2.s**(-1)] (nu)
# radial velocity towards center [cm.s**(-1)] (v_r)
###function solutions parameters
# parameter 1 r_in
# parameter 2 r_out
# parameter 3 step
# parameter 4 alfa
# parameter 5 M_16
# parameter 6 m_1
# parameter 7 R_hv
def solutions(r_in,r_out,step,alfa,M_16,m_1,R_hv):
#defining lists
list_function = arange(r_in,r_out,step)
R_10_l,surface_density_l,height_l,density_l,Fx = ([] for i in range(5))
temperature_l,opacity_l,viscosity_l,radial_velocity_l = ([] for i in range(4))
#computation and appending to lists
for R_10 in list_function:
f=(1-((R_hv)/(R_10))**(1.0/2))**(1.0/4)
surface_density = 5.2*alfa**(-4.0/5)*M_16**(7.0/10)*m_1**(1.0/4)*R_10**(-3.0/4)*f**(14.0/5)
height = 1.7*10**8*alfa**(-1.0/10)*M_16**(3.0/20)*m_1**(-3.0/8)*R_10**(9.0/8)*f**(3.0/5)
density = 3.1*10**(-8)*alfa**(-7.0/10)*M_16**(11.0/20)*m_1**(5.0/8)*R_10**(-15.0/8)*f**(11.0/5)
temperature = 1.4*10**4*alfa**(-1.0/5)*M_16**(3.0/10)*m_1**(1.0/4)*R_10**(-3.0/4)*f**(6.0/5)
opacity = 190*alfa**(-4.0/5)*M_16**(1.0/5)*f**(4.0/5)
viscosity = 1.8*10**14*alfa**(4.0/5)*M_16**(3.0/10)*m_1**(-1.0/4)*R_10**(3.0/4)*f**(6.0/5)
radial_velocity = 2.7*10**4*alfa**(4.0/5)*M_16**(3.0/10)*m_1**(-1.0/4)*R_10**(-1.0/4)*f**(-14.0/5)
R_10_l.append(R_10)
surface_density_l.append(surface_density)
height_l.append(height)
density_l.append(density)
temperature_l.append(temperature)
opacity_l.append(opacity)
viscosity_l.append(viscosity)
radial_velocity_l.append(radial_velocity)
Fx.append(f)
	#transformation R_10 to kilometers
R_km = [ x / 10**(-4) for x in R_10_l]
return R_km, surface_density_l, height_l, density_l,temperature_l,opacity_l,viscosity_l,radial_velocity_l,Fx
#for definitions of parameters look up
r_in =1.0001*10**(-4)
r_out =10**(-2)
step = 10**(-6)
alfa = 0.5
M_16 = 63
m_1 = 1.5
R_hv = 1.0*10**(-4)
lists=solutions(r_in,r_out,step,alfa,M_16,m_1,R_hv)
print 30*"-"
print "Used parameter values"
print 30*"-"
print "innder radius:", 10*".",r_in, 10*".", "[10$^{10}$ cm]"
print "outer radius:", 10*".", r_out, 10*".", "[10$^{10}$ cm]"
print "step of plot:", 10*".", step, 10*".", "[10$^{10}$ cm]"
print "parameter of accretion alfa:", 10*".", alfa
print "accretion flow:", 10*".", M_16, 10*".", "[10$^6$ g.s${-1)}$]"
print "mass of compact object:", 10*".", m_1, 10*".", "[solar mass]"
print "radius of compact object:", 10*".", R_hv, 10*".", "[10$^{10}$ cm]"
plt.plot(lists[0], lists[1])
plt.title('surface density')
plt.xlabel('radius [km]')
plt.ylabel('surface density [g.cm$^{-2}$] ')
plt.grid()
plt.savefig("surface density")
plt.gcf().clear()
plt.plot(lists[0], lists[2])
plt.title('height')
plt.xlabel('radius [km]')
plt.ylabel('height [cm] ')
plt.grid()
plt.savefig("height")
plt.gcf().clear()
plt.plot(lists[0], lists[3])
plt.title('density')
plt.xlabel('radius [km]')
plt.ylabel('density [g.cm$^{-3}$] ')
plt.grid()
plt.savefig("density")
plt.gcf().clear()
plt.plot(lists[0], lists[4])
plt.title('temperature')
plt.xlabel('radius [km]')
plt.ylabel('temperature [K] ')
plt.grid()
plt.savefig("temperature")
plt.gcf().clear()
plt.plot(lists[0], lists[5])
plt.title('opacity')
plt.xlabel('radius [km]')
plt.ylabel('opacity ')
plt.grid()
plt.savefig("opacity")
plt.gcf().clear()
plt.plot(lists[0], lists[6])
plt.title('viscosity')
plt.xlabel('radius [km]')
plt.ylabel('viscosity [cm$^{2}$.s$^{-1}$] ')
plt.grid()
plt.savefig("viscosity")
plt.gcf().clear()
plt.plot(lists[0], lists[7])
plt.title('radial velocity')
plt.xlabel('radius [km]')
plt.ylabel('radial velocity [cm.s$^{-1}$] ')
plt.grid()
plt.savefig("radial velocity")
plt.gcf().clear()
| mit | 4,655,926,774,987,794,000 | 29.671429 | 109 | 0.608058 | false | 2.229491 | false | false | false |
MobSF/Mobile-Security-Framework-MobSF | mobsf/StaticAnalyzer/views/ios/view_source.py | 1 | 5000 | # -*- coding: utf_8 -*-
"""iOS View Source."""
import io
import json
import logging
import ntpath
import os
from pathlib import Path
import biplist
from django.conf import settings
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.utils.html import escape
from mobsf.MobSF.forms import FormUtil
from mobsf.MobSF.utils import (
is_file_exists,
is_safe_path,
print_n_send_error_response,
read_sqlite,
)
from mobsf.StaticAnalyzer.forms import (
ViewSourceIOSApiForm,
ViewSourceIOSForm,
)
logger = logging.getLogger(__name__)
def set_ext_api(file_path):
"""Smart Function to set Extenstion."""
ext = file_path.split('.')[-1]
if ext == 'plist':
return 'plist'
elif ext == 'xml':
return 'xml'
elif ext in ['sqlitedb', 'db', 'sqlite']:
return 'db'
elif ext == 'm':
return 'm'
else:
return 'txt'
def run(request, api=False):
"""View iOS Files."""
try:
logger.info('View iOS Source File')
exp = 'Error Description'
file_format = None
if api:
fil = request.POST['file']
md5_hash = request.POST['hash']
mode = request.POST['type']
viewsource_form = ViewSourceIOSApiForm(request.POST)
else:
fil = request.GET['file']
md5_hash = request.GET['md5']
mode = request.GET['type']
viewsource_form = ViewSourceIOSForm(request.GET)
typ = set_ext_api(fil)
if not viewsource_form.is_valid():
err = FormUtil.errors_message(viewsource_form)
if api:
return err
return print_n_send_error_response(request, err, False, exp)
base = Path(settings.UPLD_DIR) / md5_hash
if mode == 'ipa':
src1 = base / 'payload'
src2 = base / 'Payload'
if src1.exists():
src = src1
elif src2.exists():
src = src2
else:
raise Exception('MobSF cannot find Payload directory')
elif mode == 'ios':
src = base
sfile = src / fil
sfile = sfile.as_posix()
if not is_safe_path(src, sfile):
msg = 'Path Traversal Detected!'
if api:
return {'error': 'Path Traversal Detected!'}
return print_n_send_error_response(request, msg, False, exp)
dat = ''
sql_dump = {}
if typ == 'm':
file_format = 'cpp'
with io.open(sfile,
mode='r',
encoding='utf8',
errors='ignore') as flip:
dat = flip.read()
elif typ == 'xml':
file_format = 'xml'
with io.open(sfile,
mode='r',
encoding='utf8',
errors='ignore') as flip:
dat = flip.read()
elif typ == 'plist':
file_format = 'json'
dat = biplist.readPlist(sfile)
try:
dat = json.dumps(dat, indent=4, sort_keys=True)
except Exception:
pass
elif typ == 'db':
file_format = 'asciidoc'
sql_dump = read_sqlite(sfile)
elif typ == 'txt' and fil == 'classdump.txt':
file_format = 'cpp'
app_dir = os.path.join(settings.UPLD_DIR, md5_hash + '/')
cls_dump_file = os.path.join(app_dir, 'classdump.txt')
if is_file_exists(cls_dump_file):
with io.open(cls_dump_file, # lgtm [py/path-injection]
mode='r',
encoding='utf8',
errors='ignore') as flip:
dat = flip.read()
else:
dat = 'Class Dump result not Found'
elif typ == 'txt':
file_format = 'text'
with io.open(sfile,
mode='r',
encoding='utf8',
errors='ignore') as flip:
dat = flip.read()
else:
if api:
return {'error': 'Invalid Parameters'}
return HttpResponseRedirect('/error/')
context = {
'title': escape(ntpath.basename(fil)),
'file': escape(ntpath.basename(fil)),
'type': file_format,
'data': dat,
'sqlite': sql_dump,
'version': settings.MOBSF_VER,
}
template = 'general/view.html'
if api:
return context
return render(request, template, context)
except Exception as exp:
logger.exception('Error Viewing Source')
msg = str(exp)
exp = exp.__doc__
if api:
return print_n_send_error_response(request, msg, True, exp)
return print_n_send_error_response(request, msg, False, exp)
| gpl-3.0 | 7,384,499,573,915,808,000 | 31.258065 | 72 | 0.4996 | false | 4.05515 | false | false | false |
alexandrevicenzi/pycompat | tests/test.py | 1 | 2225 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# These tests run only under Linux and Python 2.x +
# This is the Travis CI environment.
#
from pycompat import python as py
from pycompat import system
import sys
import unittest
class TestPyCompat(unittest.TestCase):
def test_python_is_64bits(self):
self.assertEqual(py.is_64bits, not py.is_32bits)
def test_is_cpython(self):
self.assertEqual(py.is_cpython, not py.is_pypy)
def test_immutability(self):
with self.assertRaises(AttributeError):
py.is2xx = 1
def test_python_is1xx(self):
self.assertFalse(py.is1xx)
def test_python_is2xx(self):
self.assertEqual(py.is2xx, sys.version_info[0] == 2)
def test_python_is3xx(self):
self.assertEqual(py.is3xx, sys.version_info[0] == 3)
def test_python_is_eqx(self):
self.assertTrue(py.is_eq(sys.version_info[0]))
def test_python_is_eqxx(self):
self.assertTrue(py.is_eq(sys.version_info[0], sys.version_info[1]))
def test_python_is_eqxxx(self):
self.assertTrue(py.is_eq(sys.version_info[0], sys.version_info[1], sys.version_info[2]))
def test_python_is_gtx(self):
self.assertTrue(py.is_gt(sys.version_info[0] - 1))
def test_python_is_gtxx(self):
self.assertTrue(py.is_gt(sys.version_info[0], sys.version_info[1] - 1))
def test_python_is_gtxxx(self):
self.assertTrue(py.is_gt(sys.version_info[0], sys.version_info[1], sys.version_info[2] - 1))
def test_python_is_ltx(self):
self.assertTrue(py.is_lt(sys.version_info[0] + 1))
def test_python_is_ltxx(self):
self.assertTrue(py.is_lt(sys.version_info[0], sys.version_info[1] + 1))
def test_python_is_ltxxx(self):
self.assertTrue(py.is_lt(sys.version_info[0], sys.version_info[1], sys.version_info[2] + 1))
def test_system_is_windows(self):
self.assertFalse(system.is_windows)
def test_system_is_cygwin(self):
self.assertFalse(system.is_cygwin)
def test_system_is_mac_os(self):
self.assertFalse(system.is_mac_os)
def test_system_is_linux(self):
self.assertTrue(system.is_linux)
if __name__ == '__main__':
unittest.main()
| mit | -7,611,998,995,524,017,000 | 27.525641 | 100 | 0.649888 | false | 2.966667 | true | false | false |
mitsuhiko/zine | zine/utils/exceptions.py | 1 | 1632 | # -*- coding: utf-8 -*-
"""
zine.utils.exceptions
~~~~~~~~~~~~~~~~~~~~~
Exception utility module.
:copyright: (c) 2010 by the Zine Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from zine.i18n import _
class ZineException(Exception):
"""Baseclass for all Zine exceptions."""
message = None
def __init__(self, message=None):
Exception.__init__(self)
if message is not None:
self.message = message
def __str__(self):
return self.message or ''
def __unicode__(self):
return str(self).decode('utf-8', 'ignore')
class UserException(ZineException):
"""Baseclass for exception with unicode messages."""
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
if self.message is None:
return u''
return unicode(self.message)
def summarize_exception(exc_info):
def _to_unicode(x):
try:
return unicode(x)
except UnicodeError:
return str(x).encode('utf-8', 'replace')
exc_type, exc_value, tb = exc_info
if isinstance(exc_type, basestring):
prefix = _to_unicode(exc_type)
else:
prefix = _to_unicode(exc_type.__name__)
message = _to_unicode(exc_value)
filename = tb.tb_frame.f_globals.get('__file__')
if filename is None:
        filename = _(u'unknown file')
else:
filename = _to_unicode(filename)
if filename.endswith('.pyc'):
filename = filename[:-1]
return u'%s: %s' % (prefix, message), (filename, tb.tb_lineno)
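

# Illustrative usage sketch (not part of the original module); ``sys`` would have
# to be imported for this to run:
#
#     import sys
#     try:
#         raise UserException(u'something went wrong')
#     except UserException:
#         summary, (filename, lineno) = summarize_exception(sys.exc_info())
#         # summary == u'UserException: something went wrong'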
| bsd-3-clause | -1,362,887,440,807,812,400 | 24.5 | 72 | 0.579657 | false | 3.84 | false | false | false |
kasperschmidt/TDOSE | tdose_extract_spectra.py | 1 | 43824 | # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
import numpy as np
import sys
import astropy.io.fits as afits
import collections
import tdose_utilities as tu
import tdose_extract_spectra as tes
import tdose_build_mock_cube as tbmc
import pdb
import scipy.ndimage.filters as snf
import matplotlib as mpl
mpl.use('Agg') # prevent pyplot from opening window; enables closing ssh session with detached screen running TDOSE
import matplotlib.pyplot as plt
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def extract_spectra(model_cube_file,source_association_dictionary=None,nameext='tdose_spectrum',outputdir='./',clobber=False,
variance_cube_file=None,variance_cube_ext='ERROR',source_model_cube_file=None,source_cube_ext='DATA',
model_cube_ext='DATA',layer_scale_ext='WAVESCL',data_cube_file=None,verbose=True):
"""
Assemble the spectra determined by the wavelength layer scaling of the normalized models
when generating the source model cube
--- INPUT ---
model_cube_file Model cube to base extraction on (using header info and layer scales)
source_association_dictionary Source association dictionary defining what sources should be combined into
objects (individual spectra).
nameext The name extension to use for saved spectra
outputdir Directory to save spectra to
clobber Overwrite spectra if they already exists
    variance_cube_file              File containing variance cube of data to be used to estimate noise on 1D spectrum
variance_cube_ext Extension of variance cube to use
source_model_cube_file The source model cube defining the individual sources
    source_cube_ext                 Extension of source model cube file that contains source models
model_cube_ext Extension of model cube file that contains model
layer_scale_ext Extension of model cube file that contains the layer scales
data_cube_file File containing original data cube used for extraction of aperture spectra
verbose
--- EXAMPLE OF USE ---
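    A hypothetical call (the file names below are placeholders, not files shipped with TDOSE):

    import tdose_extract_spectra as tes
    sourcedic = {'0000000001': [0], '0000000002': [1, 2]}   # two objects built from three sources
    specfiles = tes.extract_spectra('tdose_model_cube.fits', source_association_dictionary=sourcedic,
                                    variance_cube_file='variance_cube.fits',
                                    source_model_cube_file='tdose_source_model_cube.fits',
                                    outputdir='./spectra/', clobber=True)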
"""
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: print(' - Loading data needed for spectral assembly')
model_cube = afits.open(model_cube_file)[model_cube_ext].data
model_cube_hdr = afits.open(model_cube_file)[model_cube_ext].header
layer_scale_arr = afits.open(model_cube_file)[layer_scale_ext].data
if variance_cube_file is not None:
        stddev_cube       = np.sqrt(afits.open(variance_cube_file)[variance_cube_ext].data)   # turn variance into standard deviation
source_model_cube = afits.open(source_model_cube_file)[source_cube_ext].data
else:
stddev_cube = None
source_model_cube = None
Nsources = layer_scale_arr.shape[0]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if data_cube_file is not None:
if verbose: print(' - Loading data cube ')
data_cube = afits.open(data_cube_file)[model_cube_ext].data
else:
data_cube = None
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if source_association_dictionary is None:
if verbose: print(' - Building default source association dictionary ' \
'(determining what sources are combined into objects), i.e., one source per object ')
sourcIDs_dic = collections.OrderedDict()
for oo in np.arange(int(Nsources)):
sourcIDs_dic[str(oo)] = [oo]
else:
sourcIDs_dic = source_association_dictionary
Nobj = len(list(sourcIDs_dic.keys()))
if verbose: print(' - Found '+str(Nobj)+' objects to generate spectra for in source_association_dictionary ')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: print(' - Assembling wavelength vector for spectra ')
wavelengths = np.arange(model_cube_hdr['NAXIS3'])*model_cube_hdr['CD3_3']+model_cube_hdr['CRVAL3']
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
specfiles = []
for oo, key in enumerate(sourcIDs_dic.keys()):
obj_cube_hdr = model_cube_hdr.copy()
try:
specid = str("%.10d" % int(key))
except:
specid = str(key)
specname = outputdir+nameext+'_'+specid+'.fits'
specfiles.append(specname)
sourceIDs = sourcIDs_dic[key]
obj_cube_hdr.append(('OBJID ',specid ,'ID of object'),end=True)
obj_cube_hdr.append(('SRCIDS ',str(sourceIDs) ,'IDs of sources combined in object'),end=True)
if verbose:
infostr = ' - Extracting spectrum '+str("%6.f" % (oo+1))+' / '+str("%6.f" % Nobj)
sys.stdout.write("%s\r" % infostr)
sys.stdout.flush()
sourceoutput = tes.extract_spectrum(sourceIDs,layer_scale_arr,wavelengths,noise_cube=stddev_cube,
source_model_cube=source_model_cube, data_cube=data_cube,
specname=specname,obj_cube_hdr=obj_cube_hdr,clobber=clobber,verbose=True)
if verbose: print('\n - Done extracting spectra. Returning list of fits files generated')
return specfiles
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def extract_spectrum(sourceIDs,layer_scale_arr,wavelengths,noise_cube=None,source_model_cube=None,
specname='tdose_extract_spectra_extractedspec.fits',obj_cube_hdr=None,data_cube=None,
clobber=False,verbose=True):
"""
Extracting a spectrum based on the layer scale image from the model cube provided a list of sources to combine.
Noise is estimated from the noise cube (of the data)
If all layer_scales are 1 a data_cube for the extractions is expected
--- INPUT ---
sourceIDs The source IDs to combine into spectrum
layer_scale_arr Layer scale array (or image) produced when generating the model cube
fractional flux belonging to the source in each pixel
wavelengths Wavelength vector to use for extracted 1D spectrum.
noise_cube Cube with uncertainties (sqrt(variance)) of data cube to be used for estimating 1D uncertainties
To estimate S/N and 1D noise, providing a source model cube is required
    source_model_cube   Source model cube containing the model cube for each individual source separately
Needed in order to estimate noise from noise-cube
specname Name of file to save spectrum to
obj_cube_hdr Provide a template header to save the object cube (from combining the individual source cube)
as an extension to the extracted spectrum
data_cube In case all layers scales are 1, it is assumed that the source_model_cube contains a mask for the
spectral extraction, which will then be performed on this data_cube.
clobber To overwrite existing files set clobber=True
verbose Toggle verbosity
--- EXAMPLE OF USE ---
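    Normally called by tes.extract_spectra(); a stand-alone sketch where the arrays are
    already loaded (all variable and file names are placeholders):

    wave, spec1D, noise1D, obj_cube = tes.extract_spectrum([0, 1], layer_scale_arr, wavelengths,
                                                           noise_cube=stddev_cube,
                                                           source_model_cube=source_model_cube,
                                                           specname='tdose_spectrum_0000000001.fits',
                                                           clobber=True)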
"""
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: print(' - Checking shape of wavelengths and layer_scale_arr')
if wavelengths.shape[0] != layer_scale_arr.shape[1]:
sys.exit(' ---> Shape of wavelength vector ('+str(wavelengths.shape)+
') and wavelength dimension of layer scale array ('+
                 str(layer_scale_arr.shape[1])+') do not match.')
else:
if verbose: print(' dimensions match; proceeding...')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: print(' - Checking all sources have spectra in layer_scale_arr')
maxsource = np.max(sourceIDs)
if maxsource >= layer_scale_arr.shape[0]:
sys.exit(' ---> Sources in list '+str(str(sourceIDs))+
' not available among '+str(layer_scale_arr.shape[0])+' sources in layer_scale_arr.')
else:
if verbose: print(' All sources exist in layer_scale_arr; proceeding...')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: print(' - Assembling object spectrum from source scaling')
source_ent = np.asarray(sourceIDs).astype(int)
if (layer_scale_arr == 1).all():
        if verbose: print(' - All layer scales are 1; assuming the source model cube contains a mask for spectral extraction')
object_cube = np.sum(np.abs(source_model_cube[source_ent,:,:]),axis=0)
if data_cube is None:
            sys.exit(' ---> Did not find a data cube to extract spectra from as expected')
object_mask = (object_cube == 0) # masking all zeros in object mask
invalid_mask = np.ma.masked_invalid(data_cube).mask
comb_mask = (invalid_mask | object_mask)
spec_1D_masked = np.sum(np.sum( np.ma.array(data_cube,mask=comb_mask) ,axis=1),axis=1)
spec_1D = spec_1D_masked.filled(fill_value=0.0)
if noise_cube is not None:
if verbose: print(' Calculating noise as d_spec_k = sqrt( SUMij d_pix_ij**2 ), i.e., as the sqrt of variances summed')
invalid_mask_noise = np.ma.masked_invalid(noise_cube).mask
comb_mask = (comb_mask | invalid_mask_noise)
variance_1D_masked = np.ma.array(noise_cube,mask=comb_mask)**2
noise_1D_masked = np.sqrt( np.sum( np.sum( variance_1D_masked, axis=1), axis=1) )
noise_1D = noise_1D_masked.filled(fill_value=np.nan)
if verbose: print(' Generating S/N vector')
SN_1D = spec_1D / noise_1D
else:
if verbose: print(' - No "noise_cube" provided. Setting all errors and S/N values to NaN')
SN_1D = np.zeros(spec_1D.shape)*np.NaN
noise_1D = np.zeros(spec_1D.shape)*np.NaN
else:
if verbose: print(' - Some layer scales are different from 1; hence assembling spectra using layer scales')
if len(source_ent) < 1:
spec_1D = layer_scale_arr[source_ent,:]
else:
spec_1D = np.sum( layer_scale_arr[source_ent,:],axis=0)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if noise_cube is not None:
if verbose: print(' - Estimate S/N at each wavelength for 1D spectrum (see Eq. 16 of Kamann+2013)')
if verbose: print(' Estimating fraction of flux in each pixel wrt. total flux in each layer')
object_cube = np.sum((source_model_cube[source_ent,:,:,:]),axis=0) # summing source models for all source IDs
fluxfrac_cube_sents = np.zeros(source_model_cube.shape[1:])
for sent in source_ent:
object_cube_sent = np.sum((source_model_cube[[sent],:,:,:]),axis=0) # getting source model for model 'sent'
fluxscale1D_sent = layer_scale_arr[sent,:]
fluxfrac_cube_sent = object_cube_sent / fluxscale1D_sent[:,None,None]
fluxfrac_cube_sents = fluxfrac_cube_sents + fluxfrac_cube_sent
fluxfrac_cube = fluxfrac_cube_sents / len(source_ent) # renormalizing flux-fraction cube
if verbose: print(' Defining pixel mask (ignoring NaN pixels) ') #+\
# 'and pixels with <'+str(fluxfrac_min)+' of total pixel flux in model cube) '
# pix_mask = (fluxfrac_cube < fluxfrac_min)
invalid_mask1 = np.ma.masked_invalid(fluxfrac_cube).mask
invalid_mask2 = np.ma.masked_invalid(noise_cube).mask
# combining mask making sure all individual mask pixels have True for it to be true in combined mask
comb_mask = (invalid_mask1 | invalid_mask2) # | pix_mask
        if verbose: print('     Calculating noise propagated as d_spec_k = 1/sqrt( SUMij (fluxfrac_ij**2 / d_pix_ij**2) )')
squared_ratio = np.ma.array(fluxfrac_cube,mask=comb_mask)**2 / np.ma.array(noise_cube,mask=comb_mask)**2
inv_noise_masked = np.sqrt( np.sum( np.sum( squared_ratio, axis=1), axis=1) )
noise_1D = (1.0/inv_noise_masked).filled(fill_value=0.0)
if verbose: print(' Generating S/N vector')
SN_1D = spec_1D / noise_1D
else:
if verbose: print(' - No "noise_cube" provided. Setting all errors and S/N values to NaN')
SN_1D = np.zeros(spec_1D.shape)*np.NaN
noise_1D = np.zeros(spec_1D.shape)*np.NaN
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: print(' - Saving extracted 1D spectrum and source cube to \n '+specname)
mainHDU = afits.PrimaryHDU() # primary HDU
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
c1 = afits.Column(name='wave', format='D', unit='ANGSTROMS', array=wavelengths)
c2 = afits.Column(name='flux', format='D', unit='', array=spec_1D)
c3 = afits.Column(name='fluxerror', format='D', unit='', array=noise_1D)
c4 = afits.Column(name='s2n', format='D', unit='', array=SN_1D)
coldefs = afits.ColDefs([c1,c2,c3,c4])
th = afits.BinTableHDU.from_columns(coldefs) # creating default header
# writing hdrkeys:'---KEY--', '----------------MAX LENGTH COMMENT-------------'
th.header.append(('EXTNAME ','SPEC1D' ,'cube containing source'),end=True)
head = th.header
tbHDU = afits.BinTableHDU.from_columns(coldefs, header=head)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if obj_cube_hdr is not None:
objHDU = afits.ImageHDU(object_cube)
for hdrkey in list(obj_cube_hdr.keys()):
if not hdrkey in list(objHDU.header.keys()):
objHDU.header.append((hdrkey,obj_cube_hdr[hdrkey],obj_cube_hdr.comments[hdrkey]),end=True)
try:
objHDU.header.append(('EXTNAMEC',objHDU.header['EXTNAME'] ,'EXTNAME of original source cube'),end=True)
del objHDU.header['EXTNAME']
except:
pass
objHDU.header.append(('EXTNAME ','SOURCECUBE' ,'cube containing source'),end=True)
hdulist = afits.HDUList([mainHDU,tbHDU,objHDU])
else:
hdulist = afits.HDUList([mainHDU,tbHDU])
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
hdulist.writeto(specname, overwrite=clobber)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
return wavelengths, spec_1D, noise_1D, object_cube
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def extract_spectra_viasourcemodelcube(datacube,sourcemodelcube,wavelengths,speclist,specids='None',outputdir='./',
noisecube=False,sourcemodel_hdr='None',verbose=True):
"""
    Wrapper for tes.extract_spectrum_viasourcemodelcube() to extract multiple spectra
--- INPUT ----
datacube Datacube to extract spectra from
sourcemodelcube Cube containing the source models for each object used as "extraction cube"
Dimensions should be [Nsources,datacube.shape]
wavelengths Wavelength vector to use for extracted 1D spectrum.
speclist List of spectra to extract. Indexes corresponding to the source models in the
                        sourcemodelcube
specids List of IDs to use in naming of output for source models referred to in "speclist"
outputdir Directory to store spectra to
noisecube Cube with uncertainties (sqrt(variance)) of data cube to be used in extraction
    sourcemodel_hdr     If not 'None' provide a basic fits header for the source model cubes extracted
and they will be appended to the individual output fits file containing the extracted
spectra.
verbose Toggle verbosity
--- EXAMPLE OF USE ---
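    A hypothetical call with the cubes already loaded as numpy arrays (names are placeholders):

    specnames = tes.extract_spectra_viasourcemodelcube(datacube, sourcemodelcube, wavelengths,
                                                       [0, 3], specids=[101, 104],
                                                       outputdir='./spectra/', noisecube=noisecube)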
"""
if verbose: print(' - Check that source models indicated are present in source model cube ')
specnames = []
Nmodels = sourcemodelcube.shape[0]
maxobj = np.max(speclist)
if maxobj >= Nmodels:
sys.exit(' ---> Object model "'+str(maxobj)+'" is not included in source model cube (models start at 0)')
else:
if verbose: print(' All object models appear to be included in the '+str(Nmodels)+' source models found in cube')
if datacube.shape != sourcemodelcube[0].shape:
sys.exit(' ---> Shape of datacube ('+str(datacube.shape)+') and shape of source models ('+
                 str(sourcemodelcube[0].shape)+') do not match.')
sourcemodel_sum = np.sum(sourcemodelcube,axis=0)
for ss, spec in enumerate(speclist):
if specids == 'None':
specid = spec
else:
specid = specids[ss]
specname = outputdir+'tdose_spectrum_'+str("%.12d" % specid)+'.fits'
specnames.append(specname)
sourcemodel = sourcemodelcube[spec,:,:,:]
sourceweights = sourcemodel/sourcemodel_sum # fractional flux of model for given source in each pixel
sourcemodel_hdr.append(('OBJMODEL',spec ,'Source model number in parent source model cube'),end=True)
sourcemodel_hdr.append(('OBJID ',specid ,'ID of source'),end=True)
if verbose:
infostr = ' - Extracting spectrum '+str("%6.f" % (spec+1))+' / '+str("%6.f" % len(speclist))
sys.stdout.write("%s\r" % infostr)
sys.stdout.flush()
sourceoutput = tes.extract_spectrum_viasourcemodelcube(datacube,sourceweights,wavelengths,specname=specname,
noisecube=noisecube,spec1Dmethod='sum',
sourcecube_hdr=sourcemodel_hdr,verbose=verbose)
if verbose: print('\n - Done extracting spectra. Returning list of fits files containing spectra')
return specnames
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def extract_spectrum_viasourcemodelcube(datacube,sourceweights,wavelengths,
specname='tdose_extract_spectra_extractedspec.fits',
noisecube=None,spec1Dmethod='sum',sourcecube_hdr='None',verbose=True):
"""
Extracting a spectrum from a data cube given a source model (cube) to be used as 'extraction cube'
--- INPUT ---
datacube Datacube to extract spectra from
sourceweights Weights from source model to use as "extraction cube". The weights should contain the
fractional flux belonging to the source in each pixel
wavelengths Wavelength vector to use for extracted 1D spectrum.
specname Name of spectrum to generate
noisecube Cube with uncertainties (sqrt(variance)) of data cube to be used in extraction
spec1Dmethod Method used to extract 1D spectrum from source cube with
sourcecube_hdr If not 'None' provide a fits header for the source cube and it ill be appended to the
output fits file.
verbose Toggle verbosity
--- EXAMPLE OF USE ---
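    Normally invoked by tes.extract_spectra_viasourcemodelcube(); a stand-alone sketch
    (inputs are placeholder numpy arrays):

    output = tes.extract_spectrum_viasourcemodelcube(datacube, sourceweights, wavelengths,
                                                     specname='tdose_spectrum_101.fits',
                                                     noisecube=noisecube, spec1Dmethod='sum')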
"""
if verbose: print(' - Checking shape of data and source model cubes')
if datacube.shape != sourceweights.shape:
sys.exit(' ---> Shape of datacube ('+str(datacube.shape)+') and source weights ('+
                 str(sourceweights.shape)+') do not match.')
else:
if verbose: print(' dimensions match; proceeding with extraction ')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: print(' - Applying weights to "datacube" to obtain source cube ')
sourcecube = datacube*sourceweights
if noisecube is not None:
if verbose: print(' - Using "noisecube" for error propagation ')
datanoise = noisecube
else:
if verbose: print(' - No "noisecube" provided. Setting all errors to 1')
datanoise = np.ones(datacube.shape)
    if verbose: print(' - Assuming uncertainty on source weights equals the datanoise when propagating errors')
sourceweights_err = datanoise
sourcecube_err = sourcecube * np.sqrt( (datanoise/datacube)**2 + (sourceweights_err/sourceweights)**2 )
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: print(' - Generating 1D spectrum from source cube via:')
spec_wave = wavelengths
maskinvalid = np.ma.masked_invalid(sourcecube * sourcecube_err).mask
if spec1Dmethod == 'sum':
if verbose: print(' Simple summation of fluxes in sourcecube.')
spec_flux = np.sum(np.sum(np.ma.array(sourcecube,mask=maskinvalid),axis=1),axis=1).filled()
if verbose: print(' Errors are propagated as sum of squares.')
spec_err = np.sqrt( np.sum( np.sum(np.ma.array(sourcecube_err,mask=maskinvalid)**2,axis=1),axis=1) ).filled()
elif spec1Dmethod == 'sum_SNweight':
pdb.set_trace()
else:
sys.exit(' ---> The chosen spec1Dmethod ('+str(spec1Dmethod)+') is invalid')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: print(' - Saving extracted 1D spectrum and source cube to \n '+specname)
mainHDU = afits.PrimaryHDU() # primary HDU
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
c1 = afits.Column(name='wave', format='D', unit='ANGSTROMS', array=spec_wave)
c2 = afits.Column(name='flux', format='D', unit='', array=spec_flux)
c3 = afits.Column(name='fluxerror', format='D', unit='', array=spec_err)
coldefs = afits.ColDefs([c1,c2,c3])
th = afits.BinTableHDU.from_columns(coldefs) # creating default header
# writing hdrkeys:'---KEY--', '----------------MAX LENGTH COMMENT-------------'
th.header.append(('EXTNAME ','SPEC1D' ,'cube containing source'),end=True)
th.header.append(('SPECMETH' , spec1Dmethod ,'Method used for spectral extraction'),end=True)
head = th.header
tbHDU = afits.BinTableHDU.from_columns(coldefs, header=head)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if sourcecube_hdr != 'None':
sourceHDU = afits.ImageHDU(sourcecube) # default HDU with default minimal header
for hdrkey in list(sourcecube_hdr.keys()):
if not hdrkey in list(sourceHDU.header.keys()):
sourceHDU.header.append((hdrkey,sourcecube_hdr[hdrkey],sourcecube_hdr.comments[hdrkey]),end=True)
sourceHDU.header.append(('EXTNAME ','SOURCECUBE' ,'cube containing source'),end=True)
hdulist = afits.HDUList([mainHDU,tbHDU,sourceHDU])
else:
hdulist = afits.HDUList([mainHDU,tbHDU])
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
hdulist.writeto(specname, overwrite=True)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
return sourcecube, sourcecube_err, spec_wave, spec_flux, spec_err
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def plot_1Dspecs(filelist,plotname='./tdose_1Dspectra.pdf',colors=None,labels=None,plotSNcurve=False,
tdose_wavecol='wave',tdose_fluxcol='flux',tdose_errcol='fluxerror',
simsources=None,simsourcefile='/Users/kschmidt/work/TDOSE/mock_cube_sourcecat161213_all.fits',
sim_cube_dim=None,comparisonspecs=None,comp_colors=['blue'],comp_labels=None,
comp_wavecol='WAVE_AIR',comp_fluxcol='FLUX',comp_errcol='FLUXERR',
xrange=None,yrange=None,showspecs=False,shownoise=True,
skyspecs=None,sky_colors=['red'],sky_labels=['sky'],
sky_wavecol='lambda',sky_fluxcol='data',sky_errcol='stat',
showlinelists=None,linelistcolors=['gray'],smooth=0,ylog=False,
plotratio=False,
verbose=True,pubversion=False):
"""
Plots of multiple 1D spectra
--- INPUT ---
filelist List of spectra filenames to plot
plotname Name of plot to generate
colors Colors of the spectra in filelist to use
labels Labels of the spectra in filelist to use
plotSNcurve Show signal-to-noise curve instead of flux spectra
tdose_wavecol Wavelength column of the spectra in filelist
tdose_fluxcol Flux column of the spectra in filelist
tdose_errcol Flux error column of the spectra in filelist
simsources To plot simulated sources provide ids here
simsourcefile Source file with simulated sources to plot
sim_cube_dim Dimensions of simulated cubes
comparisonspecs To plot comparison spectra provide the filenames of those here
comp_colors Colors of the spectra in comparisonspecs list to use
comp_labels Labels of the spectra in comparisonspecs list to use
comp_wavecol Wavelength column of the spectra in comparisonspecs list
comp_fluxcol Flux column of the spectra in comparisonspecs list
comp_errcol Flux error column of the spectra in comparisonspecs list
xrange Xrange of plot
yrange Yrange of plot
showspecs To show plot instead of storing it to disk set showspecs=True
shownoise To add noise envelope around spectrum set shownoise=True
skyspecs To plot sky spectra provide the filenames of those here
sky_colors Colors of the spectra in skyspecs list to use
sky_labels Labels of the spectra in skyspecs list to use
sky_wavecol Wavelength column of the spectra in skyspecs list
sky_fluxcol Flux column of the spectra in skyspecs list
sky_errcol Flux error column of the spectra in skyspecs list
showlinelists To show line lists provide a list of arrays of dimension (Nlines,2) where each row in the
arrays contains [waveobs, name], where 'waveobs' is the observed wavelengths and 'name' is
                        a string with the name of each of the Nlines positions to mark on the spectrum.
linelistcolors List of colors for line lists provided in showlinelists
smooth To smooth the spectra, provide sigma of the 1D gaussian smoothing kernel to apply.
For smooth = 0, no smoothing is performed.
ylog To plot y-axis in log scale set to true
plotratio To plot the ratio between the main spectrum and the comparison spectra instead of the actual
spectra, set this keyword to true.
verbose Toggle verbosity
pubversion Generate more publication friendly version of figure
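    --- EXAMPLE OF USE ---
    A hypothetical call (file names are placeholders):

    tes.plot_1Dspecs(['tdose_spectrum_0000000001.fits'], plotname='./tdose_1Dspectra.pdf',
                     colors=['black'], labels=['object 1'], xrange=[4800, 9300], smooth=2)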
"""
if len(filelist) == 1:
if verbose: print(' - Plotting data from '+filelist[0])
else:
if verbose: print(' - Plotting data from filelist ')
if pubversion:
fig = plt.figure(figsize=(6, 3))
fig.subplots_adjust(wspace=0.1, hspace=0.1,left=0.15, right=0.95, bottom=0.18, top=0.83)
Fsize = 12
else:
fig = plt.figure(figsize=(10, 3))
fig.subplots_adjust(wspace=0.1, hspace=0.1,left=0.06, right=0.81, bottom=0.15, top=0.95)
Fsize = 10
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Looking for flux units in spectra
bunit = 'BUNIT FLUX' # Default BUNIT
for unitspec in filelist:
if bunit == 'BUNIT FLUX':
try:
sourcecubehdr = afits.open(unitspec)['SOURCECUBE'].header
bunit = sourcecubehdr['BUNIT']
except:
try: # Backwards compatibility to TDOSE v2.0 extractions
sourcecubehdr = afits.open(unitspec)[2].header
bunit = sourcecubehdr['BUNIT']
except:
pass
if bunit == 'BUNIT FLUX':
if verbose: print(' - Did not find BUNIT in SOURCECUBE header for any spectra in filelist - are they not from TDOSE?')
if bunit == '10**(-20)*erg/s/cm**2/Angstrom': # Making bunit LaTeXy for MUSE-Wide BUNIT format
bunit = '1e-20 erg/s/cm$^2$/\AA'
else:
        bunit     = '$'+bunit+'$' # minimizing problems with LaTeXing plot axes
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
lthick = 1
plt.rc('text', usetex=True)
plt.rc('font', family='serif',size=Fsize)
plt.rc('xtick', labelsize=Fsize)
plt.rc('ytick', labelsize=Fsize)
plt.clf()
plt.ioff()
#plt.title(plotname.split('TDOSE 1D spectra'),fontsize=Fsize)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
for ff, specfile in enumerate(filelist):
specdat = afits.open(specfile)[1].data
if colors is None:
spec_color = None
else:
spec_color = colors[ff]
if labels is None:
spec_label = specfile
else:
spec_label = labels[ff]
if xrange is not None:
goodent = np.where((specdat[tdose_wavecol] > xrange[0]) & (specdat[tdose_wavecol] < xrange[1]))[0]
            if len(goodent) == 0:
if verbose: print(' - The chosen xrange is not covered by the input spectrum. Plotting full spectrum')
goodent = np.arange(len(specdat[tdose_wavecol]))
else:
goodent = np.arange(len(specdat[tdose_wavecol]))
if plotSNcurve:
try:
s2ndat = specdat['s2n'][goodent]
except:
s2ndat = specdat[tdose_fluxcol][goodent]/specdat[tdose_errcol][goodent]
if smooth > 0:
s2ndat = snf.gaussian_filter(s2ndat, smooth)
if not plotratio:
plt.plot(specdat[tdose_wavecol][goodent],s2ndat,color=spec_color,lw=lthick, label=spec_label)
ylabel = 'S/N'
else:
plt.plot(specdat[tdose_wavecol][goodent],s2ndat/s2ndat,color=spec_color,lw=lthick, label=None)
ylabel = 'S/N ratio'
#plotname = plotname.replace('.pdf','_S2N.pdf')
else:
fillalpha = 0.30
fluxdat = specdat[tdose_fluxcol][goodent]
errlow = specdat[tdose_fluxcol][goodent]-specdat[tdose_errcol][goodent]
errhigh = specdat[tdose_fluxcol][goodent]+specdat[tdose_errcol][goodent]
if smooth > 0:
fluxdat = snf.gaussian_filter(fluxdat, smooth)
errlow = snf.gaussian_filter(errlow, smooth)
errhigh = snf.gaussian_filter(errhigh, smooth)
if not plotratio:
if shownoise:
plt.fill_between(specdat[tdose_wavecol][goodent],errlow,errhigh,
alpha=fillalpha,color=spec_color)
plt.plot(specdat[tdose_wavecol][goodent],fluxdat,
color=spec_color,lw=lthick, label=spec_label)
ylabel = tdose_fluxcol
else:
plt.plot(specdat[tdose_wavecol][goodent],fluxdat/fluxdat,
color=spec_color,lw=lthick, label=None)
ylabel = tdose_fluxcol+' ratio '
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if simsources is not None:
sim_total = np.zeros(len(specdat[tdose_wavecol]))
for sourcenumber in simsources:
sourcedat = afits.open(simsourcefile)[1].data
xpos = sourcedat['xpos'][sourcenumber]
ypos = sourcedat['ypos'][sourcenumber]
fluxscale = sourcedat['fluxscale'][sourcenumber]
sourcetype = sourcedat['sourcetype'][sourcenumber]
spectype = sourcedat['spectype'][sourcenumber]
sourcecube = tbmc.gen_source_cube([ypos,xpos],fluxscale,sourcetype,spectype,cube_dim=sim_cube_dim,
verbose=verbose,showsourceimgs=False)
simspec = np.sum( np.sum(sourcecube, axis=1), axis=1)
sim_total = sim_total + simspec
if smooth > 0:
simspec = snf.gaussian_filter(simspec, smooth)
plt.plot(specdat[tdose_wavecol],simspec,'--',color='black',lw=lthick)
plt.plot(specdat[tdose_wavecol],sim_total,'--',color='black',lw=lthick,
label='Sim. spectrum: \nsimsource='+str(simsources))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if comparisonspecs is not None:
for cc, comparisonspec in enumerate(comparisonspecs):
compdat = afits.open(comparisonspec)[1].data
if xrange is not None:
goodent = np.where((compdat[comp_wavecol] > xrange[0]) & (compdat[comp_wavecol] < xrange[1]))[0]
                if len(goodent) == 0:
if verbose: print(' - The chosen xrange is not covered by the comparison spectrum. Plotting full spectrum')
goodent = np.arange(len(compdat[comp_wavecol]))
else:
goodent = np.arange(len(compdat[comp_wavecol]))
if comp_colors is None:
comp_color = None
else:
comp_color = comp_colors[cc]
if comp_labels is None:
comp_label = comparisonspec
else:
comp_label = comp_labels[cc]
if plotSNcurve:
s2ncompdat = compdat[comp_fluxcol][goodent]/compdat[comp_errcol][goodent]
if smooth > 0:
s2ncompdat = snf.gaussian_filter(s2ncompdat, smooth)
if not plotratio:
plt.plot(compdat[comp_wavecol][goodent],s2ncompdat,
color=comp_color,lw=lthick, label=comp_label)
else:
plt.plot(compdat[comp_wavecol][goodent],s2ndat/s2ncompdat,
color=comp_color,lw=lthick, label=comp_label)
else:
fillalpha = 0.30
fluxcompdat = compdat[comp_fluxcol][goodent]
errlow = compdat[comp_fluxcol][goodent]-compdat[comp_errcol][goodent]
errhigh = compdat[comp_fluxcol][goodent]+compdat[comp_errcol][goodent]
if smooth > 0:
fluxcompdat = snf.gaussian_filter(fluxcompdat, smooth)
errlow = snf.gaussian_filter(errlow, smooth)
errhigh = snf.gaussian_filter(errhigh, smooth)
if not plotratio:
if shownoise:
plt.fill_between(compdat[comp_wavecol][goodent],errlow,errhigh,
alpha=fillalpha,color=comp_color)
plt.plot(compdat[comp_wavecol][goodent],fluxcompdat,
color=comp_color,lw=lthick, label=comp_label)
else:
plt.plot(compdat[comp_wavecol][goodent],fluxdat/fluxcompdat,
color=comp_color,lw=lthick, label=comp_label)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if skyspecs is not None:
for ss, skyspec in enumerate(skyspecs):
skydat = afits.open(skyspec)[1].data
if xrange is not None:
goodent = np.where((skydat[sky_wavecol] > xrange[0]) & (skydat[sky_wavecol] < xrange[1]))[0]
                if len(goodent) == 0:
if verbose: print(' - The chosen xrange is not covered by the sky spectrum. Plotting full spectrum')
goodent = np.arange(len(skydat[sky_wavecol]))
else:
goodent = np.arange(len(skydat[sky_wavecol]))
if sky_colors is None:
sky_color = None
else:
sky_color = sky_colors[ss]
if sky_labels is None:
sky_label = skyspec
else:
sky_label = sky_labels[ss]
if plotSNcurve:
s2nsky = skydat[sky_fluxcol][goodent]/skydat[sky_errcol][goodent]
if smooth > 0:
s2nsky = snf.gaussian_filter(s2nsky, smooth)
plt.plot(skydat[sky_wavecol][goodent],s2nsky,
color=sky_color,lw=lthick, label=sky_label)
else:
fillalpha = 0.30
fluxsky = skydat[sky_fluxcol][goodent]
errlow = skydat[sky_fluxcol][goodent]-skydat[sky_errcol][goodent]
errhigh = skydat[sky_fluxcol][goodent]+skydat[sky_errcol][goodent]
if smooth > 0:
fluxsky = snf.gaussian_filter(fluxsky, smooth)
errlow = snf.gaussian_filter(errlow, smooth)
errhigh = snf.gaussian_filter(errhigh, smooth)
if shownoise:
plt.fill_between(skydat[sky_wavecol][goodent],errlow,errhigh,
alpha=fillalpha,color=sky_color)
plt.plot(skydat[sky_wavecol][goodent],fluxsky,
color=sky_color,lw=lthick, label=sky_label)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if xrange is None:
xvals = [4800,9300]
else:
xvals = xrange
plt.plot(xvals,[0,0],'--k',lw=lthick)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
plt.xlabel('Wavelength [\AA]', fontsize=Fsize)
if pubversion:
if plotSNcurve:
ylabel = 'Signal-to-Noise'
else:
ylabel = 'Flux ['+str(bunit)+']'
if plotratio:
ylabel = ylabel+' ratio'
plt.ylabel(ylabel, fontsize=Fsize)
if ylog:
plt.yscale('log')
if yrange is not None:
plt.ylim(yrange)
if xrange is not None:
plt.xlim(xrange)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if showlinelists is not None:
for sl, showlinelist in enumerate(showlinelists):
ymin, ymax = plt.ylim()
xmin, xmax = plt.xlim()
for ww, wave in enumerate(showlinelist[:,0]):
wave = float(wave)
if (wave < xmax) & (wave > xmin):
plt.plot([wave,wave],[ymin,ymax],linestyle='--',color=linelistcolors[sl],lw=lthick)
plt.text(wave,ymin+1.03*np.abs([ymax-ymin]),showlinelist[:,1][ww],color=linelistcolors[sl], fontsize=Fsize-2., ha='center')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if pubversion:
leg = plt.legend(fancybox=True, loc='upper center',prop={'size':Fsize-2},ncol=4,numpoints=1,
bbox_to_anchor=(0.44, 1.27)) # add the legend
else:
leg = plt.legend(fancybox=True, loc='upper right',prop={'size':Fsize},ncol=1,numpoints=1,
bbox_to_anchor=(1.25, 1.03)) # add the legend
leg.get_frame().set_alpha(0.7)
if showspecs:
if verbose: print(' Showing plot (not saving to file)')
plt.show()
else:
if verbose: print(' Saving plot to',plotname)
plt.savefig(plotname)
plt.clf()
plt.close('all')
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def plot_histograms(datavectors,plotname='./tdose_cubehist.pdf',colors=None,labels=None,bins=None,
xrange=None,yrange=None,verbose=True,norm=True,ylog=True):
"""
Plot histograms of a set of data vectors.
--- INPUT ---
datavectors Set of data vectors to plot histograms of
plotname Name of plot to generate
colors Colors to use for histograms
labels Labels for the data vectors
bins Bins to use for histograms. Can be generated with np.arange(minval,maxval+binwidth,binwidth)
xrange Xrange of plot
yrange Yrange of plot
verbose Toggle verbosity
norm Noramlize the histograms
    ylog            Use a logarithmic y-axis when plotting
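    --- EXAMPLE OF USE ---
    A hypothetical call on two placeholder data vectors:

    tes.plot_histograms([datacube.ravel(), modelcube.ravel()], plotname='./tdose_cubehist.pdf',
                        colors=['black', 'red'], labels=['data', 'model'], ylog=True)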
"""
Ndat = len(datavectors)
if verbose: print(' - Plotting histograms of N = '+str(Ndat)+' data vectors')
if colors is None:
colors = ['blue']*Ndat
if labels is None:
labels = ['data vector no. '+str(ii+1) for ii in np.arange(Ndat)]
if bins is None:
bins = np.arange(-100,102,2)
fig = plt.figure(figsize=(10, 3))
fig.subplots_adjust(wspace=0.1, hspace=0.1,left=0.08, right=0.81, bottom=0.1, top=0.95)
Fsize = 10
lthick = 1
plt.rc('text', usetex=True)
plt.rc('font', family='serif',size=Fsize)
plt.rc('xtick', labelsize=Fsize)
plt.rc('ytick', labelsize=Fsize)
plt.clf()
plt.ioff()
#plt.title(plotname.split('TDOSE 1D spectra'),fontsize=Fsize)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
for dd, datavec in enumerate(datavectors):
hist = plt.hist(datavec[~np.isnan(datavec)],color=colors[dd],bins=bins,histtype="step",lw=lthick,
label=labels[dd],normed=norm)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if yrange is None:
yvals = [1e-5,1e8]
else:
yvals = yrange
plt.plot([0,0],yvals,'--k',lw=lthick)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
plt.xlabel('', fontsize=Fsize)
plt.ylabel('\#', fontsize=Fsize)
if yrange is not None:
plt.ylim(yrange)
if xrange is not None:
plt.xlim(xrange)
if ylog:
plt.yscale('log')
leg = plt.legend(fancybox=True, loc='upper right',prop={'size':Fsize},ncol=1,numpoints=1,
bbox_to_anchor=(1.25, 1.03)) # add the legend
leg.get_frame().set_alpha(0.7)
if verbose: print(' Saving plot to',plotname)
plt.savefig(plotname)
plt.clf()
plt.close('all')
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = | mit | 2,141,411,611,090,954,000 | 50.925355 | 143 | 0.5421 | false | 3.423483 | false | false | false |
mbeyeler/pulse2percept | examples/implants/plot_custom_electrode_array.py | 1 | 6434 | """
============================================================================
Creating your own electrode array
============================================================================
This example shows how to create a new
:py:class:`~pulse2percept.implants.ElectrodeArray` object.
As the base class for all electrode arrays in pulse2percept, the
:py:class:`~pulse2percept.implants.ElectrodeArray` class provides a blue print
for the functionality that every electrode array should have.
First and foremost, an :py:class:`~pulse2percept.implants.ElectrodeArray`
contains a collection of :py:class:`~pulse2percept.implants.Electrode` objects,
and new electrodes can be added via the
:py:func:`~pulse2percept.implants.ElectrodeArray.add_electrodes` method.
In addition, individual electrodes in the array can be accessed by indexing
using either their pre-assigned names (a string) or their place in the array
(integer).
Arranging electrodes in a circle
--------------------------------
In this example, we want to build a new type of electrode array that arranges
all of its electrodes in a circle.
To do this, we need to create a new class ``CircleElectrodeArray`` that is
a child of :py:class:`~pulse2percept.implants.ElectrodeArray`:
"""
##############################################################################
# .. code-block:: python
#
# class CircleElectrodeArray(ElectrodeArray):
# """Electrodes arranged in a circle"""
# ...
#
# This way, the ``CircleElectrodeArray`` class can access all public methods
# of :py:class:`~pulse2percept.implants.ElectrodeArray`.
#
# The constructor then has the job of creating all electrodes in the array
# and placing them at the appropriate location; for example, by using the
# :py:func:`~pulse2percept.implants.ElectrodeArray.add_electrodes` method.
#
# The constructor of the class should accept a number of arguments:
#
# - ``n_electrodes``: how many electrodes to arrange in a circle
# - ``radius``: the radius of the circle
# - ``x_center``: the x-coordinate of the center of the circle
# - ``y_center``: the y-coordinate of the center of the circle
#
# For simplicity, we will use :py:class:`~pulse2percept.implants.DiskElectrode`
# objects of a given radius (100um), although it would be relatively straightforward
# to allow the user to choose the electrode type.
from pulse2percept.implants import ElectrodeArray, DiskElectrode
import collections as coll
import numpy as np
class CircleElectrodeArray(ElectrodeArray):
def __init__(self, n_electrodes, radius, x_center, y_center):
"""Electrodes arranged in a circle
Electrodes will be named 'A0', 'A1', ...
Parameters
----------
n_electrodes : int
how many electrodes to arrange in a circle
radius : float
the radius of the circle (microns)
x_center, y_center : float
the x,y coordinates of the center of the circle (microns),
where (0,0) is the center of the fovea
"""
# The job of the constructor is to create the electrodes. We start
# with an empty collection:
self._electrodes = coll.OrderedDict()
# We then generate a number `n_electrodes` of electrodes, arranged on
# the circumference of a circle:
for n in range(n_electrodes):
# Angular position of the electrode:
ang = 2.0 * np.pi / n_electrodes * n
# Create the disk electrode:
electrode = DiskElectrode(x_center + np.cos(ang) * radius,
y_center + np.sin(ang) * radius, 0, 100)
# Add the electrode to the collection:
self.add_electrode('A' + str(n), electrode)
##############################################################################
# Using the CircleElectrodeArray class
# ------------------------------------
#
# To use the new class, we need to specify all input arguments and pass them
# to the constructor:
n_electrodes = 10
radius = 1000 # radius in microns
x_center = 0 # x-coordinate of circle center (microns)
y_center = 0 # y-coordinate of circle center (microns)
# Create a new instance of type CircleElectrodeArray:
earray = CircleElectrodeArray(n_electrodes, radius, x_center, y_center)
print(earray)
##############################################################################
# Individual electrodes can be accessed by their name or integer index:
earray[0]
earray['A0']
earray[0] == earray['A0']
##############################################################################
# Visualizing the electrode array
# -------------------------------
#
# Electrode arrays come with their own plotting method:
earray.plot()
##############################################################################
# By default, the method will use the current Axes object or create a new one
# if none exists. Alternatively, you can specify ``ax=`` yourself.
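#
# As a minimal illustrative sketch (assuming Matplotlib is installed; the figure
# size here is arbitrary), we can also draw the array into an Axes we create:
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(8, 8))
earray.plot(ax=ax)
##############################################################################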
#
# Extending the CircleElectrodeArray class
# ----------------------------------------
#
# Similar to extending :py:class:`~pulse2percept.implants.ElectrodeArray` for
# our purposes, we can extend ``CircleElectrodeArray``.
#
# To add new functionality, we could simply edit the above constructor.
# However, nobody stops us from creating our own hierarchy of classes.
#
# For example, we could build a ``FlexibleCircleElectrodeArray`` that allows us
# to remove individual electrodes from the array:
class FlexibleCircleElectrodeArray(CircleElectrodeArray):
def remove(self, name):
"""Deletean electrode from the array
Parameters
----------
name : int, string
the name of the electrode to be removed
"""
del self.electrodes[name]
##############################################################################
# Note how we didn't even specify a constructor.
# By default, the class inherits all (public) functionality from its parent,
# including its constructor. So the following line will create the same
# electrode array as above:
flex_earray = FlexibleCircleElectrodeArray(
n_electrodes, radius, x_center, y_center)
print(flex_earray)
##############################################################################
# A single electrode can be removed by passing its name to the ``remove``
# method:
# Remove electrode 'A1'
flex_earray.remove('A1')
# Replot the implant:
flex_earray.plot()
| bsd-3-clause | 1,638,808,666,672,022,800 | 35.765714 | 84 | 0.614081 | false | 3.889964 | false | false | false |
asterix135/whoshouldivotefor | explorer/migrations/0008_auto_20170627_0253.py | 1 | 1741 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-27 06:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('explorer', '0007_auto_20170626_0543'),
]
operations = [
migrations.CreateModel(
name='IssueCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category', models.CharField(max_length=100, unique=True)),
],
),
migrations.AlterField(
model_name='answer',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='answers', to='explorer.Question'),
),
migrations.AlterField(
model_name='poll',
name='election',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='polls', to='explorer.Election'),
),
migrations.RemoveField(
model_name='question',
name='poll',
),
migrations.AddField(
model_name='question',
name='poll',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='questions', to='explorer.Poll'),
preserve_default=False,
),
migrations.AddField(
model_name='question',
name='category',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to='explorer.IssueCategory'),
preserve_default=False,
),
]
| mit | 2,262,988,717,029,549,800 | 34.530612 | 138 | 0.589316 | false | 4.23601 | false | false | false |
Geoportail-Luxembourg/geoportailv3 | geoportal/geoportailv3_geoportal/scripts/db2es.py | 1 | 3233 | # -*- coding: utf-8 -*-
from pyramid.paster import bootstrap
import psycopg2
from psycopg2.extras import DictCursor
import sys
import getopt
import json
from elasticsearch import helpers
from elasticsearch.helpers import BulkIndexError
from elasticsearch.exceptions import ConnectionTimeout
from geoportailv3_geoportal.lib.search import get_elasticsearch, get_index, \
ensure_index
"""
Utility functions for importing data into Elasticsearch from database
"""
def get_cursor():
source_conf = {
'database': 'search',
'user': 'postgres',
'password': '',
'host': 'luigi11',
'port': '5432'
}
conn = psycopg2.connect(**source_conf)
cursor = conn.cursor(cursor_factory=DictCursor)
query = "Select *, ST_AsGeoJSON(ST_Transform(\"searchLayer\".geom,4326)) as geom_4326 \
from public.\"searchLayer\" ;"
cursor.execute(query)
return cursor
def update_document(index, type, obj_id, obj=None):
doc = {
"_index": index,
"_type": "poi",
"_id": obj_id,
}
doc['_source'] = {}
doc['_source']['ts'] = json.loads(obj['geom_4326'])
doc['_source']['object_id'] = obj_id
doc['_source']['fk'] = obj['fk']
doc['_source']['object_type'] = 'poi'
doc['_source']['layer_name'] = obj['type']
doc['_source']['label'] = obj['label']
doc['_source']['role_id'] = 1
doc['_source']['public'] = True
return doc
def statuslog(text):
sys.stdout.write(text)
sys.stdout.flush()
def main():
env = bootstrap('development.ini')
request = env['request']
try:
opts, args = getopt.getopt(sys.argv[1:], 'ri', ['reset', 'index'])
except getopt.GetoptError as err:
print(str(err))
sys.exit(2)
index = False
reset = False
for o, a in opts:
if o in ('-r', '--reset'):
statuslog('\rResetting Index')
reset = True
if o in ('-i', '--index'):
statuslog('\rChecking Index')
index = True
import time
index_name = get_index(request) + '_' + time.strftime("%Y%m%d")
ensure_index(get_elasticsearch(request), index_name, reset)
if index is True:
statuslog("\rCreating Database Query ")
c = get_cursor()
counter = 1
while True:
multiple = 250
results = c.fetchmany(multiple)
doc_list = []
for result in results:
doc = update_document(get_index(request),
'poi',
result['id'],
result)
doc_list.append(doc)
statuslog("\rIndexed Elements: %i" % int(counter))
counter = counter + 1
try:
helpers.bulk(client=get_elasticsearch(request),
actions=doc_list,
chunk_size=multiple,
raise_on_error=True)
except (BulkIndexError, ConnectionTimeout) as e:
print("\n {}".format(e))
if not results:
statuslog("\n")
break
if __name__ == '__main__':
main()
| mit | 1,212,416,682,987,955,200 | 28.390909 | 91 | 0.534488 | false | 4.051378 | false | false | false |
dlsun/symbulate | symbulate/index_sets.py | 1 | 1353 | import numbers
class IndexSet(object):
def __init__(self):
return
def __getitem__(self, t):
if t in self:
return t
else:
raise KeyError("Time %.2f not in index set." % t)
def __contains__(self, value):
return False
def __eq__(self, other):
return type(other) == type(self)
class Reals(IndexSet):
def __init__(self):
return
def __contains__(self, value):
try:
return -float("inf") < value < float("inf")
except:
return False
class Naturals(IndexSet):
def __init__(self):
return
def __contains__(self, value):
try:
return (
value >= 0 and
(isinstance(value, numbers.Integral) or
value.is_integer())
)
except:
return False
class DiscreteTimeSequence(IndexSet):
def __init__(self, fs):
self.fs = fs
def __getitem__(self, n):
return n / self.fs
def __contains__(self, value):
return float(value * self.fs).is_integer()
def __eq__(self, index):
return (
isinstance(index, DiscreteTimeSequence) and
(self.fs == index.fs)
)
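# Illustrative example: for DiscreteTimeSequence(10) (a 10 Hz sampling rate),
# index 5 maps to time 0.5, and 0.5 is in the set because 0.5 * 10 is an integer.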
class Integers(DiscreteTimeSequence):
def __init__(self):
self.fs = 1
| mit | 5,717,099,780,918,022,000 | 18.328571 | 61 | 0.503326 | false | 4.112462 | false | false | false |
MPBAUnofficial/cmsplugin_image_gallery | cmsplugin_image_gallery/models.py | 1 | 4086 | import threading
from cms.models import CMSPlugin
from django.db import models
from django.utils.translation import ugettext_lazy as _
from inline_ordering.models import Orderable
from filer.fields.image import FilerImageField
from django.core.exceptions import ValidationError
import utils
localdata = threading.local()
localdata.TEMPLATE_CHOICES = utils.autodiscover_templates()
TEMPLATE_CHOICES = localdata.TEMPLATE_CHOICES
class GalleryPlugin(CMSPlugin):
def copy_relations(self, oldinstance):
for img in oldinstance.image_set.all():
new_img = Image()
new_img.gallery=self
new_img.inline_ordering_position = img.inline_ordering_position
new_img.src = img.src
new_img.image_url = img.image_url
new_img.title = img.title
new_img.alt = img.alt
new_img.save()
template = models.CharField(max_length=255,
choices=TEMPLATE_CHOICES,
default='cmsplugin_gallery/gallery.html',
editable=len(TEMPLATE_CHOICES) > 1)
def __unicode__(self):
return _(u'%(count)d image(s) in gallery') % {'count': self.image_set.count()}
class Image(Orderable):
def get_media_path(self, filename):
pages = self.gallery.placeholder.page_set.all()
return pages[0].get_media_path(filename)
gallery = models.ForeignKey(
GalleryPlugin,
verbose_name=_("gallery")
)
src = FilerImageField(
null=True,
blank=True,
verbose_name=_("image")
)
image_url = models.URLField(
_("alternative image url"),
verify_exists=True,
null=True,
blank=True,
default=None
)
link_url = models.URLField(
_("link url"),
verify_exists=True,
null=True,
blank=True,
default=None,
help_text=_("url used when user click on the image")
)
src_height = models.PositiveSmallIntegerField(
_("image height"),
editable=False,
null=True
)
src_width = models.PositiveSmallIntegerField(
_("image width"),
editable=False,
null=True
)
title = models.CharField(
_("title"),
max_length=255,
blank=True
)
alt = models.CharField(
_("alt text"),
max_length=80,
blank=True
)
def clean(self):
if not self.src and not self.image_url:
raise ValidationError(_("Image not specified, use image or alternative url to specify the image source"))
def __unicode__(self):
return self.title or self.alt or str(self.pk)
# For some reason, adding a Meta class to Image breaks the Orderable ordering field,
# so as a small workaround the Meta attributes are patched after the class definition.
Image._meta.get_field('inline_ordering_position').verbose_name = _("Inline ordering position")
Image._meta.verbose_name = _("Image")
Image._meta.verbose_name_plural = _("Images")
| bsd-2-clause | -7,195,980,818,986,026,000 | 37.186916 | 117 | 0.453255 | false | 5.477212 | false | false | false |
eyzhou123/python-games | tetris.py | 1 | 10241 | #tetris.py
from Tkinter import *
import random
def tetrisMousePressed(canvas,event):
tetrisRedrawAll(canvas)
def tetrisKeyPressed(canvas,event):
if event.keysym == "r":
tetrisInit(canvas)
if (canvas.data.isTetrisGameOver == False):
if event.keysym == "Left":
moveFallingPiece(canvas,0,-1)
elif event.keysym == "Right":
moveFallingPiece(canvas,0,+1)
elif event.keysym == "Up":
rotateFallingPiece(canvas)
elif event.keysym == "Down":
moveFallingPiece(canvas,+1,0)
tetrisRedrawAll(canvas)
def tetrisTimerFired(canvas):
if (canvas.data.isTetrisGameOver == False):
        if moveFallingPiece(canvas,+1,0) == False:
            # the piece could not fall any further: lock it and spawn a new one
placeFallingPiece(canvas)
newFallingPiece(canvas)
removeFullRows(canvas)
if (fallingPieceIsLegal(canvas) == False):
tetrisGameOver(canvas)
tetrisRedrawAll(canvas)
delay = 350 # milliseconds
def f():
tetrisTimerFired(canvas)
canvas.after(delay, f)# pause, then call timerFired again
def tetrisGameOver(canvas):
canvas.data.isTetrisGameOver = True
def tetrisRedrawAll(canvas):
canvas.delete(ALL)
drawTetrisGame(canvas)
drawTetrisScore(canvas)
if (canvas.data.isTetrisGameOver == True):
canvas.create_text(canvas.data.width/2,
canvas.data.height/2,text="Game Over!",font=("Helvetica",
32, "bold"))
def loadTetrisBoard(canvas):
(rows,cols) = (canvas.data.rows,canvas.data.cols)
canvas.data.tetrisBoard = [([canvas.data.emptyColor]*cols) for
row in xrange(rows)]
def drawTetrisGame(canvas):
canvas.create_rectangle(0,0,canvas.data.width,canvas.data.height,
fill = "orange")
drawTetrisBoard(canvas)
drawFallingPiece(canvas)
def drawTetrisBoard(canvas):
tetrisBoard = canvas.data.tetrisBoard
(rows,cols) = (len(tetrisBoard),len(tetrisBoard[0]))
for row in xrange(rows):
for col in xrange(cols):
color = tetrisBoard[row][col]
drawTetrisCell(canvas,row,col,color)
def drawTetrisCell(canvas,row,col,color):
tetrisBoard = canvas.data.tetrisBoard
margin = canvas.data.margin
cellSize = canvas.data.cellSize
left = margin + col * cellSize
right = left + cellSize
top = margin + row * cellSize
bottom = top + cellSize
canvas.create_rectangle(left, top, right, bottom,
fill = "black")
canvas.create_rectangle(left+1,top+1,right-1,bottom-1, #thin outline, use 1
fill = color)
def drawFallingPiece(canvas):
tetrisBoard = canvas.data.tetrisBoard
canvas.data.fallingPieceRows = len(canvas.data.fallingPiece)
canvas.data.fallingPieceCols = len(canvas.data.fallingPiece[0])
for row in xrange(canvas.data.fallingPieceRow,
canvas.data.fallingPieceRow + canvas.data.fallingPieceRows):
for col in xrange(canvas.data.fallingPieceCol,
canvas.data.fallingPieceCol + canvas.data.fallingPieceCols):
if (canvas.data.fallingPiece[row-canvas.data.fallingPieceRow
][col-canvas.data.fallingPieceCol] == True):
drawTetrisCell(canvas,row,col,canvas.data.fallingPieceColor)
def newFallingPiece(canvas):
i = random.randint(0,len(canvas.data.tetrisPieces)-1)
canvas.data.fallingPiece = canvas.data.tetrisPieces[i]
canvas.data.fallingPieceColor = canvas.data.tetrisPieceColors[i]
canvas.data.fallingPieceRow = 0
canvas.data.fallingPieceCol = (canvas.data.cols/2 -
canvas.data.fallingPieceWidth/2)
def moveFallingPiece(canvas,drow,dcol):
canvas.data.fallingPieceRow += drow
canvas.data.fallingPieceCol += dcol
if (fallingPieceIsLegal(canvas) == False):
canvas.data.fallingPieceRow -= drow
canvas.data.fallingPieceCol -= dcol
return False
return True
def rotateFallingPiece(canvas):
fallingPiece = canvas.data.fallingPiece
(fallingPieceRow,fallingPieceCol) = (canvas.data.fallingPieceRow,
canvas.data.fallingPieceCol)
(fallingPieceRows,fallingPieceCols) = (canvas.data.fallingPieceRows,
canvas.data.fallingPieceCols)
(oldCenterRow,oldCenterCol) = fallingPieceCenter(canvas)
(canvas.data.fallingPieceRows,canvas.data.fallingPieceCols) = (
canvas.data.fallingPieceCols,canvas.data.fallingPieceRows)
(newCenterRow,newCenterCol) = fallingPieceCenter(canvas)
canvas.data.fallingPieceRow +=oldCenterRow - newCenterRow
canvas.data.fallingPieceCol += oldCenterCol - newCenterCol
newCols = []
newList = []
for row in xrange(canvas.data.fallingPieceRows):
newCols = []
for col in xrange(canvas.data.fallingPieceCols):
newCols += [canvas.data.fallingPiece[
canvas.data.fallingPieceCols-1-col][row]]
newList += [newCols]
canvas.data.fallingPiece = newList
if (fallingPieceIsLegal(canvas) == False):
canvas.data.fallingPieceRow = fallingPieceRow
canvas.data.fallingPieceCol = fallingPieceCol
canvas.data.fallingPieceRows = fallingPieceRows
canvas.data.fallingPieceCols = fallingPieceCols
canvas.data.fallingPiece = fallingPiece
def fallingPieceCenter(canvas):
centerRow = canvas.data.fallingPieceRow + canvas.data.fallingPieceRows/2
centerCol = canvas.data.fallingPieceCol + canvas.data.fallingPieceCols/2
return (centerRow,centerCol)
def fallingPieceIsLegal(canvas):
tetrisBoard = canvas.data.tetrisBoard
canvas.data.fallingPieceRows = len(canvas.data.fallingPiece)
canvas.data.fallingPieceCols = len(canvas.data.fallingPiece[0])
for row in xrange(canvas.data.fallingPieceRow,
canvas.data.fallingPieceRow + canvas.data.fallingPieceRows):
for col in xrange(canvas.data.fallingPieceCol,
canvas.data.fallingPieceCol + canvas.data.fallingPieceCols):
if (canvas.data.fallingPiece[row-canvas.data.fallingPieceRow
][col-canvas.data.fallingPieceCol] == True):
if ((row<0) or (row >= canvas.data.rows) or (col<0) or
(col >= canvas.data.cols) or (tetrisBoard[row][col]!=
canvas.data.emptyColor)):
return False
return True
def placeFallingPiece(canvas):
tetrisBoard = canvas.data.tetrisBoard
canvas.data.fallingPieceRows = len(canvas.data.fallingPiece)
canvas.data.fallingPieceCols = len(canvas.data.fallingPiece[0])
for row in xrange(canvas.data.fallingPieceRow,
canvas.data.fallingPieceRow + canvas.data.fallingPieceRows):
for col in xrange(canvas.data.fallingPieceCol,
canvas.data.fallingPieceCol + canvas.data.fallingPieceCols):
if (canvas.data.fallingPiece[row-canvas.data.fallingPieceRow
][col-canvas.data.fallingPieceCol] == True):
tetrisBoard[row][col] = canvas.data.fallingPieceColor
def removeFullRows(canvas):
tetrisBoard = canvas.data.tetrisBoard
fullRows = 0
newRow = canvas.data.rows-1
for oldRow in xrange(canvas.data.rows-1,-1,-1):
if (canvas.data.emptyColor in tetrisBoard[oldRow]):
for col in xrange(canvas.data.cols):
tetrisBoard[newRow][col] = tetrisBoard[oldRow][col]
newRow -= 1
else:
fullRows += 1
canvas.data.score += fullRows**2
def drawTetrisScore(canvas):
canvas.create_text(canvas.data.cellSize,canvas.data.cellSize/2,
text="Score: " + str(canvas.data.score),anchor=W,
font=("Helvetica",16, "bold"))
def tetrisInit(canvas):
canvas.data.emptyColor = "blue"
loadTetrisBoard(canvas)
canvas.data.iPiece = [
[ True, True, True, True]
]
canvas.data.jPiece = [
[ True, False, False ],
[ True, True, True]
]
canvas.data.lPiece = [
[ False, False, True],
[ True, True, True]
]
canvas.data.oPiece = [
[ True, True],
[ True, True]
]
canvas.data.sPiece = [
[ False, True, True],
[ True, True, False ]
]
canvas.data.tPiece = [
[ False, True, False ],
[ True, True, True]
]
canvas.data.zPiece = [
[ True, True, False ],
[ False, True, True]
]
canvas.data.tetrisPieces = [canvas.data.iPiece, canvas.data.jPiece,
canvas.data.lPiece, canvas.data.oPiece,canvas.data.sPiece,
canvas.data.tPiece, canvas.data.zPiece ]
canvas.data.tetrisPieceColors = [ "red", "yellow", "magenta",
"pink", "cyan", "green", "orange" ]
canvas.data.fallingPiece = canvas.data.tetrisPieces[
random.randint(0,len(canvas.data.tetrisPieces)-1)]
canvas.data.fallingPieceColor = canvas.data.tetrisPieceColors[
canvas.data.tetrisPieces.index(canvas.data.fallingPiece)]
canvas.data.fallingPieceRow = 0
canvas.data.fallingPieceWidth = len(canvas.data.fallingPiece[0])
canvas.data.fallingPieceCol = (canvas.data.cols/2 -
canvas.data.fallingPieceWidth/2)
canvas.data.fallingPieceRows = len(canvas.data.fallingPiece)
canvas.data.fallingPieceCols = len(canvas.data.fallingPiece[0])
canvas.data.isTetrisGameOver = False
canvas.data.score = 0
tetrisRedrawAll(canvas)
def tetrisRun(rows,cols):
# create the root and the canvas
root = Tk()
margin = 30
cellSize = 30
canvasWidth = 2*margin + cols*cellSize
canvasHeight = 2*margin + rows*cellSize
canvas = Canvas(root, width=canvasWidth, height=canvasHeight)
canvas.pack()
root.resizable(width=0, height=0)
# Set up canvas data and call init
class Struct: pass
canvas.data = Struct()
canvas.data.margin = margin
canvas.data.cellSize = cellSize
canvas.data.rows = rows
canvas.data.cols = cols
canvas.data.width = canvasWidth
canvas.data.height = canvasHeight
tetrisInit(canvas)
# set up events
def f(event): tetrisMousePressed(canvas, event)
root.bind("<Button-1>", f)
def g(event): tetrisKeyPressed(canvas, event)
root.bind("<Key>", g)
tetrisTimerFired(canvas)
# and launch the app
root.mainloop() # This call BLOCKS (so your program waits until you close the window!)
tetrisRun(15,10)
| mit | 5,614,054,969,684,136,000 | 36.375912 | 91 | 0.671224 | false | 3.19532 | false | false | false |
KeserOner/where-artists-share | was/artists/models.py | 1 | 1280 | from django.contrib.auth.models import User
from django.db import models
from django.dispatch.dispatcher import receiver
class Artists(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
artist_image = models.ImageField(
verbose_name="Artist's profile image",
null=True,
blank=True,
unique=True,
upload_to="artist_image/",
)
artist_banner = models.ImageField(
verbose_name="Artist's banner",
unique=True,
null=True,
blank=True,
upload_to="artist_banner/",
)
    artist_bio = models.TextField(max_length=500, verbose_name="Artist's biography")
artist_signature = models.CharField(
max_length=70, verbose_name="Artist's signature"
)
artist_followed = models.ForeignKey(
"self",
on_delete=models.CASCADE,
related_name="artists_followed",
blank=True,
null=True,
)
def __str__(self):
return "Profil de %s" % self.user.username
@receiver(models.signals.pre_delete, sender=Artists)
def delete_images(sender, instance, **kwargs):
if instance.artist_image:
instance.artist_image.delete(False)
if instance.artist_banner:
instance.artist_banner.delete(False)
| mit | 6,011,123,594,670,129,000 | 25.666667 | 83 | 0.646094 | false | 3.855422 | false | false | false |
zinid/mrim | src/protocol.py | 1 | 19357 | from mmptypes import *
import utils
import UserDict
import cStringIO
import socket
import struct
import email
from email.Utils import parsedate
wp_request = {}
wp_request_reversed = {}
for k,v in [(key, locals()[key]) for key in locals().keys() if key.startswith('MRIM_CS_WP_REQUEST_PARAM')]:
wp_request[v] = k
for k,v in wp_request.items():
wp_request_reversed[v] = k
del k,v
message_flags = tuple([v for k,v in locals().items() if k.startswith('MESSAGE_FLAG')])
class MMPParsingError(Exception):
def __init__(self, text, packet):
self.args = text,packet
self.text = text
self.packet = packet
def __str__(self):
return self.text
class MMPHeader(UserDict.UserDict):
def __init__(self,typ=0,dlen=0,seq=0,fromip='0.0.0.0',fromport='0',header=''):
UserDict.UserDict.__init__(self)
self.header = header
self.typ = typ
self.frmt = '5I4s4s16B'
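		# Header layout (44 bytes): five uint32 fields (magic, proto, seq, msg,
		# dlen), two 4-byte packed address fields ('from' and 'fromport') and
		# 16 reserved bytes.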
if not self.header:
self['magic'] = CS_MAGIC
self['proto'] = PROTO_VERSION
self['seq'] = seq
self['msg'] = typ
self['from'] = fromip
self['fromport'] = fromport
self['dlen'] = dlen
self['reserved'] = tuple([0 for i in range(16)])
else:
try:
unpacked_header = struct.unpack(self.frmt, self.header)
except struct.error:
raise MMPParsingError("Can't unpack header", self.header)
self['magic'] = unpacked_header[0]
self['proto'] = unpacked_header[1]
self['seq'] = unpacked_header[2]
self['msg'] = unpacked_header[3]
self['dlen'] = unpacked_header[4]
self['from'] = socket.inet_ntoa(unpacked_header[5])
self['fromport'] = socket.inet_ntoa(unpacked_header[6])
self['reserved'] = unpacked_header[7:]
def __str__(self):
if not self.header:
try:
new_header = struct.pack(
self.frmt,
self['magic'],
self['proto'],
self['seq'],
self['msg'],
self['dlen'],
socket.inet_aton(self['from']),
socket.inet_aton(self['fromport']),
*self['reserved']
)
except (struct.error, KeyError):
raise MMPParsingError("Can't pack header", self)
return new_header
else:
return self.header
class MMPBody(UserDict.UserDict):
def __init__(self, typ=0, dict={}, body=''):
UserDict.UserDict.__init__(self)
self.dict = dict
self.body = body
self.typ = typ
if self.body:
self.io = cStringIO.StringIO(body)
self.str2dict(body)
elif self.dict:
self.io = cStringIO.StringIO()
self.update(dict)
def __str__(self):
if self.body:
return self.body
elif self.dict:
return self.dict2str(self.dict)
else:
return ''
def str2dict(self, body):
try:
return self._str2dict(body)
except struct.error:
raise MMPParsingError("Can't unpack body", body)
def dict2str(self, dict):
try:
return self._dict2str(dict)
except (struct.error, KeyError):
raise MMPParsingError("Can't pack body", dict)
def _str2dict(self, body):
if self.typ == MRIM_CS_HELLO_ACK:
self['ping_period'] = self._read_ul()
elif self.typ == MRIM_CS_LOGIN_REJ:
self['reason'] = self._read_lps()
elif self.typ == MRIM_CS_MESSAGE:
self['flags'] = self._read_ul()
self['to'] = self._read_lps()
self['message'] = self._read_lps()
			self['rtf-message'] = self._read_lps()
elif self.typ == MRIM_CS_MESSAGE_ACK:
self['msg_id'] = self._read_ul()
self['flags'] = self._read_ul()
self['from'] = self._read_lps()
self['message'] = self._read_lps()
try:
self['rtf-message'] = self._read_lps()
except struct.error:
self['rtf-message'] = ' '
elif self.typ == MRIM_CS_MESSAGE_RECV:
self['from'] = self._read_lps()
self['msg_id'] = self._read_ul()
elif self.typ == MRIM_CS_MESSAGE_STATUS:
self['status'] = self._read_ul()
elif self.typ == MRIM_CS_USER_STATUS:
self['status'] = self._read_ul()
self['user'] = self._read_lps()
elif self.typ == MRIM_CS_LOGOUT:
self['reason'] = self._read_ul()
elif self.typ == MRIM_CS_CONNECTION_PARAMS:
self['ping_period'] = self._read_ul()
elif self.typ == MRIM_CS_ADD_CONTACT:
self['flags'] = self._read_ul()
self['group_id'] = self._read_ul()
self['email'] = self._read_lps()
self['name'] = self._read_lps()
			self['phones'] = self._read_lps()
self['text'] = self._read_lps()
elif self.typ == MRIM_CS_ADD_CONTACT_ACK:
self['status'] = self._read_ul()
current_position = self.io.tell()
next_char = self.io.read(1)
if next_char:
self.io.seek(current_position)
self['contact_id'] = self._read_ul()
else:
return
elif self.typ == MRIM_CS_MODIFY_CONTACT:
self['id'] = self._read_ul()
self['flags'] = self._read_ul()
self['group_id'] = self._read_ul()
self['contact'] = self._read_lps()
self['name'] = self._read_lps()
self['phones'] = self._read_lps()
elif self.typ == MRIM_CS_MODIFY_CONTACT_ACK:
self['status'] = self._read_ul()
elif self.typ == MRIM_CS_OFFLINE_MESSAGE_ACK:
self['uidl'] = self._read_uidl()
self['message'] = self._read_lps()
elif self.typ == MRIM_CS_DELETE_OFFLINE_MESSAGE:
self['uidl'] = self._read_uidl()
elif self.typ == MRIM_CS_AUTHORIZE:
self['user'] = self._read_lps()
elif self.typ == MRIM_CS_AUTHORIZE_ACK:
self['user'] = self._read_lps()
elif self.typ == MRIM_CS_CHANGE_STATUS:
self['status'] = self._read_ul()
elif self.typ == MRIM_CS_GET_MPOP_SESSION_ACK:
self['status'] = self._read_ul()
self['session'] = self._read_lps()
elif self.typ == MRIM_CS_WP_REQUEST:
current_position = self.io.tell()
while 1:
next_char = self.io.read(1)
if next_char:
self.io.seek(current_position)
field = self._read_ul()
self[field] = self._read_lps()
current_position = self.io.tell()
else:
break
elif self.typ == MRIM_CS_ANKETA_INFO:
self['status'] = self._read_ul()
self['fields_num'] = self._read_ul()
self['max_rows'] = self._read_ul()
self['server_time'] = self._read_ul()
self['fields'] = [self._read_lps() for i in range(self['fields_num'])]
self['values'] = []
current_position = self.io.tell()
while 1:
next_char = self.io.read(1)
if next_char:
self.io.seek(current_position)
self['values'].append(tuple([self._read_lps() for i in range(self['fields_num'])]))
current_position = self.io.tell()
else:
break
elif self.typ == MRIM_CS_MAILBOX_STATUS:
self['count'] = self._read_ul()
self['sender'] = self._read_lps()
self['subject'] = self._read_lps()
self['unix_time'] = self._read_ul()
self['key'] = self._read_ul()
elif self.typ == MRIM_CS_MAILBOX_STATUS_OLD:
self['status'] = self._read_ul()
elif self.typ == MRIM_CS_CONTACT_LIST2:
self['status'] = self._read_ul()
if self['status'] == GET_CONTACTS_OK:
self['groups_number'] = self._read_ul()
self['groups_mask'] = self._read_lps()
self['contacts_mask'] = self._read_lps()
self['groups'] = [
self._read_masked_field(self['groups_mask']) \
for i in range(self['groups_number'])
]
self['contacts'] = []
while 1:
current_position = self.io.tell()
next_char = self.io.read(1)
if next_char:
self.io.seek(current_position)
self['contacts'].append(
self._read_masked_field(self['contacts_mask'])
)
else:
break
else:
self['groups_number'] = 0
self['groups_mask'] = self['contacts_mask'] = ''
self['groups'] = self['contacts'] = []
elif self.typ == MRIM_CS_LOGIN2:
self['login'] = self._read_lps()
self['password'] = self._read_lps()
self['status'] = self._read_ul()
self['user_agent'] = self._read_lps()
elif self.typ == MRIM_CS_SMS:
self['UNKNOWN'] = self._read_ul()
self['number'] = self._read_lps()
self['text'] = self._read_lps()
elif self.typ == MRIM_CS_SMS_ACK:
self['status'] = self._read_ul()
elif self.typ == MRIM_CS_USER_INFO:
current_position = self.io.tell()
while 1:
next_char = self.io.read(1)
if next_char:
self.io.seek(current_position)
field = self._read_lps()
if field == 'MESSAGES.TOTAL':
self['total'] = int(self._read_lps())
elif field == 'MESSAGES.UNREAD':
self['unread'] = int(self._read_lps())
elif field == 'MRIM.NICKNAME':
self['nickname'] = self._read_lps()
else:
self[field] = self._read_lps()
current_position = self.io.tell()
else:
break
def _dict2str(self, dict):
self.io = cStringIO.StringIO()
if self.typ == MRIM_CS_HELLO_ACK:
self._write_ul(dict['ping_period'])
elif self.typ == MRIM_CS_LOGIN_REJ:
self._write_lps(dict['reason'])
elif self.typ == MRIM_CS_MESSAGE:
self._write_ul(dict['flags'])
self._write_lps(dict['to'])
self._write_lps(dict['message'])
self._write_lps(dict['rtf-message'])
elif self.typ == MRIM_CS_MESSAGE_ACK:
self._write_ul(dict['msg_id'])
self._write_ul(dict['flags'])
self._write_lps(dict['from'])
self._write_lps(dict['message'])
self._write_lps(dict['rtf-message'])
elif self.typ == MRIM_CS_MESSAGE_RECV:
self._write_lps(dict['from'])
self._write_ul(dict['msg_id'])
elif self.typ == MRIM_CS_MESSAGE_STATUS:
self._write_ul(dict['status'])
elif self.typ == MRIM_CS_USER_STATUS:
self._write_ul(dict['status'])
self._write_lps(dict['user'])
elif self.typ == MRIM_CS_LOGOUT:
self._write_ul(dict['reason'])
elif self.typ == MRIM_CS_CONNECTION_PARAMS:
self._write_ul(dict['ping_period'])
elif self.typ == MRIM_CS_ADD_CONTACT:
self._write_ul(dict['flags'])
self._write_ul(dict['group_id'])
self._write_lps(dict['email'])
self._write_lps(dict['name'])
self._write_lps(dict['phones'])
self._write_lps(dict['text'])
elif self.typ == MRIM_CS_ADD_CONTACT_ACK:
self._write_ul(dict['status'])
self._write_ul(dict['contact_id'])
elif self.typ == MRIM_CS_MODIFY_CONTACT:
self._write_ul(dict['id'])
self._write_ul(dict['flags'])
self._write_ul(dict['group_id'])
self._write_lps(dict['contact'])
self._write_lps(dict['name'])
self._write_lps(dict['phones'])
elif self.typ == MRIM_CS_MODIFY_CONTACT_ACK:
self._write_ul(dict['status'])
elif self.typ == MRIM_CS_OFFLINE_MESSAGE_ACK:
self._write_uidl(dict['uidl'])
self._write_lps(dict['message'])
elif self.typ == MRIM_CS_DELETE_OFFLINE_MESSAGE:
self._write_uidl(dict['uidl'])
elif self.typ == MRIM_CS_AUTHORIZE:
self._write_lps(dict['user'])
elif self.typ == MRIM_CS_AUTHORIZE_ACK:
self._write_lps(dict['user'])
elif self.typ == MRIM_CS_CHANGE_STATUS:
self._write_ul(dict['status'])
elif self.typ == MRIM_CS_GET_MPOP_SESSION_ACK:
self._write_ul(dict['status'])
self._write_lps(dict['session'])
elif self.typ == MRIM_CS_WP_REQUEST:
for k,v in [(p,s) for p,s in dict.items() if p != MRIM_CS_WP_REQUEST_PARAM_ONLINE]:
self._write_ul(k)
self._write_lps(v)
if dict.has_key(MRIM_CS_WP_REQUEST_PARAM_ONLINE):
self._write_ul(MRIM_CS_WP_REQUEST_PARAM_ONLINE)
self._write_lps(dict[MRIM_CS_WP_REQUEST_PARAM_ONLINE])
elif self.typ == MRIM_CS_ANKETA_INFO:
self._write_ul(dict['status'])
self._write_ul(dict['fields_num'])
self._write_ul(dict['max_rows'])
self._write_ul(dict['server_time'])
for field in dict['fields']:
self._write_lps(field)
for value in dict['values']:
self._write_lps(value)
elif self.typ == MRIM_CS_MAILBOX_STATUS:
self._write_ul(dict['status'])
elif self.typ == MRIM_CS_LOGIN2:
self._write_lps(dict['login'])
self._write_lps(dict['password'])
self._write_ul(dict['status'])
self._write_lps(dict['user_agent'])
elif self.typ == MRIM_CS_SMS:
self._write_ul(dict['UNKNOWN'])
self._write_lps(dict['number'])
self._write_lps(dict['text'])
self.io.seek(0)
return self.io.read()
def _read_ul(self):
return struct.unpack('I', self.io.read(4))[0]
def _read_lps(self):
return self.io.read(self._read_ul())
def _read_uidl(self):
return self.io.read(8)
def _write_ul(self, ul):
self.io.write(struct.pack('I', ul))
def _write_lps(self, lps):
self._write_ul(len(lps))
self.io.write(lps)
def _write_uidl(self, uidl):
self.io.write(uidl[:8])
def _read_masked_field(self, mask):
group = []
for i in range(len(mask)):
symbol = mask[i]
if symbol == 'u':
group.append(self._read_ul())
elif symbol == 's':
group.append(self._read_lps())
return tuple(group)
class MMPPacket:
def __init__(self,typ=0,seq=0,fromip='0.0.0.0',fromport='0',dict={},packet=''):
self.header = ''
self.body = ''
self.typ = typ
if packet:
raw_header = packet[:44]
try:
magic = struct.unpack('I', raw_header[:4])[0]
except:
magic = 0
if magic == CS_MAGIC:
self.header = MMPHeader(header=raw_header)
if self.header:
self.typ = self.header['msg']
dlen = self.header['dlen']
self.body = MMPBody(typ=self.typ,body=packet[44:44+dlen])
else:
self.body = MMPBody(self.typ,dict)
dlen = len(self.body.__str__())
self.header = MMPHeader(self.typ,dlen,seq,fromip,fromport)
self.setHeaderAttr('seq', utils.seq())
def __str__(self):
return self.header.__str__() + self.body.__str__()
def getRawVersion(self):
return self.header['proto']
def getVersion(self):
p = self.getRawVersion()
return '%s.%s' % (utils.get_proto_major(p), utils.get_proto_minor(p))
def getType(self):
return self.header['msg']
def getHeader(self):
return self.header
def getBody(self):
return self.body
def getBodyAttr(self, attr):
return self.body[attr]
def getHeaderAttr(self, attr):
return self.header[attr]
def setHeaderAttr(self, attr, val):
self.header[attr] = val
def setBodyAttr(self, attr, val):
self.body[attr] = val
self.body = MMPBody(self.getType(),dict=self.body)
self.setHeaderAttr('dlen', len(self.body.__str__()))
def setIp(self, ip):
self.setHeaderAttr('from', ip)
def setPort(self, port):
self.setHeaderAttr('fromport', port)
def setType(self, new_typ):
		self.setHeaderAttr('msg', new_typ)
def setId(self, _id):
self.setHeaderAttr('seq', _id)
def getId(self):
return self.getHeaderAttr('seq')
def setMsgId(self, msg_id):
self.setBodyAttr('msg_id', msg_id)
def getMsgId(self):
if self.getBody().has_key('msg_id'):
return self.getBodyAttr('msg_id')
class Message(MMPPacket):
def __init__(self,to='',body=' ',flags=[],payload=None):
if not payload:
d = {}
flags_sum = 0
for f in flags:
flags_sum += f
d['flags'] = flags_sum & MESSAGE_USERFLAGS_MASK
d['to'] = to
d['message'] = body
if MESSAGE_FLAG_RTF in flags:
d['rtf-message'] = utils.pack_rtf(body)
else:
d['rtf-message'] = ' '
MMPPacket.__init__(self,typ=MRIM_CS_MESSAGE,dict=d)
self.setHeaderAttr('seq', utils.seq())
else:
MMPPacket.__init__(self,typ=payload.getType(),dict=payload.getBody())
def getTo(self):
return self.getBodyAttr('to')
def getFrom(self):
return self.getBodyAttr('from')
def getBodyPayload(self):
return utils.win2str(self.getBodyAttr('message'))
def getFlags(self):
flag_code = self.getBodyAttr('flags')
flags = []
for f in message_flags:
x = flag_code & f
if x:
flags.append(x)
return flags
def hasFlag(self, flag):
return flag in self.getFlags()
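# Illustrative example (not executed; the recipient address is hypothetical):
#   msg = Message(to='friend@mail.ru', body='hello')
#   raw = str(msg)  # 44-byte header followed by the packed body, ready for the socket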
class OfflineMessage(UserDict.UserDict):
def __init__(self, data):
UserDict.UserDict.__init__(self)
self.msg = email.message_from_string(data)
self.boundary = self.msg['Boundary']
self.payload = self.msg.get_payload().split('--%s--' % self.boundary)
self['from'] = self.msg['From']
self['date'] = parsedate(self.msg['Date'])
self['subject'] = self.msg['Subject']
self['flags'] = eval('0x'+self.msg['X-MRIM-Flags'])
self['version'] = self.msg['Version']
self['message'] = utils.win2str(self.payload[0].strip())
self['rtf-message'] = self.payload[1].strip()
def buildMessage(self):
d = {
'msg_id':0,
'flags':self['flags'],
'from':self['from'],
'message':self.payload[0].strip(),
'rtf-message':self['rtf-message']
}
m = MMPPacket(typ=MRIM_CS_MESSAGE_ACK,dict=d)
return Message(payload=m)
def getUTCTime(self):
return utils.msk2utc(self['date'])
class Anketa(MMPPacket):
def __init__(self, data):
MMPPacket.__init__(self,packet=data)
def getStatus(self):
return self.getBodyAttr('status')
def getFields(self):
return self.getBodyAttr('fields')
def getVCards(self):
vcards = []
fields = self.getFields()
for card in self.getBodyAttr('values'):
card_dict = {}
for n in range(self.getBodyAttr('fields_num')):
card_dict[fields[n]] = utils.win2str(card[n])
vcards.append(card_dict)
return vcards
class ContactList:
def __init__(self, packet=None):
self.cids = {}
self.users = {}
		self.groups = {}
if packet:
self.packet = packet
self.users = self.getUsers()
self.groups = self.getGroups()
i = 0
for u in self.packet.getBodyAttr('contacts'):
_id = 20+i
if (u[0] & CONTACT_FLAG_SMS):
self.cids[u[6]] = _id
else:
self.cids[u[2]] = _id
i += 1
def getGroups(self):
d = {}
for g in self.packet.getBodyAttr('groups'):
d[g[0]] = {'name':utils.win2str(g[1])}
return d
def getUsers(self):
d = {}
for u in self.packet.getBodyAttr('contacts'):
contact = {
'flags':u[0],
'group':u[1],
'nick':utils.win2str(u[3]),
'server_flags':u[4],
'status':u[5],
'phones':u[6]
}
if (u[0] & CONTACT_FLAG_SMS):
d[u[6]] = contact
else:
d[u[2]] = contact
return d
def getEmails(self):
return self.users.keys()
def getUserFlags(self, mail):
return self.users[mail]['flags']
def isValidUser(self, mail):
return not (self.isIgnoredUser(mail) or self.isRemovedUser(mail) or self.isSMSNumber(mail))
def isIgnoredUser(self, mail):
flags = self.getUserFlags(mail)
return bool(flags & CONTACT_FLAG_IGNORE)
def isRemovedUser(self, mail):
flags = self.getUserFlags(mail)
return bool(flags & CONTACT_FLAG_REMOVED)
def isSMSNumber(self, phone):
return not utils.is_valid_email(phone)
def getUserId(self, mail):
return self.cids[mail]
def setUserId(self, mail, _id):
self.cids[mail] = _id
def getUserStatus(self, mail):
status = 1
if utils.is_valid_email(mail):
status = self.users[mail]['status']
return status
def setUserStatus(self, mail, status):
self.users[mail]['status'] = status
def getAuthFlag(self, mail):
return self.users[mail]['server_flags']
def setAuthFlag(self, mail, flag):
self.users[mail]['server_flags'] = flag
def isAuthorized(self, mail):
return not bool(self.getAuthFlag(mail) & 0x1)
def getUserGroup(self, mail):
return self.users[mail]['group']
def setUserGroup(self, mail, gid):
self.users[mail]['group'] = gid
def getUserNick(self, mail):
return self.users[mail]['nick']
def setUserNick(self, mail, nick):
self.users[mail]['nick'] = nick
def delUser(self, mail):
return self.users.pop(mail)
def delGroup(self, gid):
return self.groups.pop(gid)
def getGroupName(self, gid):
name = 'unknown'
try:
name = self.groups[gid]
except KeyError:
pass
return name
def setGroupName(self, gid, name):
self.groups[gid] = name
def getGroupMembers(self, gid):
members = []
for u in self.users:
if self.getUserGroup(u) == gid:
members.append(u)
return members
def getPhones(self, mail):
phones = self.users[mail]['phones']
if phones:
return phones.split(',')
else:
return []
def setPhones(self, mail, phones):
self.users[mail]['phones'] = ','.join(phones[:3])
| gpl-3.0 | -3,714,155,334,947,474,000 | 26.613409 | 107 | 0.631916 | false | 2.664418 | false | false | false |
gw280/skia | tools/test_pictures.py | 1 | 6084 | '''
Compares the renderings of serialized SkPictures to expected images.
Launch with --help to see more information.
Copyright 2012 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
# common Python modules
import os
import optparse
import sys
import shutil
import tempfile
USAGE_STRING = 'Usage: %s input... expectedDir'
HELP_STRING = '''
Compares the renderings of serialized SkPicture files and directories specified
by input with the images in expectedDir. Note: files in directories are
expected to end with .skp.
'''
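# Example invocations (paths and tile sizes are illustrative):
#   python test_pictures.py skp_dir/ expected_images/
#   python test_pictures.py pic1.skp pic2.skp expected_images/ --mode tile 256 256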
def RunCommand(command):
"""Run a command.
@param command the command as a single string
"""
print 'running command [%s]...' % command
os.system(command)
def FindPathToProgram(program):
"""Return path to an existing program binary, or raise an exception if we
cannot find one.
@param program the name of the program that is being looked for
"""
trunk_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir))
possible_paths = [os.path.join(trunk_path, 'out', 'Release', program),
os.path.join(trunk_path, 'out', 'Debug', program),
os.path.join(trunk_path, 'out', 'Release',
program + ".exe"),
os.path.join(trunk_path, 'out', 'Debug',
program + ".exe")]
for try_path in possible_paths:
if os.path.isfile(try_path):
return try_path
raise Exception('cannot find %s in paths %s; maybe you need to '
'build %s?' % (program, possible_paths, program))
def RenderImages(inputs, render_dir, options):
"""Renders the serialized SkPictures.
Uses the render_pictures program to do the rendering.
  @param inputs the location(s) to read the serialized SkPictures
@param render_dir the location to write out the rendered images
"""
renderer_path = FindPathToProgram('render_pictures')
inputs_as_string = " ".join(inputs)
command = '%s %s %s' % (renderer_path, inputs_as_string, render_dir)
if (options.mode is not None):
command += ' --mode %s' % ' '.join(options.mode)
if (options.device is not None):
command += ' --device %s' % options.device
RunCommand(command)
def DiffImages(expected_dir, comparison_dir, diff_dir):
"""Diffs the rendered SkPicture images with the baseline images.
Uses the skdiff program to do the diffing.
@param expected_dir the location of the baseline images.
  @param comparison_dir the location of the images to compare with the
baseline
@param diff_dir the location to write out the diff results
"""
skdiff_path = FindPathToProgram('skdiff')
RunCommand('%s %s %s %s %s' %
(skdiff_path, expected_dir, comparison_dir, diff_dir,
'--noprintdirs'))
def Cleanup(options, render_dir, diff_dir):
"""Deletes any temporary folders and files created.
@param options The OptionParser object that parsed if render_dir or diff_dir
was set
@param render_dir the directory where the rendered images were written
@param diff_dir the directory where the diff results were written
"""
if (not options.render_dir):
if (os.path.isdir(render_dir)):
shutil.rmtree(render_dir)
if (not options.diff_dir):
if (os.path.isdir(diff_dir)):
shutil.rmtree(diff_dir)
def ModeParse(option, opt_str, value, parser):
"""Parses the --mode option of the commandline.
The --mode option will either take in three parameters (if tile or
pow2tile) or a single parameter (otherwise).
"""
result = [value]
if value == "tile":
if (len(parser.rargs) < 2):
raise optparse.OptionValueError(("--mode tile mising width"
" and/or height parameters"))
result.extend(parser.rargs[:2])
del parser.rargs[:2]
elif value == "pow2tile":
if (len(parser.rargs) < 2):
raise optparse.OptionValueError(("--mode pow2tile mising minWidth"
" and/or height parameters"))
result.extend(parser.rargs[:2])
del parser.rargs[:2]
setattr(parser.values, option.dest, result)
def Main(args):
"""Allow other scripts to call this script with fake command-line args.
@param The commandline argument list
"""
parser = optparse.OptionParser(USAGE_STRING % '%prog' + HELP_STRING)
parser.add_option('--render_dir', dest='render_dir',
help = ("specify the location to output the rendered files."
" Default is a temp directory."))
parser.add_option('--diff_dir', dest='diff_dir',
help = ("specify the location to output the diff files."
" Default is a temp directory."))
parser.add_option('--mode', dest='mode', type='string',
action="callback", callback=ModeParse,
help = ("specify how rendering is to be done."))
parser.add_option('--device', dest='device',
help = ("specify the device to render to."))
options, arguments = parser.parse_args(args)
if (len(arguments) < 3):
print("Expected at least one input and one ouput folder.")
parser.print_help()
sys.exit(-1)
inputs = arguments[1:-1]
expected_dir = arguments[-1]
if (options.render_dir):
render_dir = options.render_dir
else:
render_dir = tempfile.mkdtemp()
if (options.diff_dir):
diff_dir = options.diff_dir
else:
diff_dir = tempfile.mkdtemp()
try:
RenderImages(inputs, render_dir, options)
DiffImages(expected_dir, render_dir, diff_dir)
finally:
Cleanup(options, render_dir, diff_dir)
if __name__ == '__main__':
Main(sys.argv)
| bsd-3-clause | -5,432,955,374,800,524,000 | 33.179775 | 80 | 0.609796 | false | 4.11916 | false | false | false |
nosix/PyCraft | src/pycraft/service/composite/entity/monster.py | 1 | 1206 | # -*- coding: utf8 -*-
from pycraft.service.const import EntityType
from pycraft.service.primitive.geometry import Size
from .base import MobEntity
from .player import PlayerEntity
class MonsterEntity(MobEntity):
def has_hostile(self, entity):
return isinstance(entity, PlayerEntity)
class Zombie(MonsterEntity):
TYPE = EntityType.ZOMBIE
STRENGTH = 10
BODY_SIZE = Size(0.6, 0.6, 1.95)
VIEW_DISTANCE = 64
VIEW_ANGLE_H = 60
VIEW_ANGLE_V = 30
class Skeleton(MonsterEntity):
TYPE = EntityType.SKELTON
STRENGTH = 10
BODY_SIZE = Size(0.6, 0.6, 1.8)
VIEW_DISTANCE = 64
VIEW_ANGLE_H = 60
VIEW_ANGLE_V = 30
class Creeper(MonsterEntity):
TYPE = EntityType.CREEPER
STRENGTH = 10
BODY_SIZE = Size(0.6, 0.6, 1.8)
VIEW_DISTANCE = 64
VIEW_ANGLE_H = 60
VIEW_ANGLE_V = 30
class Spider(MonsterEntity):
TYPE = EntityType.SPIDER
STRENGTH = 8
BODY_SIZE = Size(1.4, 1.4, 0.9)
VIEW_DISTANCE = 32
def can_climb(self):
return True
class Enderman(MonsterEntity):
TYPE = EntityType.ENDERMAN
STRENGTH = 20
BODY_SIZE = Size(0.6, 0.6, 2.9)
VIEW_ANGLE_H = 90
VIEW_ANGLE_V = 10
| lgpl-3.0 | 2,602,586,687,267,260,000 | 18.451613 | 51 | 0.64262 | false | 2.963145 | false | false | false |
droundy/deft | papers/thesis-kirstie/figs/plot_LJ_Potential.py | 1 | 1142 | #!/usr/bin/python3
#RUN this program from the directory it is listed in
#with command ./plot_LJ_Potential.py
from scipy import special
import numpy as np
import matplotlib.pyplot as plt
import math
#Plot LJ Potential vs r
#R=1/1.781797436 #for a sigma=1 DOESN'T WORK!! graph wrong shape!
R=1/1.781797436
epsilon=1
sigma=1
#print sigma
#r=np.linspace(.1, 2*R, 200)
#r=np.linspace(.9, 4, 200) #SAVE!!! for plotting r
r=np.linspace(.9, 2.5, 200)
r_dless=sigma/r #plot dimensionless quantity!
sigma_over_r_to_pow6=(r_dless)*(r_dless)*(r_dless)*(r_dless)*(r_dless)*(r_dless)
#V=4*epsilon*(sigma_over_r_to_pow6*sigma_over_r_to_pow6 - sigma_over_r_to_pow6) + epsilon #WCA potential
#V=4*epsilon*(sigma_over_r_to_pow6*sigma_over_r_to_pow6 - sigma_over_r_to_pow6) #LJ potential but looks like WCA
V=4*epsilon*(sigma_over_r_to_pow6*sigma_over_r_to_pow6 - sigma_over_r_to_pow6) #LJ potential
plt.plot(1/r_dless,V)
plt.xlim(right=2.5)
plt.ylim(top=V.max())
plt.xlabel('r/$\sigma$')
#plt.xlabel('r')
plt.ylabel('V(r)/$\epsilon$')
plt.title('Lennard-Jones Potential')
#plt.legend()
plt.savefig("LJ_Potential.pdf")
# plt.show()
| gpl-2.0 | -5,032,273,101,040,815,000 | 23.826087 | 113 | 0.697023 | false | 2.344969 | false | false | false |
MRCIEU/melodi | melodi/settings.py | 1 | 8804 | """
Django settings for melodi project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from datetime import timedelta
from celery.schedules import crontab,timedelta
from django.core.urlresolvers import reverse_lazy
import config
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config.secret_key
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
#ALLOWED_HOSTS = []
#Add this for public
ALLOWED_HOSTS = ['melodi.biocompute.org.uk','www.melodi.biocompute.org.uk','melodi.mrcieu.ac.uk']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'browser',
'social_auth',
'django.contrib.humanize'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
#'django.middleware.cache.UpdateCacheMiddleware', #need this for cache
'django.middleware.common.CommonMiddleware',
#'django.middleware.cache.FetchFromCacheMiddleware', #need this for cache
)
AUTHENTICATION_BACKENDS = (
'social_auth.backends.google.GoogleOAuth2Backend',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_ENABLED_BACKENDS = ('google')
LOGIN_URL = '/login/'
LOGIN_ERROR_URL = '/login-error/'
LOGIN_REDIRECT_URL = reverse_lazy('home')
GOOGLE_OAUTH2_CLIENT_ID = '744265706742-h9l3etr7pdboc8d0h0b14biiemtfsbvb.apps.googleusercontent.com'
GOOGLE_OAUTH2_CLIENT_SECRET = 'BsQyz4BxaC82kYD_O5UHcgaF'
#GOOGLE_WHITE_LISTED_DOMAINS = ['bristol.ac.uk']
SOCIAL_AUTH_USER_MODEL = 'auth.User'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'social_auth.context_processors.social_auth_by_type_backends'
)
ROOT_URLCONF = 'melodi.urls'
APPEND_SLASH = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'browser/templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'melodi.wsgi.application'
SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
#'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#}
'default': {
'ENGINE': 'django.db.backends.mysql',
'OPTIONS': {
'read_default_file': '/var/django/melodi/mysql.cnf',
},
}
}
# NEO4J_DATABASES = {
# 'default' : {
# 'HOST':'10.0.2.2',
# 'PORT':7474,
# 'ENDPOINT':'/db/data'
# }
# }
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
#STATIC_ROOT = '/var/django/melodi/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static/")
STATIC_URL = '/static/'
MEDIA_ROOT = '/var/django/melodi/'
DATA_FOLDER = os.path.join(BASE_DIR,"data/")
# CELERY SETTINGS
BROKER_URL = 'redis://localhost:6379/0'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
CELERY_ACKS_LATE = True
#restart the worker process after every task to avoid memory leaks
CELERYD_MAX_TASKS_PER_CHILD = 1
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt' : "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'file': {
#'level': 'WARNING',
'class': 'logging.FileHandler',
'filename': os.path.join(BASE_DIR, 'debug.log'),
#'filename': '/tmp/debug.log',
'formatter': 'verbose'
},
'console': {
'level': 'WARNING',
'class': 'logging.StreamHandler',
},
},
'loggers': {
#'django': {
# 'handlers':['file'],
# 'propagate': True,
# 'level':'INFO',
#},
'celery': {
'handlers': ['console'],
'propagate': False,
'level': 'WARNING',
},
'browser': {
'handlers': ['file'],
'level': 'DEBUG',
},
}
}
#CACHE_MIDDLEWARE_ALIAS = 'default'
#CACHE_MIDDLEWARE_SECONDS = 60480000
#CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
#"SOCKET_TIMEOUT": 50,
},
"KEY_PREFIX": "melodi",
'TIMEOUT': None
}
}
#CACHES = {
# 'default': {
# #'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
# 'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
# 'LOCATION': 'melodi_cache',
# 'TIMEOUT': None
# }
#}
CELERYBEAT_SCHEDULE = {
#'t1': {
# 'task': 'tasks.test_scheduler',
# 'schedule': timedelta(seconds=10),
#},
#update pubmed-mesh relationships every dat at 3am
'dm': {
'task': 'tasks.daily_mesh',
#'schedule': timedelta(hours=1),
'schedule': crontab(hour=3, minute=0),#
},
#'neo': {
# 'task': 'tasks.neo4j_check',
# #'schedule': timedelta(hours=1),
# 'schedule': timedelta(minutes=30),#
#},
}
# Logging
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': True,
# 'filters': {
# 'require_debug_false': {
# '()': 'django.utils.log.RequireDebugFalse'
# }
# },
# 'formatters': {
# 'verbose': {
# 'format': '[%(asctime)s] %(levelname)-8s %(process)d %(thread)d %(name)s:%(message)s',
# 'datefmt': '%Y-%m-%d %a %H:%M:%S'
# },
# },
# 'handlers': {
# 'null': {
# 'level': 'DEBUG',
# 'class': 'django.utils.log.NullHandler',
# },
# 'console': {
# 'level': 'DEBUG',
# 'class': 'logging.StreamHandler',
# 'formatter': 'verbose'
# },
# 'local_file': {
# 'level': 'DEBUG',
# 'class': 'logging.handlers.RotatingFileHandler',
# 'formatter': 'verbose',
# #'filename': '%s/debug.log' % APP_ROOT,
# 'filename': os.path.join(BASE_DIR, 'debug2.log'),
# 'maxBytes': 1024 * 1024 * 10,
# },
# 'syslog': {
# 'level': 'INFO',
# 'class': 'logging.handlers.SysLogHandler',
# },
# 'mail_admins': {
# 'level': 'ERROR',
# 'filters': ['require_debug_false'],
# 'class': 'django.utils.log.AdminEmailHandler',
# 'include_html': True,
# }
# },
# 'loggers': {
# 'django': {
# 'handlers': ['null'],
# 'propagate': True,
# 'level': 'INFO',
# },
# 'django.request': {
# 'handlers': ['mail_admins', 'console', 'local_file'],
# 'level': 'ERROR',
# 'propagate': False,
# },
# },
# 'root': {
# 'handlers': ['console', 'local_file'],
# 'level': 'DEBUG',
# }
# }
| mit | -2,412,759,635,708,730,400 | 26.860759 | 100 | 0.578828 | false | 3.29491 | false | false | false |
JonathanSeguin/Mariana | Mariana/regularizations.py | 1 | 1924 | from Mariana.abstraction import Abstraction_ABC
__all__ = ["SingleLayerRegularizer_ABC", "L1", "L2", "ActivationL1"]
class SingleLayerRegularizer_ABC(Abstraction_ABC) :
"""An abstract regularization to be applied to a layer."""
def apply(self, layer) :
"""Apply to a layer and update networks's log"""
hyps = {}
for k in self.hyperParameters :
hyps[k] = getattr(self, k)
message = "%s uses %s regularization" % (layer.name, self.__class__.__name__)
layer.network.logLayerEvent(layer, message, hyps)
return self.getFormula(layer)
def getFormula(self, layer) :
"""Returns the expression to be added to the cost"""
raise NotImplemented("Must be implemented in child")
class L1(SingleLayerRegularizer_ABC) :
"""
Will add this to the cost. Weights will tend towards 0
resulting in sparser weight matrices.
.. math::
factor * abs(Weights)
"""
def __init__(self, factor) :
SingleLayerRegularizer_ABC.__init__(self)
self.factor = factor
self.hyperParameters = ["factor"]
def getFormula(self, layer) :
return self.factor * ( abs(layer.parameters["W"]).sum() )
class L2(SingleLayerRegularizer_ABC) :
"""
Will add this to the cost. Causes the weights to stay small
.. math::
factor * (Weights)^2
"""
def __init__(self, factor) :
SingleLayerRegularizer_ABC.__init__(self)
self.factor = factor
self.hyperParameters = ["factor"]
def getFormula(self, layer) :
return self.factor * ( (layer.parameters["W"] ** 2).sum() )
class ActivationL1(SingleLayerRegularizer_ABC) :
"""
L1 on the activations. Neurone activations will tend towards
0, resulting into sparser representations.
Will add this to the cost
.. math::
factor * abs(activations)
"""
def __init__(self, factor) :
SingleLayerRegularizer_ABC.__init__(self)
self.factor = factor
self.hyperParameters = ["factor"]
def getFormula(self, layer) :
return self.factor * ( abs(layer.outputs).sum() ) | apache-2.0 | -8,295,609,861,615,857,000 | 26.898551 | 79 | 0.692308 | false | 3.228188 | false | false | false |
crentagon/chess-with-benefits | game/chess/show_piece_stats.py | 1 | 2090 |
def run(self, board_input, i, j):
origin_piece = board_input[i][j].piece
max_control = {
1: 2,
3: 8,
4: 13,
5: 14,
9: 27,
0: 8
}
origin_piece.status = 'Healthy'
is_threatened_undefended = len(origin_piece.attackers) > len(origin_piece.defenders)
is_threatened_by_lower_rank = [x for x in origin_piece.attackers if x < origin_piece.piece_type]
is_ample_activity = origin_piece.tiles_controlled > 0.6*max_control[origin_piece.piece_type]
offensive_power = len(origin_piece.offensive_power)
defensive_power = len(origin_piece.defensive_power)
# Threatened (being attacked by a piece without being defended OR being attacked by a piece of lower rank)
if is_threatened_by_lower_rank or is_threatened_undefended:
origin_piece.status = 'Threatened'
# Warrior (attacking at least one piece OR in a valuable position OR at 60% maximum activity)
elif offensive_power >= 2 or is_ample_activity:
origin_piece.status = 'Warrior'
# Defender (defending at least two pieces)
elif defensive_power >= 2:
origin_piece.status = 'Defender'
self.piece_stats = {
'is_piece_white': origin_piece.is_white,
'piece_type': origin_piece.piece_type,
'tile_control_count': origin_piece.tiles_controlled,
'defenders': origin_piece.defenders,
'attackers': origin_piece.attackers,
'defensive_power': origin_piece.defensive_power,
'offensive_power': origin_piece.offensive_power,
'status': origin_piece.status
}
# "Status":
# Defender/Royal Defender (defending at least two pieces/Defending the King)
# Warrior (attacking at least one piece OR in a valuable position OR at 60% maximum activity)
# Healthy (default)
# Threatened (being attacked by a piece without being defended OR being attacked by a piece of lower rank)
# Note: place its value right next to it
# Number of tiles controlled: "Tile Control Count: " // add counter at the bottom
# Number of pieces attacking it: "Attackers: "
# Number of pieces defending it: "Supporters: "
# Number of pieces it is attacking: "Offensive power: "
# Number of pieces it is defending: "Defensive power: " | gpl-3.0 | -6,851,965,051,956,522,000 | 36.339286 | 107 | 0.728708 | false | 2.906815 | false | false | false |
leahrnh/ticktock_text_api | breakdown_detector.py | 1 | 1533 | import readall
import gensim
import nltk
import numpy as np
import pickle
# we need to extract some features, now we make it easy now to just use the word2vec, one turn previous turn.
#
model = gensim.models.Word2Vec.load('/tmp/word2vec_50_break')
all_v1 = readall.readall('/home/ubuntu/zhou/Backend/rating_log/v1')
all_v2 = readall.readall('/home/ubuntu/zhou/Backend/rating_log/v2')
all_v3 = readall.readall('/home/ubuntu/zhou/Backend/rating_log/v3')
all_logs = dict(all_v1.items() + all_v2.items() + all_v3.items())
sent_vec = None
for item in all_logs:
print item
conv = all_logs[item]["Turns"]
for turn in conv:
turn_vec_1 = sum(model[nltk.word_tokenize(conv[turn]["You"])])
if len(nltk.word_tokenize(conv[turn]["TickTock"])) ==0:
continue
#print 'TickTock'
#print conv[turn]["TickTock"]
turn_vec_2 = sum(model[nltk.word_tokenize(conv[turn]["TickTock"])])
#print turn_vec_1
#print turn_vec_2
if sent_vec is None:
sent_vec = np.hstack((turn_vec_1,turn_vec_2))
target = np.array(int(conv[turn]["Appropriateness"]))
else:
sent_vec = np.vstack((sent_vec,np.hstack((turn_vec_1,turn_vec_2))))
target = np.hstack((target,int(conv[turn]["Appropriateness"])))
sent = {'data':sent_vec,'target':target}
print sent
with open('sent.pkl','w') as f:
pickle.dump(sent,f)
| gpl-2.0 | 3,179,714,820,388,494,300 | 40.432432 | 109 | 0.589693 | false | 3.275641 | false | false | false |
PercyODI/PythonCSharpOOComparison | Utilities/checkLinks.py | 1 | 1091 | import sys, os, re
pattern = re.compile('\[.+\]\((?P<file>.+?)\)', re.MULTILINE) # Matches [text](directory/file.md)
folderDict = {}
numBadLinks = 0;
os.chdir("..") # Assumes this utility is one directory deep.
startDirectory = os.path.abspath(".")
mdFiles = []
for root, subFolders, files in os.walk("."):
if("\." in root):
continue
for f in files:
if ".md" in f: # Only modify MarkDown files
mdFiles.append(os.path.abspath(os.path.join(root, f)))
for mdFile in mdFiles:
os.chdir(os.path.dirname(mdFile))
fileContent = open(mdFile, 'r')
for lineNum, line in enumerate(fileContent, start=1):
matches = pattern.findall(line)
for match in matches:
if not os.path.isfile(match):
numBadLinks += 1
print("\n")
print(os.path.relpath(mdFile, startDirectory) + ", line " + str(lineNum))
print("\t" + match + " is a bad link.")
print("\n")
if numBadLinks < 1:
print("No Bad Links Found!")
else:
print("Found " + str(numBadLinks) + " bad links.")
| mit | 8,224,985,454,478,719,000 | 30.171429 | 97 | 0.583868 | false | 3.388199 | false | false | false |
ActiveState/code | recipes/Python/577746_Inherit_Method_Docstrings_Using_Only/recipe-577746.py | 1 | 2640 | """docfunc module"""
from deferred_binder import DeferredBinder
class DocFunc(DeferredBinder):
TRIGGER = None
def __init__(self, f):
super().__init__(f.__name__, f)
self.f = self.target
@staticmethod
def transform(name, context, target, obj=None):
"""The DeferredBinder transform for this subclass.
name - the attribute name to which the function will be bound.
context - the class/namespace to which the function will be bound.
target - the function that will be bound.
obj - ignored.
The DeferredBinder descriptor class will replace itself with the
result of this method, when the name to which the descriptor is requested
for the first time. This can be on the class or an instances of the
class.
This way the class to which the method is bound is available so that the
inherited docstring can be identified and set.
"""
namespace, cls = context
doc = target.__doc__
if doc == DocFunc.TRIGGER:
doc = DocFunc.get_doc(cls, name, DocFunc.TRIGGER)
target.__doc__ = doc
return target
@staticmethod
def get_doc(cls, fname, default=TRIGGER, member=True):
"""Returns the function docstring the method should inherit.
cls - the class from which to start looking for the method.
fname - the method name on that class
default - the docstring to return if none is found.
member - is the target function already bound to cls?
"""
print(cls)
bases = cls.__mro__[:]
if member:
bases = bases[1:]
for base in bases:
print(base)
func = getattr(base, fname, None)
if not func:
continue
doc = getattr(func, '__doc__', default)
if doc == default:
continue
return doc
return default
@staticmethod
def inherits_docstring(f, context=None, fname=None, default=TRIGGER):
"""A decorator that returns a new DocFunc object.
f - the function to decorate.
context - the class/namespace where the function is bound, if known.
fname - the function name in that context, if known.
default - the docstring to return if none is found.
"""
if context is not None:
cls, namespace = context
fname = fname or f.__name__
f.__doc__ = DocFunc.get_doc(cls, fname, default, False)
return f
        return DocFunc(f)
| mit | 191,561,495,246,149,060 | 31.592593 | 81 | 0.5875 | false | 4.714286 | false | false | false |
google/cauliflowervest | cauliflowervest/client/base_client.py | 1 | 9496 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base CauliflowerVestClient class."""
import httplib
import json
import logging
import ssl
import time
import urllib
import urllib2
import webbrowser
import httplib2
import oauth2client.client
import oauth2client.tools
from cauliflowervest import settings as base_settings
from cauliflowervest.client import settings
from cauliflowervest.client import util
# Prefix to prevent Cross Site Script Inclusion.
JSON_PREFIX = ")]}',\n"
class Error(Exception):
"""Class for domain specific exceptions."""
class UserAbort(Error):
"""User aborted process."""
class AuthenticationError(Error):
"""There was an error with authentication."""
class RequestError(Error):
"""There was an error interacting with the server."""
class NotFoundError(RequestError):
"""No passphrase was found."""
class MetadataError(Error):
"""There was an error with machine metadata."""
class CauliflowerVestClient(object):
"""Client to interact with the CauliflowerVest service."""
ESCROW_PATH = None # String path to escrow to, set by subclasses.
# Sequence of key names of metadata to require; see GetAndValidateMetadata().
REQUIRED_METADATA = []
# The metadata key under which the passphrase is stored.
PASSPHRASE_KEY = 'passphrase'
MAX_TRIES = 5 # Number of times to try an escrow upload.
TRY_DELAY_FACTOR = 5 # Number of seconds, (* try_num), to wait between tries.
XSRF_PATH = '/xsrf-token/%s'
def __init__(self, base_url, opener, headers=None):
self._metadata = None
self.base_url = base_url
self.xsrf_url = util.JoinURL(base_url, self.XSRF_PATH)
if self.ESCROW_PATH is None:
raise ValueError('ESCROW_PATH must be set by CauliflowerVestClient subclasses.')
self.escrow_url = util.JoinURL(base_url, self.ESCROW_PATH)
self.opener = opener
self.headers = headers or {}
def _GetMetadata(self):
"""Returns a dict of key/value metadata pairs."""
raise NotImplementedError
def RetrieveSecret(self, target_id):
"""Fetches and returns the passphrase.
Args:
target_id: str, Target ID to fetch the passphrase for.
Returns:
str: passphrase.
Raises:
RequestError: there was an error downloading the passphrase.
NotFoundError: no passphrase was found for the given target_id.
"""
xsrf_token = self._FetchXsrfToken(base_settings.GET_PASSPHRASE_ACTION)
url = '%s?%s' % (util.JoinURL(self.escrow_url, urllib.quote(target_id)),
urllib.urlencode({'xsrf-token': xsrf_token}))
request = urllib2.Request(url)
try:
response = self.opener.open(request)
except urllib2.URLError as e: # Parent of urllib2.HTTPError.
if isinstance(e, urllib2.HTTPError):
e.msg += ': ' + e.read()
if e.code == httplib.NOT_FOUND:
raise NotFoundError('Failed to retrieve passphrase. %s' % e)
raise RequestError('Failed to retrieve passphrase. %s' % e)
content = response.read()
if not content.startswith(JSON_PREFIX):
raise RequestError('Expected JSON prefix missing.')
data = json.loads(content[len(JSON_PREFIX):])
return data[self.PASSPHRASE_KEY]
def GetAndValidateMetadata(self):
"""Retrieves and validates machine metadata.
Raises:
MetadataError: one or more of the REQUIRED_METADATA were not found.
"""
if not self._metadata:
self._metadata = self._GetMetadata()
for key in self.REQUIRED_METADATA:
if not self._metadata.get(key, None):
raise MetadataError('Required metadata is not found: %s' % key)
def SetOwner(self, owner):
if not self._metadata:
self.GetAndValidateMetadata()
self._metadata['owner'] = owner
def _FetchXsrfToken(self, action):
request = urllib2.Request(self.xsrf_url % action)
response = self._RetryRequest(request, 'Fetching XSRF token')
return response.read()
def _RetryRequest(self, request, description, retry_4xx=False):
"""Make the given HTTP request, retrying upon failure."""
for k, v in self.headers.iteritems():
request.add_header(k, v)
for try_num in range(self.MAX_TRIES):
try:
return self.opener.open(request)
except urllib2.URLError as e: # Parent of urllib2.HTTPError.
if isinstance(e, urllib2.HTTPError):
e.msg += ': ' + e.read()
# Reraise if HTTP 4xx and retry_4xx is False
if 400 <= e.code < 500 and not retry_4xx:
raise RequestError('%s failed: %s' % (description, e))
# Otherwise retry other HTTPError and URLError failures.
if try_num == self.MAX_TRIES - 1:
logging.exception('%s failed permanently.', description)
raise RequestError(
'%s failed permanently: %s' % (description, e))
logging.warning(
'%s failed with (%s). Retrying ...', description, e)
time.sleep((try_num + 1) * self.TRY_DELAY_FACTOR)
def IsKeyRotationNeeded(self, target_id, tag='default'):
"""Check whether a key rotation is required.
Args:
target_id: str, Target ID.
tag: str, passphrase tag.
Raises:
RequestError: there was an error getting status from server.
Returns:
bool: True if a key rotation is required.
"""
url = '%s?%s' % (
util.JoinURL(
self.base_url, '/api/v1/rekey-required/',
self.ESCROW_PATH, target_id),
urllib.urlencode({'tag': tag}))
request = urllib2.Request(url)
try:
response = self.opener.open(request)
except urllib2.URLError as e: # Parent of urllib2.HTTPError.
if isinstance(e, urllib2.HTTPError):
e.msg += ': ' + e.read()
raise RequestError('Failed to get status. %s' % e)
content = response.read()
if not content.startswith(JSON_PREFIX):
raise RequestError('Expected JSON prefix missing.')
return json.loads(content[len(JSON_PREFIX):])
def UploadPassphrase(self, target_id, passphrase, retry_4xx=False):
"""Uploads a target_id/passphrase pair with metadata.
Args:
target_id: str, Target ID.
passphrase: str, passphrase.
retry_4xx: bool, whether to retry when errors are in the 401-499 range.
Raises:
RequestError: there was an error uploading to the server.
"""
xsrf_token = self._FetchXsrfToken(base_settings.SET_PASSPHRASE_ACTION)
# Ugh, urllib2 only does GET and POST?!
class PutRequest(urllib2.Request):
def __init__(self, *args, **kwargs):
kwargs.setdefault('headers', {})
kwargs['headers']['Content-Type'] = 'application/octet-stream'
urllib2.Request.__init__(self, *args, **kwargs)
self._method = 'PUT'
def get_method(self): # pylint: disable=g-bad-name
return 'PUT'
if not self._metadata:
self.GetAndValidateMetadata()
parameters = self._metadata.copy()
parameters['xsrf-token'] = xsrf_token
parameters['volume_uuid'] = target_id
url = '%s?%s' % (self.escrow_url, urllib.urlencode(parameters))
request = PutRequest(url, data=passphrase)
self._RetryRequest(request, 'Uploading passphrase', retry_4xx=retry_4xx)
def BuildOauth2Opener(credentials):
"""Produce an OAuth compatible urllib2 OpenerDirective."""
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
context.verify_mode = ssl.CERT_REQUIRED
ca_certs_file = settings.ROOT_CA_CERT_CHAIN_PEM_FILE_PATH
context.load_verify_locations(ca_certs_file)
opener = urllib2.build_opener(
urllib2.HTTPSHandler(context=context),
urllib2.HTTPRedirectHandler())
h = {}
credentials.apply(h)
opener.addheaders = h.items()
return opener
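# Illustrative sketch (not part of the original module): a minimal example of how
# the OAuth helpers and a CauliflowerVestClient subclass fit together. The escrow
# path, metadata, server URL and target id below are assumptions made up for
# demonstration; a real subclass defines its own ESCROW_PATH and _GetMetadata().
def _example_fetch_passphrase():
  class _ExampleClient(CauliflowerVestClient):
    ESCROW_PATH = '/example'  # hypothetical escrow path
    def _GetMetadata(self):
      return {'hostname': 'example-host'}  # hypothetical metadata
  credentials = GetOauthCredentials()
  opener = BuildOauth2Opener(credentials)
  client = _ExampleClient('https://cauliflowervest.example.com', opener)
  return client.RetrieveSecret('example-target-id')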
def GetOauthCredentials():
"""Create an OAuth2 `Credentials` object."""
if not base_settings.OAUTH_CLIENT_ID:
raise RuntimeError('Missing OAUTH_CLIENT_ID setting!')
if not settings.OAUTH_CLIENT_SECRET:
raise RuntimeError('Missing OAUTH_CLIENT_SECRET setting!')
httpd = oauth2client.tools.ClientRedirectServer(
('localhost', 0), oauth2client.tools.ClientRedirectHandler)
httpd.timeout = 60
flow = oauth2client.client.OAuth2WebServerFlow(
client_id=base_settings.OAUTH_CLIENT_ID,
client_secret=settings.OAUTH_CLIENT_SECRET,
redirect_uri='http://%s:%s/' % httpd.server_address,
scope=base_settings.OAUTH_SCOPE,
)
authorize_url = flow.step1_get_authorize_url()
webbrowser.open(authorize_url, new=1, autoraise=True)
httpd.handle_request()
if 'error' in httpd.query_params:
raise AuthenticationError('Authentication request was rejected.')
try:
credentials = flow.step2_exchange(
httpd.query_params,
http=httplib2.Http(ca_certs=settings.ROOT_CA_CERT_CHAIN_PEM_FILE_PATH))
except oauth2client.client.FlowExchangeError as e:
raise AuthenticationError('Authentication has failed: %s' % e)
else:
logging.info('Authentication successful!')
return credentials
| apache-2.0 | 633,161,878,087,102,700 | 32.202797 | 86 | 0.682393 | false | 3.763773 | false | false | false |
jeremiedecock/pyai | ailib/optimize/functions/unconstrained.py | 1 | 31848 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017,2018,2019 Jeremie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
This module contains some classical test functions for unconstrained continuous
single-objective optimization.
"""
__all__ = ['sphere', 'Sphere', 'sphere1d', 'sphere2d', # TODO
'rosen', 'Rosenbrock', 'rosen2d',
'himmelblau', 'Himmelblau', 'himmelblau2d',
'rastrigin', 'Rastrigin', 'rastrigin2d',
'easom', 'Easom', 'easom2d',
'crossintray', 'Crossintray', 'crossintray2d',
'holder', 'Holder', 'holder2d']
import numpy as np
# GENERIC OBJECTIVE FUNCTION ##################################################
class _ObjectiveFunction:
"""Generic *objective function*.
TODO
"""
def __init__(self):
self._objective_function = None
self._gradient_function = None # TODO: use a generic numeric derivative function by default
self._hessian_function = None # TODO: use a generic numeric derivative function by default
self.reset_eval_counters()
self.reset_eval_logs()
self.do_eval_logs = False
self.noise = None
self.ndim = None
self.bounds = None
self.continuous = None
self.translation_vector = np.zeros(shape=self.ndim)
self.function_name = None
self.function_formula = None
self.arg_min = None
@property
def stochastic(self):
return self.noise is not None
@property
def unimodal(self):
raise NotImplementedError
def reset_eval_counters(self):
# TODO: make an external Log (or Counter) class
self.num_eval = 0
self.num_gradient_eval = 0
self.num_hessian_eval = 0
def reset_eval_logs(self):
# TODO: make an external Log class
self.eval_logs_dict = {'x': [], 'fx': []} # TODO
def __call__(self, x):
"""Evaluate one or several points.
This function is a wrapper that does several boring task aside the
evaluation of `func`: check arguments, log results, ...
Parameters
----------
func : callable object
The function used to evaluate `x`.
y : ndarray
The 1D or 2D numpy array containing the points to evaluate.
If `x` is a 2D array, the coordinates of each points are
distributed along *the first dimension*.
For instance, to evaluate the three 2D points (0,0), (1,1) and
(2,2), `x` have to be coded as the following:
`x = np.array([[0, 1, 2], [0, 1, 2]])`
so that the first point is given by `x[:,0]`, the second point by
`x[:,1]`, ... (this makes functions definition much simpler).
Returns
-------
float or ndarray
The results of the evaluation: a scalar if only one point has been
evaluated or a 1D numpy array if several points have been
evaluated.
"""
# Check self._objective_function ########
assert self._objective_function is not None
assert callable(self._objective_function)
# Check x shape #########################
if x.ndim > 0:
if x.shape[0] != self.ndim:
raise Exception('Wrong number of dimension: x has {} rows instead of {}.'.format(x.shape[0], self.ndim))
# Update the evaluations counter ########
# TODO: make an external Log (or Counter) class
if (x.ndim == 0) or (x.ndim == 1):
self.num_eval += 1
elif x.ndim == 2:
self.num_eval += x.shape[1]
else:
raise Exception('Wrong number of dimension: x is a {} dimensions numpy array ; 1 or 2 dimensions are expected.'.format(x.ndim))
# Apply translation #####################
x_translated = (x.T - self.translation_vector).T
# Eval x ################################
y = self._objective_function(x_translated)
# Apply noise ###########################
if self.noise is not None:
y = self.noise(x, y)
# Update the evals log ##################
# TODO: make an external Log class
if self.do_eval_logs:
if y.ndim == 0:
self.eval_logs_dict['x'].append(x) # TODO
elif y.ndim == 1:
self.eval_logs_dict['x'].extend(x.T) # TODO
else:
raise Exception("Wrong output dimension.")
if y.ndim == 0:
self.eval_logs_dict['fx'].append(y) # TODO
elif y.ndim == 1:
self.eval_logs_dict['fx'].extend(y) # TODO
else:
raise Exception("Wrong output dimension.")
return y
def gradient(self, x):
"""
The derivative (i.e. gradient) of the objective function.
Parameters
----------
x : array_like
One dimension Numpy array of the point at which the derivative is to be computed
or a two dimension Numpy array of points at which the derivatives are to be computed.
Returns
-------
float or array_like
gradient of the objective function at `x`.
"""
# Check self._gradient_function #########
assert self._gradient_function is not None
assert callable(self._gradient_function)
# Check x shape #########################
if x.shape[0] != self.ndim:
raise Exception('Wrong number of dimension: x has {} rows instead of {}.'.format(x.shape[0], self.ndim))
# Update the evaluations counter ########
# TODO: make an external Log (or Counter) class
if x.ndim == 1:
self.num_gradient_eval += 1
elif x.ndim == 2:
self.num_gradient_eval += x.shape[1]
else:
raise Exception('Wrong number of dimension: x is a {} dimensions numpy array ; 1 or 2 dimensions are expected.'.format(x.ndim))
# Apply translation #####################
x_translated = (x.T - self.translation_vector).T
# Eval x ################################
grad = self._gradient_function(x_translated)
return grad
def hessian(self, x):
"""
The Hessian matrix of the objective function.
Parameters
----------
x : array_like
1-D array of points at which the Hessian matrix is to be computed.
Returns
-------
rosen_hess : ndarray
The Hessian matrix of the objective function at `x`.
"""
# Check self._gradient_function #########
assert self._hessian_function is not None
assert callable(self._hessian_function)
# Check x shape #########################
if x.shape[0] != self.ndim:
raise Exception('Wrong number of dimension: x has {} rows instead of {}.'.format(x.shape[0], self.ndim))
# Update the evaluations counter ########
# TODO: make an external Log (or Counter) class
if x.ndim == 1:
self.num_hessian_eval += 1
elif x.ndim == 2:
self.num_hessian_eval += x.shape[1]
else:
raise Exception('Wrong number of dimension: x is a {} dimensions numpy array ; 1 or 2 dimensions are expected.'.format(x.ndim))
# Apply translation #####################
x_translated = (x.T - self.translation_vector).T
# Eval x ################################
hess = self._hessian_function(x_translated)
return hess
def __str__(self):
name = r""
if self.stochastic is not None:
name += "stochastic "
if self.function_name is not None:
name += self.function_name
else:
name += self.__class__.__name__
if self.function_formula is not None:
name += ": " + self.function_formula
return name
# SPHERE FUNCTION #############################################################
def sphere(x):
r"""The Sphere function.
The Sphere function is a famous **convex** function used to test the performance of optimization algorithms.
This function is very easy to optimize and can be used as a first test to check an optimization algorithm.
.. math::
f(\boldsymbol{x}) = \sum_{i=1}^{n} x_{i}^2
Global minimum:
.. math::
f(\boldsymbol{0}) = 0
Search domain:
.. math::
\boldsymbol{x} \in \mathbb{R}^n
.. image:: sphere_3d.png
.. image:: sphere.png
Example
-------
To evaluate the single 2D point :math:`x = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`:
>>> sphere( np.array([0, 0]) )
0.0
The result should be :math:`f(x) = 0`.
Example
-------
To evaluate the single 3D point :math:`x = \begin{pmatrix} 1 \\ 1 \\ 1 \end{pmatrix}`:
>>> sphere( np.array([1, 1, 1]) )
3.0
The result should be :math:`f(x) = 3.0`.
Example
-------
To evaluate multiple 2D points :math:`x_1 = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`,
:math:`x_2 = \begin{pmatrix} 1 \\ 1 \end{pmatrix}` and
:math:`x_3 = \begin{pmatrix} 2 \\ 2 \end{pmatrix}` at once:
>>> sphere( np.array([[0, 1, 2], [0, 1, 2]]) )
... # doctest: +NORMALIZE_WHITESPACE
array([0., 2., 8.])
    The result should be :math:`f(x_1) = 0`, :math:`f(x_2) = 2` and :math:`f(x_3) = 8`.
Parameters
----------
x : array_like
One dimension Numpy array of the point at which the Sphere function is to be computed
or a two dimension Numpy array of points at which the Sphere function is to be computed.
Returns
-------
float or array_like
The value(s) of the Sphere function for the given point(s) `x`.
See Also
--------
sphere_gradient, sphere_hessian
"""
    # Remark: `sum(x**2.0)` is equivalent to `np.sum(x**2.0, axis=0)` but only the latter works if x is a scalar (e.g. x = np.float(3)).
return np.sum(x**2.0, axis=0)
def sphere_gradient(x):
"""
The derivative (i.e. gradient) of the Sphere function.
Parameters
----------
x : array_like
One dimension Numpy array of the point at which the derivative is to be computed
or a two dimension Numpy array of points at which the derivatives are to be computed.
Returns
-------
float or array_like
gradient of the Sphere function at `x`.
See Also
--------
sphere, sphere_hessian
"""
return 2.0 * x
def sphere_hessian(x):
"""
The Hessian matrix of the Sphere function.
Parameters
----------
x : array_like
1-D array of points at which the Hessian matrix is to be computed.
Returns
-------
rosen_hess : ndarray
The Hessian matrix of the Sphere function at `x`.
See Also
--------
sphere, sphere_gradient
"""
return 2.0 * np.ones(x.shape)
class Sphere(_ObjectiveFunction):
"""
TODO
"""
def __init__(self, ndim):
super().__init__()
self._objective_function = sphere
self._gradient_function = sphere_gradient
self._hessian_function = sphere_hessian
self.ndim = ndim
self.bounds = np.ones((2, self.ndim)) # TODO: take this or the transpose of this ?
self.bounds[0,:] = -10.
self.bounds[1,:] = 10.
self.continuous = True
self.arg_min = np.zeros(self.ndim)
@property
def unimodal(self):
return True
sphere1d = Sphere(ndim=1)
sphere2d = Sphere(ndim=2)
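# Illustrative usage sketch (not part of the original module): how the Sphere
# wrapper is meant to be called on a single point and on a batch of points (one
# point per column), and how the evaluation counter advances. The sample points
# are assumptions chosen only for demonstration.
def _example_sphere2d_usage():
    single = sphere2d(np.array([1.0, 1.0]))             # -> 2.0
    batch = sphere2d(np.array([[0.0, 1.0, 2.0],
                               [0.0, 1.0, 2.0]]))       # -> array([0., 2., 8.])
    grad = sphere2d.gradient(np.array([1.0, 1.0]))      # -> array([2., 2.])
    return single, batch, grad, sphere2d.num_eval       # 4 evaluations counted so far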
# ROSENBROCK FUNCTION #########################################################
def rosen(x):
r"""The (extended) Rosenbrock function.
The Rosenbrock function is a famous **non-convex** function used to test
the performance of optimization algorithms. The classical two-dimensional
version of this function is **unimodal** but its *extended* :math:`n`-dimensional
version (with :math:`n \geq 4`) is **multimodal** [SHANG06]_.
.. math::
f(\boldsymbol{x}) = \sum_{i=1}^{n-1} \left[100 \left( x_{i+1} - x_{i}^{2} \right)^{2} + \left( x_{i} - 1 \right)^2 \right]
Global minimum:
.. math::
\min =
\begin{cases}
n = 2 & \rightarrow \quad f(1,1) = 0, \\
n = 3 & \rightarrow \quad f(1,1,1) = 0, \\
n > 3 & \rightarrow \quad f(\underbrace{1,\dots,1}_{n{\text{ times}}}) = 0 \\
\end{cases}
Search domain:
.. math::
\boldsymbol{x} \in \mathbb{R}^n
The Rosenbrock has exactly one (global) minimum :math:`(\underbrace{1, \dots,
1}_{n{\text{ times}}})^\top` for :math:`n \leq 3` and an additional *local*
minimum for :math:`n \geq 4` near :math:`(-1, 1, 1, \dots, 1)^\top`.
See http://www.mitpressjournals.org/doi/abs/10.1162/evco.2006.14.1.119
(freely available at http://dl.acm.org/citation.cfm?id=1118014) and
https://en.wikipedia.org/wiki/Rosenbrock_function#Multidimensional_generalisations
for more information.
See https://en.wikipedia.org/wiki/Rosenbrock_function and
http://mathworld.wolfram.com/RosenbrockFunction.html for more information.
The Rosenbrock function, its derivative (i.e. gradient) and its hessian matrix are also implemented in Scipy
([scipy.optimize.rosen](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.rosen.html#scipy.optimize.rosen),
[scipy.optimize.rosen_der](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.rosen_der.html#scipy.optimize.rosen_der),
[scipy.optimize.rosen_hess](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.rosen_hess.html#scipy.optimize.rosen_hess) and
[scipy.optimize.rosen_hess_prod](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.rosen_hess_prod.html#scipy.optimize.rosen_hess_prod)).
See [Scipy documentation](https://docs.scipy.org/doc/scipy/reference/optimize.html#rosenbrock-function) for more information.
.. image:: rosenbrock_3d.png
.. image:: rosenbrock.png
Parameters
----------
x : array_like
One dimension Numpy array of the point at which the Rosenbrock function is to be computed
or a two dimension Numpy array of points at which the Rosenbrock function is to be computed.
Returns
-------
float or array_like
The value(s) of the Rosenbrock function for the given point(s) `x`.
Example
-------
To evaluate a single 2D point :math:`x = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`:
>>> rosen( np.array([0, 0]) )
1.0
The result should be :math:`f(x) = 1`.
Example
-------
To evaluate a single 3D point :math:`x = \begin{pmatrix} 1 \\ 1 \\ 1 \end{pmatrix}`:
>>> rosen( np.array([1, 1, 1]) )
0.0
The result should be :math:`f(x) = 0`.
Example
-------
To evaluate multiple 2D points :math:`x_1 = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`,
:math:`x_2 = \begin{pmatrix} 1 \\ 1 \end{pmatrix}` and
:math:`x_3 = \begin{pmatrix} 2 \\ 2 \end{pmatrix}` at once:
>>> rosen( np.array([[0, 1, 2], [0, 1, 2]]) )
... # doctest: +NORMALIZE_WHITESPACE
array([ 1., 0., 401.])
The result should be :math:`f(x_1) = 1`, :math:`f(x_2) = 0` and :math:`f(x_3) = 401`.
References
----------
.. [SHANG06] `Shang, Y. W., & Qiu, Y. H. (2006). A note on the extended Rosenbrock function. Evolutionary Computation, 14(1), 119-126. <http://www.mitpressjournals.org/doi/abs/10.1162/evco.2006.14.1.119>`_
"""
return np.sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0, axis=0)
class Rosenbrock(_ObjectiveFunction):
"""
TODO
"""
def __init__(self, ndim):
super().__init__()
self._objective_function = rosen
self.ndim = ndim
if self.ndim < 2: # TODO
raise ValueError("The rosenbrock function is defined for solution spaces having at least 2 dimensions.")
self.bounds = np.ones((2, self.ndim)) # TODO: take this or the transpose of this ?
self.bounds[0,:] = -10. # TODO
self.bounds[1,:] = 10. # TODO
self.continuous = True
self.arg_min = np.ones(self.ndim)
@property
def unimodal(self):
return True if self.ndim < 4 else False
rosen2d = Rosenbrock(ndim=2)
# HIMMELBLAU'S FUNCTION #######################################################
def himmelblau(x):
r"""The Himmelblau's function.
The Himmelblau's function is a two-dimensional **multimodal** function.
.. math::
f(x_1, x_2) = (x_1^2 + x_2 - 11)^2 + (x_1 + x_2^2 - 7)^2
The function has four global minima:
.. math::
\begin{eqnarray}
f(3, 2) = 0 \\
f(-2.805118, 3.131312) = 0 \\
f(-3.779310, -3.283186) = 0 \\
f(3.584428, -1.848126) = 0
\end{eqnarray}
Search domain:
.. math::
\boldsymbol{x} \in \mathbb{R}^2
It also has one local maximum at :math:`f(-0.270845, -0.923039) = 181.617`.
The locations of all the minima can be found analytically (roots of cubic
polynomials) but expressions are somewhat complicated.
The function is named after David Mautner Himmelblau, who introduced it in
*Applied Nonlinear Programming* (1972), McGraw-Hill, ISBN 0-07-028921-2.
See https://en.wikipedia.org/wiki/Himmelblau%27s_function for more information.
.. image:: himmelblau_3d.png
.. image:: himmelblau.png
Example
-------
To evaluate a single point :math:`x = \begin{pmatrix} 3 \\ 2 \end{pmatrix}`:
>>> himmelblau( np.array([3, 2]) )
0.0
    The result should be :math:`f(x) = 0`.
Example
-------
To evaluate multiple points :math:`x_1 = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`,
:math:`x_2 = \begin{pmatrix} 1 \\ 1 \end{pmatrix}` and
:math:`x_3 = \begin{pmatrix} 2 \\ 2 \end{pmatrix}` at once:
>>> himmelblau( np.array([[0, 1, 2], [0, 1, 2]]) )
... # doctest: +NORMALIZE_WHITESPACE
array([170., 106., 26.])
The result should be :math:`f(x_1) = 170`, :math:`f(x_2) = 106` and :math:`f(x_3) = 26`.
Parameters
----------
x : array_like
One dimension Numpy array of the point at which the Himmelblau's function is to be computed
or a two dimension Numpy array of points at which the Himmelblau's function is to be computed.
Returns
-------
float or array_like
The value(s) of the Himmelblau's function for the given point(s) `x`.
"""
assert x.shape[0] == 2, x.shape
return (x[0]**2.0 + x[1] - 11.0)**2.0 + (x[0] + x[1]**2.0 - 7.0)**2.0
class Himmelblau(_ObjectiveFunction):
"""
TODO
"""
def __init__(self, ndim):
super().__init__()
self._objective_function = himmelblau
self.ndim = ndim
if self.ndim != 2:
raise ValueError("The himmelblau function is defined for solution spaces having 2 dimensions.")
self.bounds = np.ones((2, self.ndim)) # TODO: take this or the transpose of this ?
self.bounds[0,:] = -10. # TODO
self.bounds[1,:] = 10. # TODO
self.continuous = True
        self.arg_min = np.array([3.0, 2.0])
@property
def unimodal(self):
return False
himmelblau2d = Himmelblau(ndim=2)
# RASTRIGIN FUNCTION ##########################################################
def rastrigin(x):
r"""The Rastrigin function.
The Rastrigin function is a famous **multimodal** function.
Finding the minimum of this function is a fairly difficult problem due to
its large search space and its large number of local minima.
The classical two-dimensional version of this function has been introduced
by L. A. Rastrigin in *Systems of extremal control* Mir, Moscow (1974).
Its *generalized* :math:`n`-dimensional version has been proposed by H.
Mühlenbein, D. Schomisch and J. Born in *The Parallel Genetic Algorithm as
Function Optimizer* Parallel Computing, 17, pages 619–632, 1991.
On an n-dimensional domain it is defined by:
.. math::
f(\boldsymbol{x}) = An + \sum_{i=1}^{n} \left[ x_{i}^{2} - A \cos(2 \pi x_{i}) \right]
where :math:`A = 10`.
Global minimum:
.. math::
f(\boldsymbol{0}) = 0
Search domain:
.. math::
\boldsymbol{x} \in \mathbb{R}^n
See https://en.wikipedia.org/wiki/Rastrigin_function for more information.
.. image:: rastrigin_3d.png
.. image:: rastrigin.png
Example
-------
To evaluate a single 2D point :math:`x = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`:
>>> rastrigin( np.array([0, 0]) )
0.0
The result should be :math:`f(x) = 0`.
Example
-------
    To evaluate a single 3D point :math:`x = \begin{pmatrix} 0 \\ 0 \\ 0 \end{pmatrix}`:
>>> rastrigin( np.array([0, 0, 0]) )
0.0
The result should be :math:`f(x) = 0`.
Example
-------
To evaluate multiple 2D points :math:`x_1 = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`,
:math:`x_2 = \begin{pmatrix} 1 \\ 1 \end{pmatrix}` and
:math:`x_3 = \begin{pmatrix} 2 \\ 2 \end{pmatrix}` at once:
>>> rastrigin( np.array([[0, 1, 2], [0, 1, 2]]) )
... # doctest: +NORMALIZE_WHITESPACE
    array([0., 2., 8.])
    The result should be :math:`f(x_1) = 0`, :math:`f(x_2) = 2` and :math:`f(x_3) = 8`.
Parameters
----------
x : array_like
One dimension Numpy array of the point at which the Rastrigin function is to be computed
or a two dimension Numpy array of points at which the Rastrigin function is to be computed.
Returns
-------
float or array_like
The value(s) of the Rastrigin function for the given point(s) `x`.
"""
A = 10.
n = x.shape[0]
return A * n + np.sum(x**2.0 - A * np.cos(2.0 * np.pi * x), axis=0)
class Rastrigin(_ObjectiveFunction):
"""
TODO
"""
def __init__(self, ndim):
super().__init__()
self._objective_function = rastrigin
self.ndim = ndim
if self.ndim < 2: # TODO
raise ValueError("The rastrigin function is defined for solution spaces having at least 2 dimensions.")
self.bounds = np.ones((2, self.ndim)) # TODO: take this or the transpose of this ?
self.bounds[0,:] = -10. # TODO
self.bounds[1,:] = 10. # TODO
self.continuous = True
        self.arg_min = np.zeros(self.ndim)
@property
def unimodal(self):
return False
rastrigin2d = Rastrigin(ndim=2)
# EASOM FUNCTION ##############################################################
def easom(x):
r"""The Easom function.
The Easom function is a 2 dimensions **unimodal** function.
.. math::
f(x_1, x_2) = -\cos(x_1) \cos(x_2) \exp \left( -\left[ (x_1-\pi)^2 + (x_2-\pi)^2 \right] \right)
Global minimum:
.. math::
f(\pi, \pi) = -1
Search domain:
.. math::
\boldsymbol{x} \in \mathbb{R}^2
See https://www.sfu.ca/~ssurjano/easom.html for more information.
.. image:: easom_3d.png
.. image:: easom.png
Example
-------
To evaluate a single 2D point :math:`x = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`:
>>> easom( np.array([np.pi, np.pi]) )
-1.0
The result should be :math:`f(x) = -1`.
Example
-------
To evaluate multiple 2D points :math:`x_1 = \begin{pmatrix} \pi \\ \pi \end{pmatrix}`,
:math:`x_2 = \begin{pmatrix} 0 \\ 0 \end{pmatrix}` and
:math:`x_3 = \begin{pmatrix} 1 \\ 1 \end{pmatrix}` at once:
>>> easom( np.array([[np.pi, 0, 1], [np.pi, 0, 1]]) )
... # doctest: +NORMALIZE_WHITESPACE
    array([-1.00000000e+00, -2.67528799e-09, -3.03082341e-05])
The result should be :math:`f(x_1) = -1`, :math:`f(x_2) \approx 0` and :math:`f(x_3) \approx 0`.
Parameters
----------
x : array_like
One dimension Numpy array of the point at which the Easom function is to be computed
or a two dimension Numpy array of points at which the Easom function is to be computed.
Returns
-------
float or array_like
The value(s) of the Easom function for the given point(s) `x`.
"""
assert x.shape[0] == 2, x.shape
return -np.cos(x[0]) * np.cos(x[1]) * np.exp(-((x[0]-np.pi)**2.0 + (x[1]-np.pi)**2.0))
class Easom(_ObjectiveFunction):
"""
TODO
"""
def __init__(self, ndim):
super().__init__()
self._objective_function = easom
self.ndim = ndim
if self.ndim != 2:
raise ValueError("The easom function is defined for solution spaces having 2 dimensions.")
self.bounds = np.ones((2, self.ndim)) # TODO: take this or the transpose of this ?
self.bounds[0,:] = -10. # TODO
self.bounds[1,:] = 10. # TODO
self.continuous = True
        self.arg_min = np.pi * np.ones(self.ndim)
@property
def unimodal(self):
return True
easom2d = Easom(ndim=2)
# CROSS-IN-TRAY FUNCTION ######################################################
def crossintray(x):
r"""The Cross-in-tray function.
The Cross-in-tray function is a 2 dimensions **multimodal** function, with
four global minima.
.. math::
f(x_1, x_2) = -0.0001 \left( \left| \sin(x_1) \sin(x_2) \exp \left( \left| 100 - \frac{\sqrt{x_1^2 + x_2^2}}{\pi} \right| \right)\right| + 1 \right)^{0.1}
Global minima:
.. math::
\text{Min} =
\begin{cases}
f(1.34941, -1.34941) &= -2.06261 \\
f(1.34941, 1.34941) &= -2.06261 \\
f(-1.34941, 1.34941) &= -2.06261 \\
f(-1.34941, -1.34941) &= -2.06261 \\
\end{cases}
Search domain:
.. math::
-10 \leq x_1, x_2 \leq 10
**References**: *Test functions for optimization* (Wikipedia):
https://en.wikipedia.org/wiki/Test_functions_for_optimization.
.. image:: cross_in_tray_3d.png
.. image:: cross_in_tray.png
Example
-------
To evaluate a single 2D point :math:`x = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`:
>>> crossintray( np.array([0, 0]) )
-0.0001
The result should be :math:`f(x) = -0.0001`.
Example
-------
To evaluate multiple 2D points :math:`x_1 = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`,
:math:`x_2 = \begin{pmatrix} 1.34941 \\ 1.34941 \end{pmatrix}` and
:math:`x_3 = \begin{pmatrix} -1.34941 \\ -1.34941 \end{pmatrix}` at once:
>>> crossintray( np.array([[0, 1.34941, -1.34941], [0, 1.34941, -1.34941]]) )
... # doctest: +NORMALIZE_WHITESPACE
array([ -0.0001, -2.06261, -2.06261])
The result should be :math:`f(x_1) = -0.0001`, :math:`f(x_2) = -2.06261` and :math:`f(x_3) = -2.06261`.
Parameters
----------
x : array_like
One dimension Numpy array of the point at which the Cross-in-tray function is to be computed
or a two dimension Numpy array of points at which the Cross-in-tray function is to be computed.
Returns
-------
float or array_like
The value(s) of the Cross-in-tray function for the given point(s) `x`.
"""
assert x.shape[0] == 2, x.shape
return -0.0001 * (np.abs(np.sin(x[0]) * np.sin(x[1]) * np.exp( np.abs( 100.0 - np.sqrt(x[0]**2.0 + x[1]**2.0)/np.pi ))) + 1.0)**0.1
class Crossintray(_ObjectiveFunction):
"""
TODO
"""
def __init__(self, ndim):
super().__init__()
self._objective_function = crossintray
self.ndim = ndim
if self.ndim != 2:
raise ValueError("The crossintray function is defined for solution spaces having 2 dimensions.")
self.bounds = np.ones((2, self.ndim)) # TODO: take this or the transpose of this ?
self.bounds[0,:] = -10.
self.bounds[1,:] = 10.
self.continuous = True
        self.arg_min = np.array([1.34941, 1.34941])
@property
def unimodal(self):
return False
crossintray2d = Crossintray(ndim=2)
# HÖLDER TABLE FUNCTION #######################################################
def holder(x):
r"""The Hölder table function.
The Hölder table function is a 2 dimensions **multimodal** function, with
four global minima.
.. math::
f(x_1, x_2) =
-\left| \sin(x_1) \cos(x_2) \exp \left( \left| 1 - \frac{\sqrt{x_1^2 + x_2^2}}{\pi} \right| \right) \right|
Global minima:
.. math::
\text{Min} =
\begin{cases}
f(8.05502, 9.66459) &= -19.2085 \\
f(-8.05502, 9.66459) &= -19.2085 \\
f(8.05502, -9.66459) &= -19.2085 \\
f(-8.05502, -9.66459) &= -19.2085
\end{cases}
Search domain:
.. math::
-10 \leq x_1, x_2 \leq 10
**References**: *Test functions for optimization* (Wikipedia):
https://en.wikipedia.org/wiki/Test_functions_for_optimization.
.. image:: holder_3d.png
.. image:: holder.png
Example
-------
To evaluate a single 2D point :math:`x = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`:
>>> holder( np.array([0, 0]) )
    -0.0
The result should be :math:`f(x) = 0`.
Example
-------
To evaluate multiple 2D points :math:`x_1 = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`,
:math:`x_2 = \begin{pmatrix} 0 \\ 1 \end{pmatrix}` and
:math:`x_3 = \begin{pmatrix} 1 \\ 0 \end{pmatrix}` at once:
>>> holder( np.array([[0., 0., 1.], [0., 1., 0.]]) )
... # doctest: +NORMALIZE_WHITESPACE
array([-0. , -0. , -1.66377043])
The result should be :math:`f(x_1) = 0`, :math:`f(x_2) = 0` and :math:`f(x_3) = -1.66377043`.
Parameters
----------
x : array_like
One dimension Numpy array of the point at which the Hölder table function is to be computed
or a two dimension Numpy array of points at which the Hölder table function is to be computed.
Returns
-------
float or array_like
The value(s) of the Hölder table function for the given point(s) `x`.
"""
assert x.shape[0] == 2, x.shape
return -np.abs(np.sin(x[0]) * np.cos(x[1]) * np.exp(np.abs(1.0 - np.sqrt(x[0]**2.0 + x[1]**2.0)/np.pi )))
class Holder(_ObjectiveFunction):
"""
TODO
"""
def __init__(self, ndim):
super().__init__()
self._objective_function = holder
self.ndim = ndim
if self.ndim != 2:
raise ValueError("The holder function is defined for solution spaces having 2 dimensions.")
self.bounds = np.ones((2, self.ndim)) # TODO: take this or the transpose of this ?
self.bounds[0,:] = -10.
self.bounds[1,:] = 10.
self.continuous = True
        self.arg_min = np.array([8.05502, 9.66459])
@property
def unimodal(self):
return False
holder2d = Holder(ndim=2)
| mit | -930,977,634,268,075,000 | 28.453284 | 209 | 0.565847 | false | 3.447272 | false | false | false |
gavein/sleeping-god | SleepingGodObjects/Vessel.py | 1 | 2849 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
from Constants import WEAR_AT_TURN, OXYGEN_AT_TURN, CARGO_WATER, CARGO_MINERALS
from SleepingGodObjects.GameObjects import GameObject
class Vessel(GameObject):
def __init__(
self,
pos_x,
pos_y,
char,
label,
color,
blocks,
cargo={},
oxygen=0,
hull=0,
wear_resistance=0):
GameObject.__init__(
self,
pos_x,
pos_y,
char,
label,
color,
blocks)
self.cargo = cargo
self.cargo_keys = [
CARGO_WATER,
CARGO_MINERALS
]
for key in self.cargo_keys:
if not self.cargo.has_key(key):
self.cargo[key] = 0
self.oxygen = oxygen
self.oxygen_max = oxygen
self.hull = hull
self.wear = hull
self.wear_resistance = wear_resistance
def move(self, dx, dy):
self.pos_x += dx
self.pos_y += dy
turn_wear = WEAR_AT_TURN - self.wear_resistance
self.wear -= turn_wear
self.oxygen -= OXYGEN_AT_TURN
def cargo_info(self, key):
if self.cargo.has_key(key):
return self.cargo[key]
class PlayerVessel(Vessel):
    SOLAR_SAIL = u"solar sail"
def __init__(
self,
pos_x,
pos_y,
char,
label,
color,
blocks,
cargo={},
oxygen=0,
hull=0,
wear_resistance=0,
propulsion=SOLAR_SAIL):
Vessel.__init__(
self,
pos_x,
pos_y,
char,
label,
color,
blocks,
cargo,
oxygen,
hull,
wear_resistance)
self.propulsion = propulsion
self.abilities = []
def increase_resources(self, minerals, water):
self.cargo[CARGO_MINERALS] += minerals
self.cargo[CARGO_WATER] += water
def add_ability(self, ability):
self.abilities.append(ability)
    def get_ability_name(self, ability):
return ability.name
def get_ability_description(self, ability):
return ability.description
def use_ability(self, ability, *args):
if ability in self.abilities:
ability.use(args)
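# Illustrative sketch (not part of the original module, with made-up stats): a
# PlayerVessel is built like any Vessel, and each move() call spends hull wear
# (WEAR_AT_TURN minus wear_resistance) and OXYGEN_AT_TURN oxygen.
def _example_player_vessel():
    vessel = PlayerVessel(
        pos_x=0, pos_y=0, char="@", label="vessel", color=None, blocks=True,
        cargo={}, oxygen=100, hull=50, wear_resistance=1)
    vessel.move(1, 0)
    return vessel.pos_x, vessel.wear, vessel.oxygen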
| gpl-3.0 | -8,537,353,593,604,741,000 | 25.259259 | 79 | 0.420663 | false | 4.158358 | false | false | false |
ipfire/collecty | src/collecty/plugins/processor.py | 1 | 7032 | #!/usr/bin/python3
###############################################################################
# #
# collecty - A system statistics collection daemon for IPFire #
# Copyright (C) 2012 IPFire development team #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import multiprocessing
from . import base
from ..colours import *
from ..constants import *
from ..i18n import _
class GraphTemplateProcessor(base.GraphTemplate):
name = "processor"
@property
def rrd_graph(self):
return [
# Add all used CPU cycles
"CDEF:usage=user,nice,+,sys,+,wait,+,irq,+,sirq,+,steal,+,guest,+,guest_nice,+",
# Add idle to get the total number of cycles
"CDEF:total=usage,idle,+",
# Headline
"COMMENT:%s" % EMPTY_LABEL,
"COMMENT:%s" % (COLUMN % _("Current")),
"COMMENT:%s" % (COLUMN % _("Average")),
"COMMENT:%s" % (COLUMN % _("Minimum")),
"COMMENT:%s\\j" % (COLUMN % _("Maximum")),
"CDEF:usage_p=100,usage,*,total,/",
"COMMENT: %s" % (LABEL % _("Total")),
"GPRINT:usage_p_cur:%s" % PERCENTAGE,
"GPRINT:usage_p_avg:%s" % PERCENTAGE,
"GPRINT:usage_p_min:%s" % PERCENTAGE,
"GPRINT:usage_p_max:%s\\j" % PERCENTAGE,
EMPTY_LINE,
"CDEF:user_p=100,user,*,total,/",
"AREA:user_p%s:%s" % (
transparency(CPU_USER, AREA_OPACITY),
LABEL % _("User"),
),
"GPRINT:user_p_cur:%s" % PERCENTAGE,
"GPRINT:user_p_avg:%s" % PERCENTAGE,
"GPRINT:user_p_min:%s" % PERCENTAGE,
"GPRINT:user_p_max:%s\\j" % PERCENTAGE,
"CDEF:nice_p=100,nice,*,total,/",
"AREA:nice_p%s:%s:STACK" % (
transparency(CPU_NICE, AREA_OPACITY),
LABEL % _("Nice"),
),
"GPRINT:nice_p_cur:%s" % PERCENTAGE,
"GPRINT:nice_p_avg:%s" % PERCENTAGE,
"GPRINT:nice_p_min:%s" % PERCENTAGE,
"GPRINT:nice_p_max:%s\\j" % PERCENTAGE,
"CDEF:sys_p=100,sys,*,total,/",
"AREA:sys_p%s:%s:STACK" % (
transparency(CPU_SYS, AREA_OPACITY),
LABEL % _("System"),
),
"GPRINT:sys_p_cur:%s" % PERCENTAGE,
"GPRINT:sys_p_avg:%s" % PERCENTAGE,
"GPRINT:sys_p_min:%s" % PERCENTAGE,
"GPRINT:sys_p_max:%s\\j" % PERCENTAGE,
"CDEF:wait_p=100,wait,*,total,/",
"AREA:wait_p%s:%s:STACK" % (
transparency(CPU_WAIT, AREA_OPACITY),
LABEL % _("Wait"),
),
"GPRINT:wait_p_cur:%s" % PERCENTAGE,
"GPRINT:wait_p_avg:%s" % PERCENTAGE,
"GPRINT:wait_p_min:%s" % PERCENTAGE,
"GPRINT:wait_p_max:%s\\j" % PERCENTAGE,
"CDEF:irq_p=100,irq,*,total,/",
"AREA:irq_p%s:%s:STACK" % (
transparency(CPU_IRQ, AREA_OPACITY),
LABEL % _("Interrupt"),
),
"GPRINT:irq_p_cur:%s" % PERCENTAGE,
"GPRINT:irq_p_avg:%s" % PERCENTAGE,
"GPRINT:irq_p_min:%s" % PERCENTAGE,
"GPRINT:irq_p_max:%s\\j" % PERCENTAGE,
"CDEF:sirq_p=100,sirq,*,total,/",
"AREA:sirq_p%s:%s:STACK" % (
transparency(CPU_SIRQ, AREA_OPACITY),
LABEL % _("Soft Interrupt"),
),
"GPRINT:sirq_p_cur:%s" % PERCENTAGE,
"GPRINT:sirq_p_avg:%s" % PERCENTAGE,
"GPRINT:sirq_p_min:%s" % PERCENTAGE,
"GPRINT:sirq_p_max:%s\\j" % PERCENTAGE,
"CDEF:steal_p=100,steal,*,total,/",
"AREA:steal_p%s:%s:STACK" % (
transparency(CPU_STEAL, AREA_OPACITY),
LABEL % _("Steal"),
),
"GPRINT:steal_p_cur:%s" % PERCENTAGE,
"GPRINT:steal_p_avg:%s" % PERCENTAGE,
"GPRINT:steal_p_min:%s" % PERCENTAGE,
"GPRINT:steal_p_max:%s\\j" % PERCENTAGE,
"CDEF:guest_p=100,guest,*,total,/",
"AREA:guest_p%s:%s:STACK" % (
transparency(CPU_GUEST, AREA_OPACITY),
LABEL % _("Guest"),
),
"GPRINT:guest_p_cur:%s" % PERCENTAGE,
"GPRINT:guest_p_avg:%s" % PERCENTAGE,
"GPRINT:guest_p_min:%s" % PERCENTAGE,
"GPRINT:guest_p_max:%s\\j" % PERCENTAGE,
"CDEF:guest_nice_p=100,guest_nice,*,total,/",
"AREA:guest_nice_p%s:%s:STACK" % (
transparency(CPU_GUEST_NICE, AREA_OPACITY),
LABEL % _("Guest Nice"),
),
"GPRINT:guest_nice_p_cur:%s" % PERCENTAGE,
"GPRINT:guest_nice_p_avg:%s" % PERCENTAGE,
"GPRINT:guest_nice_p_min:%s" % PERCENTAGE,
"GPRINT:guest_nice_p_max:%s\\j" % PERCENTAGE,
"CDEF:idle_p=100,idle,*,total,/",
"AREA:idle_p%s::STACK" % CPU_IDLE,
# Draw contour lines
"LINE:user_p%s" % CPU_USER,
"LINE:nice_p%s::STACK" % CPU_NICE,
"LINE:sys_p%s::STACK" % CPU_SYS,
"LINE:wait_p%s::STACK" % CPU_WAIT,
"LINE:irq_p%s::STACK" % CPU_IRQ,
"LINE:sirq_p%s::STACK" % CPU_SIRQ,
"LINE:steal_p%s::STACK" % CPU_STEAL,
"LINE:guest_p%s::STACK" % CPU_GUEST,
"LINE:guest_nice_p%s::STACK" % CPU_GUEST_NICE,
]
upper_limit = 100
lower_limit = 0
@property
def graph_title(self):
return _("Processor Usage")
@property
def graph_vertical_label(self):
return _("Percent")
class ProcessorObject(base.Object):
rrd_schema = [
"DS:user:DERIVE:0:U",
"DS:nice:DERIVE:0:U",
"DS:sys:DERIVE:0:U",
"DS:idle:DERIVE:0:U",
"DS:wait:DERIVE:0:U",
"DS:irq:DERIVE:0:U",
"DS:sirq:DERIVE:0:U",
"DS:steal:DERIVE:0:U",
"DS:guest:DERIVE:0:U",
"DS:guest_nice:DERIVE:0:U",
]
def init(self, cpu_id=None):
self.cpu_id = cpu_id
@property
def id(self):
if self.cpu_id is not None:
return "%s" % self.cpu_id
return "default"
def collect(self):
"""
Reads the CPU usage.
"""
stat = self.read_proc_stat()
if self.cpu_id is None:
values = stat.get("cpu")
else:
values = stat.get("cpu%s" % self.cpu_id)
# Convert values into a list
values = values.split()
if not len(values) == len(self.rrd_schema):
raise ValueError("Received unexpected output from /proc/stat: %s" % values)
return values
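# Illustrative sketch (not part of the original plugin): collect() expects
# read_proc_stat() (provided by base.Object) to return the per-CPU lines of
# /proc/stat, whose ten whitespace-separated counters line up with the data
# sources in rrd_schema. The sample line below is an assumption for demonstration.
def _example_parse_stat_line():
	# user nice sys idle wait irq sirq steal guest guest_nice
	sample = "170680 2688 45369 5913337 19851 0 1512 0 0 0"  # hypothetical counters
	values = sample.split()
	assert len(values) == 10
	fields = ("user", "nice", "sys", "idle", "wait", "irq", "sirq", "steal", "guest", "guest_nice")
	return dict(zip(fields, values))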
class ProcessorPlugin(base.Plugin):
name = "processor"
description = "Processor Usage Plugin"
templates = [GraphTemplateProcessor]
@property
def objects(self):
yield ProcessorObject(self)
num = multiprocessing.cpu_count()
for i in range(num):
yield ProcessorObject(self, cpu_id=i)
| gpl-3.0 | -3,983,568,614,264,902,000 | 29.441558 | 83 | 0.558447 | false | 2.773964 | false | false | false |
cloudify-cosmo/cloudify-nsx-plugin | cloudify_nsx/network/dhcp_bind.py | 1 | 5021 | ########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from cloudify import ctx
from cloudify.decorators import operation
import cloudify_nsx.library.nsx_common as common
from cloudify import exceptions as cfy_exc
import cloudify_nsx.library.nsx_esg_dlr as nsx_dhcp
@operation
def create(**kwargs):
validation_rules = {
"esg_id": {
"required": True
},
"vm_id": {
"set_none": True
},
"vnic_id": {
"set_none": True,
"type": "string"
},
"mac": {
"set_none": True
},
"hostname": {
"required": True
},
"ip": {
"required": True
},
"default_gateway": {
"set_none": True
},
"subnet_mask": {
"set_none": True
},
"domain_name": {
"set_none": True
},
"dns_server_1": {
"set_none": True
},
"dns_server_2": {
"set_none": True
},
"lease_time": {
"set_none": True
},
"auto_dns": {
"set_none": True
}
}
use_existing, bind_dict = common.get_properties_and_validate(
'bind', kwargs, validation_rules
)
if use_existing:
ctx.logger.info("Used pre existed!")
return
resource_id = ctx.instance.runtime_properties.get('resource_id')
if resource_id:
ctx.logger.info("Reused %s" % resource_id)
return
# credentials
client_session = common.nsx_login(kwargs)
if bind_dict.get('mac'): # if NONE skip this part
resource_id = nsx_dhcp.add_mac_binding(client_session,
bind_dict['esg_id'],
bind_dict['mac'],
bind_dict['hostname'],
bind_dict['ip'],
bind_dict['default_gateway'],
bind_dict['subnet_mask'],
bind_dict['domain_name'],
bind_dict['dns_server_1'],
bind_dict['dns_server_2'],
bind_dict['lease_time'],
bind_dict['auto_dns'])
elif bind_dict.get('vnic_id') is not None and bind_dict.get('vm_id'):
resource_id = nsx_dhcp.add_vm_binding(client_session,
bind_dict['esg_id'],
bind_dict['vm_id'],
bind_dict['vnic_id'],
bind_dict['hostname'],
bind_dict['ip'],
bind_dict['default_gateway'],
bind_dict['subnet_mask'],
bind_dict['domain_name'],
bind_dict['dns_server_1'],
bind_dict['dns_server_2'],
bind_dict['lease_time'],
bind_dict['auto_dns'])
else:
raise cfy_exc.NonRecoverableError(
"Please fill vm_id/vnic_id or mac"
)
ctx.instance.runtime_properties['resource_id'] = resource_id
ctx.logger.info("Binded %s | %s" % (resource_id, bind_dict))
@operation
def delete(**kwargs):
use_existing, bind_dict = common.get_properties('bind', kwargs)
if use_existing:
common.remove_properties('bind')
ctx.logger.info("Used pre existed!")
return
resource_id = ctx.instance.runtime_properties.get('resource_id')
if not resource_id:
common.remove_properties('bind')
ctx.logger.info("We dont have resource_id")
return
# credentials
client_session = common.nsx_login(kwargs)
common.attempt_with_rerun(
nsx_dhcp.delete_dhcp_binding,
client_session=client_session,
resource_id=resource_id
)
ctx.logger.info("deleted %s" % resource_id)
common.remove_properties('bind')
| apache-2.0 | 6,749,471,616,254,207,000 | 33.390411 | 79 | 0.468632 | false | 4.495076 | false | false | false |
cajone/pychess | lib/pychess/widgets/pydock/PyDockTop.py | 1 | 9627 | from __future__ import absolute_import
from __future__ import print_function
import os
from xml.dom import minidom
from collections import defaultdict
from pychess.System.prefix import addDataPrefix
from .PyDockLeaf import PyDockLeaf
from .PyDockComposite import PyDockComposite
from .ArrowButton import ArrowButton
from .HighlightArea import HighlightArea
from .__init__ import TabReceiver
from .__init__ import NORTH, EAST, SOUTH, WEST, CENTER
class PyDockTop(PyDockComposite, TabReceiver):
def __init__(self, id, perspective):
TabReceiver.__init__(self, perspective)
self.id = id
self.perspective = perspective
self.set_no_show_all(True)
self.highlightArea = HighlightArea(self)
self.button_cids = defaultdict(list)
self.buttons = (
ArrowButton(self, addDataPrefix("glade/dock_top.svg"), NORTH),
ArrowButton(self, addDataPrefix("glade/dock_right.svg"), EAST),
ArrowButton(self, addDataPrefix("glade/dock_bottom.svg"), SOUTH),
ArrowButton(self, addDataPrefix("glade/dock_left.svg"), WEST))
for button in self.buttons:
self.button_cids[button] += [
button.connect("dropped", self.__onDrop),
button.connect("hovered", self.__onHover),
button.connect("left", self.__onLeave),
]
def _del(self):
self.highlightArea.disconnect(self.highlightArea.cid)
for button in self.buttons:
for cid in self.button_cids[button]:
button.disconnect(cid)
button.myparent = None
self.button_cids = {}
self.highlightArea.myparent = None
#self.buttons = None
#self.highlightArea = None
TabReceiver._del(self)
PyDockComposite._del(self)
def getPosition(self):
return CENTER
def __repr__(self):
return "top (%s)" % self.id
# ===========================================================================
# Component stuff
# ===========================================================================
def addComponent(self, widget):
self.add(widget)
widget.show()
def changeComponent(self, old, new):
self.removeComponent(old)
self.addComponent(new)
def removeComponent(self, widget):
self.remove(widget)
def getComponents(self):
child = self.get_child()
if isinstance(child, PyDockComposite) or isinstance(child, PyDockLeaf):
return [child]
return []
def dock(self, widget, position, title, id):
if not self.getComponents():
leaf = PyDockLeaf(widget, title, id, self.perspective)
self.addComponent(leaf)
return leaf
else:
return self.get_child().dock(widget, position, title, id)
def clear(self):
self.remove(self.get_child())
# ===========================================================================
# Signals
# ===========================================================================
def showArrows(self):
for button in self.buttons:
button._calcSize()
button.show()
def hideArrows(self):
for button in self.buttons:
button.hide()
self.highlightArea.hide()
def __onDrop(self, arrowButton, sender):
self.highlightArea.hide()
child = sender.get_nth_page(sender.get_current_page())
title, id = sender.get_parent().undock(child)
self.dock(child, arrowButton.myposition, title, id)
def __onHover(self, arrowButton, widget):
self.highlightArea.showAt(arrowButton.myposition)
arrowButton.get_window().raise_()
def __onLeave(self, arrowButton):
self.highlightArea.hide()
# ===========================================================================
# XML
# ===========================================================================
def saveToXML(self, xmlpath):
"""
<docks>
<dock id="x">
<v pos="200">
<leaf current="x" dockable="False">
<panel id="x" />
</leaf>
<h pos="200">
<leaf current="y" dockable="True">
<panel id="y" />
<panel id="z" />
</leaf>
<leaf current="y" dockable="True">
<panel id="y" />
</leaf>
</h>
</v>
</dock>
</docks>
"""
dockElem = None
if os.path.isfile(xmlpath):
doc = minidom.parse(xmlpath)
for elem in doc.getElementsByTagName("dock"):
if elem.getAttribute("id") == self.id:
for node in elem.childNodes:
elem.removeChild(node)
dockElem = elem
break
if not dockElem:
doc = minidom.getDOMImplementation().createDocument(None, "docks",
None)
dockElem = doc.createElement("dock")
dockElem.setAttribute("id", self.id)
doc.documentElement.appendChild(dockElem)
if self.get_child():
self.__addToXML(self.get_child(), dockElem, doc)
f_handle = open(xmlpath, "w")
doc.writexml(f_handle)
f_handle.close()
doc.unlink()
def __addToXML(self, component, parentElement, document):
if isinstance(component, PyDockComposite):
pos = component.paned.get_position()
if component.getPosition() in (NORTH, SOUTH):
childElement = document.createElement("v")
size = float(component.get_allocation().height)
else:
childElement = document.createElement("h")
size = float(component.get_allocation().width)
# if component.getPosition() in (NORTH, SOUTH):
# print "saving v position as %s out of %s (%s)" % (str(pos), str(size), str(pos/max(size,pos)))
childElement.setAttribute("pos", str(pos / max(size, pos)))
self.__addToXML(component.getComponents()[0], childElement,
document)
self.__addToXML(component.getComponents()[1], childElement,
document)
elif isinstance(component, PyDockLeaf):
childElement = document.createElement("leaf")
childElement.setAttribute("current", component.getCurrentPanel())
childElement.setAttribute("dockable", str(component.isDockable()))
for panel, title, id in component.getPanels():
element = document.createElement("panel")
element.setAttribute("id", id)
childElement.appendChild(element)
parentElement.appendChild(childElement)
def loadFromXML(self, xmlpath, idToWidget):
""" idTowidget is a dictionary {id: (widget,title)}
asserts that self.id is in the xmlfile """
doc = minidom.parse(xmlpath)
for elem in doc.getElementsByTagName("dock"):
if elem.getAttribute("id") == self.id:
break
else:
raise AttributeError(
"XML file contains no <dock> elements with id '%s'" % self.id)
child = [n for n in elem.childNodes if isinstance(n, minidom.Element)]
if child:
self.addComponent(self.__createWidgetFromXML(child[0], idToWidget))
def __createWidgetFromXML(self, parentElement, idToWidget):
children = [n
for n in parentElement.childNodes
if isinstance(n, minidom.Element)]
if parentElement.tagName in ("h", "v"):
child1, child2 = children
if parentElement.tagName == "h":
new = PyDockComposite(EAST, self.perspective)
else:
new = PyDockComposite(SOUTH, self.perspective)
new.initChildren(
self.__createWidgetFromXML(child1, idToWidget),
self.__createWidgetFromXML(child2, idToWidget),
preserve_dimensions=True)
def cb(widget, event, pos):
allocation = widget.get_allocation()
if parentElement.tagName == "h":
widget.set_position(int(allocation.width * pos))
else:
# print "loading v position as %s out of %s (%s)" % \
# (int(allocation.height * pos), str(allocation.height), str(pos))
widget.set_position(int(allocation.height * pos))
widget.disconnect(conid)
conid = new.paned.connect("size-allocate", cb, float(parentElement.getAttribute("pos")))
return new
elif parentElement.tagName == "leaf":
id = children[0].getAttribute("id")
title, widget = idToWidget[id]
leaf = PyDockLeaf(widget, title, id, self.perspective)
for panelElement in children[1:]:
id = panelElement.getAttribute("id")
title, widget = idToWidget[id]
leaf.dock(widget, CENTER, title, id)
leaf.setCurrentPanel(parentElement.getAttribute("current"))
if parentElement.getAttribute("dockable").lower() == "false":
leaf.setDockable(False)
return leaf
| gpl-3.0 | 4,931,489,659,849,762,000 | 36.901575 | 112 | 0.52976 | false | 4.469359 | false | false | false |
xju2/hzzws | scripts/low_mass.py | 1 | 2906 | #!/usr/bin/env python
import common
import glob
name = "Low"
binning = "60, 110, 140"
branch = "m4l_constrained, "+binning
###in workspace
obs_binning = binning
# key: category name
# value: TCut on mini-tree
categories = {
"ggF_4mu_13TeV" : "(event_type==0)",
"ggF_2mu2e_13TeV" : "(event_type==2)",
"ggF_2e2mu_13TeV" : "(event_type==3)",
"ggF_4e_13TeV" : "(event_type==1)",
}
#categories = {"all" : "(1==1)"}
sig_samples = ["ggH", "VBFH", "ZH", "WH", "ttH"]
bkg_samples = ["qqZZ", "Zjets",
"ggZZ"
]
samples = sig_samples + bkg_samples
samples_para = samples
samples_lowmass_sig125 = {
"ggH":common.minitree_dir+"mc15_13TeV.341505.PowhegPythia8EvtGen_CT10_AZNLOCTEQ6L1_ggH125_ZZ4lep_noTau.root",
"VBFH":common.minitree_dir+"mc15_13TeV.341518.PowhegPythia8EvtGen_CT10_AZNLOCTEQ6L1_VBFH125_ZZ4lep_noTau.root",
"WH":common.minitree_dir+"mc15_13TeV.341964.Pythia8EvtGen_A14NNPDF23LO_WH125_ZZ4l.root",
"ZH":common.minitree_dir+"mc15_13TeV.341947.Pythia8EvtGen_A14NNPDF23LO_ZH125_ZZ4l.root",
"ttH":common.minitree_dir+"mc15_13TeV.342561.aMcAtNloHerwigppEvtGen_UEEE5_CTEQ6L1_CT10ME_ttH125_4l.root",
}
#masses = [124, 125, 126]
masses = [125]
mass_points = len(masses)
def get_mass(im):
return masses[im]
def get_sample_dict(mass):
tmp_res = {}
sample_list = sig_samples
for sample_name in sample_list:
pattern = common.minitree_dir+"*"+sample_name+str(mass)+"_*4l*.root"
file_list = glob.glob(pattern)
#print mass,len(file_list), file_list
if len(file_list) == 1:
tmp_res[sample_name] = file_list[0]
elif len(file_list) == 2:
for ff in file_list:
if "noTau" in ff:
tmp_res[sample_name] = ff
return tmp_res
def get_signal_dict():
tmp_dic = {}
for im in range(mass_points):
mass = get_mass(im)
tmp_dic[str(mass)] = get_sample_dict(mass)
return tmp_dic
samples_sig = get_signal_dict()
samples_bkg = {
#"qqZZ":common.minitree_dir+"mc15_13TeV.342556.PowhegPy8EG_CT10nloME_AZNLOCTEQ6L1_ZZllll_mll4_m4l_100_150.root",
"qqZZ":"/afs/cern.ch/atlas/groups/HSG2/H4l/run2/2015/MiniTrees/Prod_v03/mc_15b/Nominal/mc15_13TeV.342556.PowhegPy8EG_CT10nloME_AZNLOCTEQ6L1_ZZllll_mll4_m4l_100_150.root",
#"Zjets":common.minitree_dir+"combined/mc15_redBkg_filtered.root"
"Zjets":"/afs/cern.ch/atlas/groups/HSG2/H4l/run2/2015/MiniTrees/Prod_v01/mc/Nominal/combined/mc15_redBkg_filtered.root",
"ggZZ":common.minitree_dir+"mc15_gg2ZZ_low.root",
}
def print_samples():
for sample,add in samples_bkg.iteritems():
print sample,add
for sample,add in samples_sig["125"].iteritems():
print sample,add
#print_samples()
samples_sig_scale = 1.0
samples_bkg_scale = 1.0
data = common.minitree_dir+"../../data15_grl_v73.root"
if __name__ == "__main__":
print_samples()
| mit | -8,342,612,487,512,922,000 | 32.790698 | 174 | 0.652787 | false | 2.41965 | false | false | false |
Lothiraldan/OneTask | onetask/tests.py | 1 | 4351 | # -*- coding: utf-8 -*-
import os
import json
import tempfile
import unittest
from .collection import TaskCollection
from subprocess import check_output, CalledProcessError
class TaskCollectionTest(unittest.TestCase):
def _create_db(self, **kwargs):
temp = tempfile.NamedTemporaryFile(prefix='onetasktest', suffix='.json',
mode='w+t', delete=False)
temp.write(json.dumps(dict(**kwargs)))
temp.read()
return temp
def _load(self, **kwargs):
temp = self._create_db(**kwargs)
return TaskCollection.load(temp.name)
def assertCommandOK(self, command):
try:
check_output(command)
except CalledProcessError as err:
raise AssertionError('Command is not ok: ' % err)
def assertCommandKO(self, command):
assert isinstance(command, (list, tuple,))
self.assertRaises(CalledProcessError, check_output, command)
def test_load(self):
tasks = self._load(tasks=[{"title": "task1"}, {"title": "task2"}])
self.assertEquals(len(tasks.data['tasks']), 2)
self.assertEquals(tasks.data['tasks'][0]['title'], 'task1')
self.assertEquals(tasks.data['tasks'][1]['title'], 'task2')
def test_add(self):
tasks = self._load(tasks=[])
tasks.add('task1')
self.assertEquals(len(tasks.data['tasks']), 1)
self.assertEquals(tasks.data['tasks'][0]['title'], 'task1')
tasks.add('task2')
self.assertEquals(len(tasks.data['tasks']), 2)
self.assertEquals(tasks.data['tasks'][0]['title'], 'task1')
tasks.add('task3')
self.assertEquals(len(tasks.data['tasks']), 3)
self.assertEquals(tasks.data['tasks'][0]['title'], 'task1')
def test_get(self):
tasks = self._load(tasks=[{"title": "task1", "created": 1000}],
current=None, archive=[])
self.assertEqual(tasks.get(), 'task1')
for x in range(2, 100):
tasks.add('task%d' % x)
self.assertEqual(len(tasks.data['tasks']), x - 1)
self.assertEquals(tasks.get(), 'task1')
tasks.done(closed=3000)
self.assertEqual(len(tasks.data['tasks']), x - 1)
self.assertNotEquals(tasks.get(), 'task1')
self.assertEquals(tasks.data['archive'][0]['title'], 'task1')
self.assertEquals(tasks.data['archive'][0]['duration'], 2000)
def test_done(self):
tasks = self._load(tasks=[], current=None, archive=[])
tasks.add('task1')
self.assertEquals(tasks.get(), 'task1')
self.assertEquals(len(tasks.data['tasks']), 0)
tasks.add('task2')
self.assertEquals(tasks.get(), 'task1')
self.assertEquals(len(tasks.data['tasks']), 1)
self.assertEquals(len(tasks.data['archive']), 0)
tasks.done()
self.assertEquals(len(tasks.data['tasks']), 1)
self.assertEquals(tasks.data['tasks'][0]['title'], 'task2')
self.assertEquals(len(tasks.data['archive']), 1)
self.assertEquals(tasks.data['archive'][0]['title'], 'task1')
tasks.get()
tasks.done()
self.assertEquals(len(tasks.data['tasks']), 0)
self.assertEquals(len(tasks.data['archive']), 2)
self.assertEquals(tasks.data['archive'][0]['title'], 'task1')
self.assertEquals(tasks.data['archive'][1]['title'], 'task2')
def test_skip(self):
tasks = self._load(tasks=[{"title": "task1"},
{"title": "task2"},
{"title": "task3"}],
current=None)
current = tasks.get()
for i in range(4):
tasks.skip()
new = tasks.get()
self.assertNotEquals(current, new)
current = new
def test_cli(self):
tmp_path = self._create_db(current=None, tasks=[], archive=[]).name
os.environ['ONETASK_DB'] = tmp_path
executable = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', 'bin', 'onetask'))
self.assertCommandOK([executable])
self.assertCommandOK([executable, 'add', 'plop'])
self.assertEquals(check_output([executable, 'get']), b'plop\n')
self.assertCommandOK([executable, 'done'])
self.assertCommandKO([executable, 'get'])
if __name__ == '__main__':
unittest.main()
| mit | -1,090,320,991,968,640,100 | 38.198198 | 80 | 0.580326 | false | 3.84364 | true | false | false |
looooo/pivy | scons/scons-local-1.2.0.d20090919/SCons/Tool/ifort.py | 1 | 3365 | """SCons.Tool.ifort
Tool-specific initialization for newer versions of the Intel Fortran Compiler
for Linux/Windows (and possibly Mac OS X).
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/ifort.py 4369 2009/09/19 15:58:29 scons"
import string
import SCons.Defaults
from SCons.Scanner.Fortran import FortranScan
from FortranCommon import add_all_to_env
def generate(env):
"""Add Builders and construction variables for ifort to an Environment."""
# ifort supports Fortran 90 and Fortran 95
# Additionally, ifort recognizes more file extensions.
fscan = FortranScan("FORTRANPATH")
SCons.Tool.SourceFileScanner.add_scanner('.i', fscan)
SCons.Tool.SourceFileScanner.add_scanner('.i90', fscan)
if 'FORTRANFILESUFFIXES' not in env:
env['FORTRANFILESUFFIXES'] = ['.i']
else:
env['FORTRANFILESUFFIXES'].append('.i')
if 'F90FILESUFFIXES' not in env:
env['F90FILESUFFIXES'] = ['.i90']
else:
env['F90FILESUFFIXES'].append('.i90')
add_all_to_env(env)
fc = 'ifort'
for dialect in ['F77', 'F90', 'FORTRAN', 'F95']:
env['%s' % dialect] = fc
env['SH%s' % dialect] = '$%s' % dialect
if env['PLATFORM'] == 'posix':
env['SH%sFLAGS' % dialect] = SCons.Util.CLVar('$%sFLAGS -fPIC' % dialect)
if env['PLATFORM'] == 'win32':
# On Windows, the ifort compiler specifies the object on the
# command line with -object:, not -o. Massage the necessary
# command-line construction variables.
for dialect in ['F77', 'F90', 'FORTRAN', 'F95']:
for var in ['%sCOM' % dialect, '%sPPCOM' % dialect,
'SH%sCOM' % dialect, 'SH%sPPCOM' % dialect]:
env[var] = string.replace(env[var], '-o $TARGET', '-object:$TARGET')
env['FORTRANMODDIRPREFIX'] = "/module:"
else:
env['FORTRANMODDIRPREFIX'] = "-module "
def exists(env):
return env.Detect('ifort')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| isc | -7,407,020,927,223,222,000 | 36.388889 | 89 | 0.685587 | false | 3.714128 | false | false | false |
bitglue/shinysdr | shinysdr/plugins/vor/__init__.py | 1 | 9491 | # Copyright 2013, 2014, 2015, 2016, 2017 Kevin Reid <[email protected]>
#
# This file is part of ShinySDR.
#
# ShinySDR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ShinySDR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ShinySDR. If not, see <http://www.gnu.org/licenses/>.
# TODO: fully clean up this GRC-generated file
from __future__ import absolute_import, division
import math
import os.path
from twisted.web import static
from zope.interface import implementer
from gnuradio import analog
from gnuradio import blocks
from gnuradio import fft
from gnuradio import gr
from gnuradio import filter as grfilter # don't shadow builtin
from gnuradio.filter import firdes
from shinysdr.filters import make_resampler
from shinysdr.interfaces import ClientResourceDef, ModeDef, IDemodulator, IModulator
from shinysdr.plugins.basic_demod import SimpleAudioDemodulator, design_lofi_audio_filter
from shinysdr.signals import SignalType
from shinysdr.types import QuantityT, RangeT
from shinysdr import units
from shinysdr.values import ExportedState, exported_value, setter
audio_modulation_index = 0.07
fm_subcarrier = 9960
fm_deviation = 480
@implementer(IDemodulator)
class VOR(SimpleAudioDemodulator):
def __init__(self, mode='VOR', zero_point=59, **kwargs):
self.channel_rate = channel_rate = 40000
internal_audio_rate = 20000 # TODO over spec'd
self.zero_point = zero_point
transition = 5000
SimpleAudioDemodulator.__init__(self,
mode=mode,
audio_rate=internal_audio_rate,
demod_rate=channel_rate,
band_filter=fm_subcarrier * 1.25 + fm_deviation + transition / 2,
band_filter_transition=transition,
**kwargs)
self.dir_rate = dir_rate = 10
if internal_audio_rate % dir_rate != 0:
raise ValueError('Audio rate %s is not a multiple of direction-finding rate %s' % (internal_audio_rate, dir_rate))
self.dir_scale = dir_scale = internal_audio_rate // dir_rate
self.audio_scale = audio_scale = channel_rate // internal_audio_rate
self.zeroer = blocks.add_const_vff((zero_point * (math.pi / 180), ))
self.dir_vector_filter = grfilter.fir_filter_ccf(1, firdes.low_pass(
1, dir_rate, 1, 2, firdes.WIN_HAMMING, 6.76))
self.am_channel_filter_block = grfilter.fir_filter_ccf(1, firdes.low_pass(
1, channel_rate, 5000, 5000, firdes.WIN_HAMMING, 6.76))
self.goertzel_fm = fft.goertzel_fc(channel_rate, dir_scale * audio_scale, 30)
self.goertzel_am = fft.goertzel_fc(internal_audio_rate, dir_scale, 30)
self.fm_channel_filter_block = grfilter.freq_xlating_fir_filter_ccc(1, (firdes.low_pass(1.0, channel_rate, fm_subcarrier / 2, fm_subcarrier / 2, firdes.WIN_HAMMING)), fm_subcarrier, channel_rate)
self.multiply_conjugate_block = blocks.multiply_conjugate_cc(1)
self.complex_to_arg_block = blocks.complex_to_arg(1)
self.am_agc_block = analog.feedforward_agc_cc(1024, 1.0)
self.am_demod_block = analog.am_demod_cf(
channel_rate=channel_rate,
audio_decim=audio_scale,
audio_pass=5000,
audio_stop=5500,
)
self.fm_demod_block = analog.quadrature_demod_cf(1)
self.phase_agc_fm = analog.agc2_cc(1e-1, 1e-2, 1.0, 1.0)
self.phase_agc_am = analog.agc2_cc(1e-1, 1e-2, 1.0, 1.0)
self.probe = blocks.probe_signal_f()
self.audio_filter_block = grfilter.fir_filter_fff(1, design_lofi_audio_filter(internal_audio_rate, False))
##################################################
# Connections
##################################################
# Input
self.connect(
self,
self.band_filter_block)
# AM chain
self.connect(
self.band_filter_block,
self.am_channel_filter_block,
self.am_agc_block,
self.am_demod_block)
# AM audio
self.connect(
self.am_demod_block,
blocks.multiply_const_ff(1.0 / audio_modulation_index * 0.5),
self.audio_filter_block)
self.connect_audio_output(self.audio_filter_block)
# AM phase
self.connect(
self.am_demod_block,
self.goertzel_am,
self.phase_agc_am,
(self.multiply_conjugate_block, 0))
# FM phase
self.connect(
self.band_filter_block,
self.fm_channel_filter_block,
self.fm_demod_block,
self.goertzel_fm,
self.phase_agc_fm,
(self.multiply_conjugate_block, 1))
# Phase comparison and output
self.connect(
self.multiply_conjugate_block,
self.dir_vector_filter,
self.complex_to_arg_block,
blocks.multiply_const_ff(-1), # opposite angle conventions
self.zeroer,
self.probe)
@exported_value(type=QuantityT(units.degree), changes='this_setter', label='Zero')
def get_zero_point(self):
return self.zero_point
@setter
def set_zero_point(self, zero_point):
self.zero_point = zero_point
self.zeroer.set_k((self.zero_point * (math.pi / 180), ))
# TODO: Have a dedicated angle type which can be specified as referenced to true/magnetic north
@exported_value(type=QuantityT(units.degree), changes='continuous', label='Bearing')
def get_angle(self):
return self.probe.level()
@implementer(IModulator)
class VORModulator(gr.hier_block2, ExportedState):
__vor_sig_freq = 30
__audio_rate = 10000
__rf_rate = 30000 # needs to be above fm_subcarrier * 2
def __init__(self, context, mode, angle=0.0):
gr.hier_block2.__init__(
self, 'SimulatedDevice VOR modulator',
gr.io_signature(1, 1, gr.sizeof_float * 1),
gr.io_signature(1, 1, gr.sizeof_gr_complex * 1),
)
self.__angle = 0.0 # dummy statically visible value will be overwritten
# TODO: My signal level parameters are probably wrong because this signal doesn't look like a real VOR signal
vor_30 = analog.sig_source_f(self.__audio_rate, analog.GR_COS_WAVE, self.__vor_sig_freq, 1, 0)
vor_add = blocks.add_cc(1)
vor_audio = blocks.add_ff(1)
# Audio/AM signal
self.connect(
vor_30,
blocks.multiply_const_ff(0.3), # M_n
(vor_audio, 0))
self.connect(
self,
blocks.multiply_const_ff(audio_modulation_index), # M_i
(vor_audio, 1))
# Carrier component
self.connect(
analog.sig_source_c(0, analog.GR_CONST_WAVE, 0, 0, 1),
(vor_add, 0))
# AM component
self.__delay = blocks.delay(gr.sizeof_gr_complex, 0) # configured by set_angle
self.connect(
vor_audio,
make_resampler(self.__audio_rate, self.__rf_rate), # TODO make a complex version and do this last
blocks.float_to_complex(1),
self.__delay,
(vor_add, 1))
# FM component
vor_fm_mult = blocks.multiply_cc(1)
self.connect( # carrier generation
analog.sig_source_f(self.__rf_rate, analog.GR_COS_WAVE, fm_subcarrier, 1, 0),
blocks.float_to_complex(1),
(vor_fm_mult, 1))
self.connect( # modulation
vor_30,
make_resampler(self.__audio_rate, self.__rf_rate),
analog.frequency_modulator_fc(2 * math.pi * fm_deviation / self.__rf_rate),
blocks.multiply_const_cc(0.3), # M_d
vor_fm_mult,
(vor_add, 2))
self.connect(
vor_add,
self)
# calculate and initialize delay
self.set_angle(angle)
@exported_value(type=RangeT([(0, 2 * math.pi)], unit=units.degree, strict=False), changes='this_setter', label='Bearing')
def get_angle(self):
return self.__angle
@setter
def set_angle(self, value):
value = float(value)
compensation = math.pi / 180 * -6.5 # empirical, calibrated against VOR receiver (and therefore probably wrong)
value = value + compensation
value = value % (2 * math.pi)
phase_shift = int(self.__rf_rate / self.__vor_sig_freq * (value / (2 * math.pi)))
self.__delay.set_dly(phase_shift)
self.__angle = value
def get_input_type(self):
return SignalType(kind='MONO', sample_rate=self.__audio_rate)
def get_output_type(self):
return SignalType(kind='IQ', sample_rate=self.__rf_rate)
# Twisted plugin exports
pluginMode = ModeDef(mode='VOR',
info='VOR',
demod_class=VOR,
mod_class=VORModulator)
pluginClient = ClientResourceDef(
key=__name__,
resource=static.File(os.path.join(os.path.split(__file__)[0], 'client')),
load_js_path='vor.js')
| gpl-3.0 | 5,031,653,032,306,906,000 | 37.738776 | 203 | 0.611632 | false | 3.458819 | false | false | false |
yafeunteun/wikipedia-spam-classifier | revscoring/revscoring/utilities/fit.py | 1 | 2484 | """
``revscoring fit -h``
::
Fits a dependent (an extractable value like a Datasource or Feature) to
observed data. These are often used along with bag-of-words
methods to reduce the feature space prior to training and testing a model
or to train a sub-model.
Usage:
fit -h | --help
fit <dependent> <label>
[--input=<path>]
[--datasource-file=<path>]
[--debug]
Options:
-h --help Prints this documentation
<dependent> The classpath to `Dependent`
that can be fit to observations
<label> The label that should be predicted
--input=<path> Path to a file containing observations
[default: <stdin>]
--datasource-file=<math> Path to a file for writing out the trained
datasource [default: <stdout>]
--debug Print debug logging.
"""
import logging
import sys
import docopt
import yamlconf
from ..dependencies import solve
from .util import read_observations
logger = logging.getLogger(__name__)
def main(argv=None):
args = docopt.docopt(__doc__, argv=argv)
logging.basicConfig(
level=logging.INFO if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
dependent = yamlconf.import_path(args['<dependent>'])
label_name = args['<label>']
if args['--input'] == "<stdin>":
observations = read_observations(sys.stdin)
else:
observations = read_observations(open(args['--input']))
logger.info("Reading observations...")
value_labels = [
(list(solve(dependent.dependencies, cache=ob['cache'])),
ob[label_name])
for ob in observations]
logger.debug(" -- {0} observations gathered".format(len(value_labels)))
if args['--datasource-file'] == "<stdout>":
datasource_f = sys.stdout
else:
datasource_f = open(args['--datasource-file'], 'w')
debug = args['--debug']
run(dependent, label_name, value_labels, datasource_f, debug)
def run(dependent, label_name, value_labels, datasource_f, debug):
logger.info("Fitting {0} ({1})".format(dependent, type(dependent)))
dependent.fit(value_labels)
logger.info("Writing fitted selector to {0}".format(datasource_f))
dependent.dump(datasource_f)
| mit | -7,085,234,002,436,981,000 | 30.846154 | 77 | 0.593398 | false | 4.167785 | false | false | false |
stensonowen/spim-grader | spim-grader.py | 2 | 3172 | #!/usr/bin/python
'''
SPIM Auto-grader
Owen Stenson
Grades every file in the 'submissions' folder using every test in the 'samples' folder.
Writes to 'results' folder.
'''
import os, time, re
from subprocess import Popen, PIPE, STDOUT
def run(fn, sample_input='\n'):
#start process and write input
proc = Popen(["spim", "-file", "submissions/"+fn], stdin=PIPE, stdout=PIPE, stderr=PIPE)
if sample_input[-1:] != '\n':
print "Warning: last line (of file below) must end with newline char to be submitted. Assuming it should..."
sample_input = sample_input + '\n'
proc.stdin.write(sample_input)
return proc
def grade(p, f):
#arg = process running homework file, file to write results to
print "Writing to ", f
f = open("results/" + f, 'w')
time.sleep(.1)
if p.poll() is None:
#process is either hanging or being slow
time.sleep(5)
if p.poll() is None:
p.kill()
f.write("Process hung; no results to report\n")
f.close()
return
output = p.stdout.read()
#remove output header
hdrs = []
hdrs.append(re.compile("SPIM Version .* of .*\n"))
hdrs.append(re.compile("Copyright .*, James R. Larus.\n"))
hdrs.append(re.compile("All Rights Reserved.\n"))
hdrs.append(re.compile("See the file README for a full copyright notice.\n"))
hdrs.append(re.compile("Loaded: .*/spim/.*\n"))
for hdr in hdrs:
output = re.sub(hdr, "", output)
errors = p.stderr.read()
if errors == "":
f.write("\t**PROCESS COMPLETED**\n")
f.write(output + '\n'*2)
else:
f.write("\t**PROCESS FAILED TO COMPILE**\n")
f.write(output + '\n' + errors + '\n'*2)
f.close()
def generate_filename(submission, sample):
#extract RCS id from submission title
try:
rcs_start = submission.index('_') + 1
rcs_end = min(submission.index('attempt'), submission.index('.')) - 1
rcs = submission[rcs_start:rcs_end]
except:
rcs = submission
return rcs + '__' + sample
def main():
#no use in running if content directories aren't present
assert os.path.isdir("samples")
assert os.path.isdir("submissions")
if os.path.isdir("results") is False:
assert os.path.isfile("results") == False
os.makedirs("results")
#cycle through files to grade:
for submission in os.listdir('submissions'):
#cycle through samples to test (ignore .example):
for sample in os.listdir('samples'):
#ignore example files
if submission == ".example" or sample == ".example":
continue
sample_file = open('samples/'+sample, 'r')
#read sample input; fix windows EOL char
sample_input = sample_file.read()
sample_input = sample_input.replace('\r', '')
#create process
p = run(submission, sample_input)
output_file = generate_filename(submission, sample)
grade(p, output_file)
if __name__ == "__main__":
main()
| gpl-2.0 | 1,135,793,950,941,566,300 | 34.45977 | 116 | 0.57913 | false | 3.7717 | false | false | false |
FCP-INDI/C-PAC | CPAC/utils/tests/test_datasource.py | 1 | 2537 |
import os
import json
from CPAC.pipeline import nipype_pipeline_engine as pe
import nipype.interfaces.utility as util
from CPAC.utils.test_resources import setup_test_wf
from CPAC.utils.datasource import match_epi_fmaps
def test_match_epi_fmaps():
# good data to use
s3_prefix = "s3://fcp-indi/data/Projects/HBN/MRI/Site-CBIC/sub-NDARAB708LM5"
s3_paths = [
"func/sub-NDARAB708LM5_task-rest_run-1_bold.json",
"fmap/sub-NDARAB708LM5_dir-PA_acq-fMRI_epi.nii.gz",
"fmap/sub-NDARAB708LM5_dir-PA_acq-fMRI_epi.json",
"fmap/sub-NDARAB708LM5_dir-AP_acq-fMRI_epi.nii.gz",
"fmap/sub-NDARAB708LM5_dir-AP_acq-fMRI_epi.json"
]
wf, ds, local_paths = setup_test_wf(s3_prefix, s3_paths,
"test_match_epi_fmaps")
opposite_pe_json = local_paths["fmap/sub-NDARAB708LM5_dir-PA_acq-fMRI_epi.json"]
same_pe_json = local_paths["fmap/sub-NDARAB708LM5_dir-AP_acq-fMRI_epi.json"]
func_json = local_paths["func/sub-NDARAB708LM5_task-rest_run-1_bold.json"]
with open(opposite_pe_json, "r") as f:
opposite_pe_params = json.load(f)
with open(same_pe_json, "r") as f:
same_pe_params = json.load(f)
with open(func_json, "r") as f:
func_params = json.load(f)
bold_pedir = func_params["PhaseEncodingDirection"]
fmap_paths_dct = {"epi_PA":
{"scan": local_paths["fmap/sub-NDARAB708LM5_dir-PA_acq-fMRI_epi.nii.gz"],
"scan_parameters": opposite_pe_params},
"epi_AP":
{"scan": local_paths["fmap/sub-NDARAB708LM5_dir-AP_acq-fMRI_epi.nii.gz"],
"scan_parameters": same_pe_params}
}
match_fmaps = \
pe.Node(util.Function(input_names=['fmap_dct',
'bold_pedir'],
output_names=['opposite_pe_epi',
'same_pe_epi'],
function=match_epi_fmaps,
as_module=True),
name='match_epi_fmaps')
match_fmaps.inputs.fmap_dct = fmap_paths_dct
match_fmaps.inputs.bold_pedir = bold_pedir
ds.inputs.func_json = func_json
ds.inputs.opposite_pe_json = opposite_pe_json
ds.inputs.same_pe_json = same_pe_json
wf.connect(match_fmaps, 'opposite_pe_epi', ds, 'should_be_dir-PA')
wf.connect(match_fmaps, 'same_pe_epi', ds, 'should_be_dir-AP')
wf.run()
| bsd-3-clause | -464,176,579,860,499,840 | 37.439394 | 99 | 0.573906 | false | 2.876417 | false | false | false |
elishowk/flaskexperiment | commonecouteserver/data/__init__.py | 1 | 6483 | # -*- coding: utf-8 -*-
# Copyright (c) 2011 CommOnEcoute http://commonecoute.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>
from flask import abort
import riak
import uuid
from datetime import datetime
import os
DB_HOST = os.environ.get('COESERVER_DB_HOST') or '127.0.0.1'
DB_PORT = os.environ.get('COESERVER_DB_PORT') or 8087
DB_PORT = int(DB_PORT)
import logging
logger = logging.getLogger('coeserver')
class ObjectExistsException(Exception):
pass
class GenericBucket(object):
def __init__(self, bucketname, port=DB_PORT, host=DB_HOST):
"""
initiate a riak bucket
"""
self.bucketname = bucketname
self._connect(bucketname, port, host)
def _connect(self, bucketname, port, host):
"""
Connects to a particular bucket
on the defaut port of riak protobuf interface
"""
#print "connecting to %s on port %d"%(host, port)
self.client = riak.RiakClient(host=host, port=port, transport_class=riak.RiakPbcTransport)
#self.client.set_r(1)
#self.client.set_w(1)
self.bucket = self.client.bucket(bucketname)
def _encode(self, data):
"""
on the fly encoding
"""
encodeddata = {}
for (key, value) in data.iteritems():
if isinstance(value, unicode):
encodeddata[key] = value.encode('utf-8', 'replace')
else:
encodeddata[key] = value
return encodeddata
def _addLinks(self, object, links):
"""
add links to an object given a list of identifiers
"""
for linked_key in links:
linked_object = self.bucket.get(linked_key)
object.add_link(linked_object)
linked_object.add_link(object)
def _genID(self, data):
return "%s:::%s"%(datetime.utcnow().isoformat(), uuid.uuid4())
def _getNewObject(self, data):
if self.bucket.get(data['id_txt']).exists():
raise(ObjectExistsException())
else:
encodeddata = self._encode(data)
return self.bucket.new(encodeddata['id_txt'], encodeddata)
def create(self, data, links=[]):
"""
Supply a key to store data under
The 'data' can be any data Python's 'json' encoder can handle (except unicode values with protobuf)
Returns the json object created
"""
if not self.client.is_alive():
return {'response': {"error": "database is dead"}, 'statuscode': 500}
try:
if 'id_txt' not in data:
data['id_txt'] = self._genID(data)
new_object = self._getNewObject(data)
# eventually links to other objects
self._addLinks(new_object, links)
# Save the object to Riak.
return {'response':new_object.store().get_data()}
#return new_object.get_key()
except ObjectExistsException, existsexc:
return {'response': {"error": "record already exists"}, 'statuscode': 400}
def read(self, key):
"""
Returns json object for a given key
"""
if isinstance(key, unicode):
key = key.encode('utf-8', 'replace')
response = self.bucket.get(key).get_data()
if response is None:
abort(404)
return {'response': response }
def update(self, key, update_data, links=[]):
"""
Gets an updates an item for database
Returns the updated json object
"""
if isinstance(key, unicode):
key = key.encode('utf-8', 'replace')
update_object = self.bucket.get(key)
if not update_object.exists():
abort(404)
data = update_object.get_data()
data.update(update_data)
update_object.set_data(self._encode(data))
# eventually links to other objects
self._addLinks(update_object, links)
return {'response': update_object.get_data()} or {'response': {"error": "could not update record"}, 'statuscode': 404}
def delete(self, key):
"""
Deletes a record
"""
if isinstance(key, unicode):
key = key.encode('utf-8', 'replace')
response = self.bucket.get(key)
if not response.exists():
abort(404)
else:
response.delete()
def readallkeys(self):
return {'response': self.bucket.get_keys()}
class Track(GenericBucket):
def __init__(self, *args, **kwargs):
GenericBucket.__init__(self, "track", *args, **kwargs)
def _genID(self, data):
return "%s:::%s:::%s"%(data['start_date'], data['end_date'], uuid.uuid4())
class Event(GenericBucket):
def __init__(self, *args, **kwargs):
GenericBucket.__init__(self, "event", *args, **kwargs)
def _genID(self, data):
return "%s:::%s:::%s"%(data['start_date'], data['end_date'], uuid.uuid4())
class User(GenericBucket):
def __init__(self, *args, **kwargs):
GenericBucket.__init__(self, "user", *args, **kwargs)
def _genID(self, data):
return data['email_txt']
class Post(GenericBucket):
def __init__(self, *args, **kwargs):
GenericBucket.__init__(self, "post", *args, **kwargs)
class Product(GenericBucket):
def __init__(self, *args, **kwargs):
GenericBucket.__init__(self, "product", *args, **kwargs)
def _genID(self, data):
return "%s"%uuid.uuid4()
class Genre(GenericBucket):
def __init__(self, *args, **kwargs):
GenericBucket.__init__(self, "genre", *args, **kwargs)
def _genID(self, data):
return "%s"%uuid.uuid4()
class Artist(GenericBucket):
def __init__(self, *args, **kwargs):
GenericBucket.__init__(self, "artist", *args, **kwargs)
def _genID(self, data):
return "%s"%uuid.uuid4()
| agpl-3.0 | 1,303,229,853,120,405,800 | 31.742424 | 126 | 0.586303 | false | 3.849762 | false | false | false |
alexherns/biotite-scripts | cluster_coverage.py | 1 | 2808 | #!/usr/bin/env python2.7
import sys, operator, argparse
from Bio import SeqIO
parser = argparse.ArgumentParser(description='''Prints out the coverage values for each cluster, by sample and total.
Also lists number of hits in each cluster.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False,
epilog= '''TSV of features and as downloaded from ggkbase.
Scaffold_gene is in column 2.
Coverage value is in column 5.
Clusters file as generated from USEARCH
''')
#Required arguments
required = parser.add_argument_group('REQUIRED')
required.add_argument('-c', help= 'clusters.uc', required=True, type=str)
required.add_argument('-t', help= 'features.tsv', required=True, type=str)
#Optional arguments
optional = parser.add_argument_group('OPTIONAL')
optional.add_argument('-h', action="help", help="show this help message and exit")
args = parser.parse_args()
cluster_file= args.c
tsv_file= args.t
#Create a dictionary of feature:coverage values
#Read in the tsv of features
handle= open(tsv_file, "r")
feat2cov= {}
samples= []
for line in handle:
contig_features= line.strip().split("\t")
samples.append(contig_features[1].split("_scaffold")[0])
feature, coverage= contig_features[1], contig_features[4]
feat2cov[feature]= float(coverage)
samples= list(set(samples))
handle.close()
#Select all non-redundant cluster lines from file
clusters= [line.strip().split("\t") for line in open(cluster_file) if line[0] in ["H", "C"]]
#Extract unique list of all clusters
cluster_names= list(set([line[1]for line in clusters]))
#Dictionary of clusters:
# clust_dict[cluster_name: [clust1, ..., clustN]]
clust_dict= {}
for cluster in clusters:
if cluster[1] not in clust_dict:
clust_dict[cluster[1]]= []
clust_dict[cluster[1]].append(cluster)
#List to contain output lines
cov_list= []
for cluster in clust_dict:
#Each line in output, formatted as list
clustercov= [cluster]+[0]*(len(samples)+3)
for line in clust_dict[cluster]:
scaf= line[8]
#Append centroids
if line[0]=="C":
clustercov.append(scaf)
sample= scaf.split("_scaffold")[0]
if sample not in samples:
print "FAIL: SCAF", scaf
else:
clustercov[samples.index(sample)+1]+=feat2cov[scaf.split(" ")[0]]
#Number of samples with positive hits
clustercov[-2]= len([i for i in clustercov[1:-4] if i > 0])
#Number of hits
clustercov[-3]= len(clust_dict[cluster])
#Total (raw and not normalized) cluster coverage value
clustercov[-4]= sum(clustercov[1:-4])
cov_list.append(clustercov)
#Print header line
print "TAX\t"+"\t".join(samples)+"\tTotal\t#Hits\t#Samples\tCentroid"
#Print each line in output
print "\n".join(["\t".join([str(i) for i in row]) for row in cov_list])
| mit | 7,550,752,532,490,339,000 | 32.428571 | 118 | 0.691595 | false | 3.319149 | false | false | false |
mackong/gitql | prettytable/prettytable.py | 1 | 54214 | #!/usr/bin/env python
#
# Copyright (c) 2009-2013, Luke Maurits <[email protected]>
# All rights reserved.
# With contributions from:
# * Chris Clark
# * Klein Stephane
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__version__ = "0.7.2"
import copy
import csv
import random
import re
import sys
import textwrap
import itertools
import unicodedata
py3k = sys.version_info[0] >= 3
if py3k:
unicode = str
basestring = str
itermap = map
iterzip = zip
uni_chr = chr
from html.parser import HTMLParser
else:
itermap = itertools.imap
iterzip = itertools.izip
uni_chr = unichr
from HTMLParser import HTMLParser
if py3k and sys.version_info[1] >= 2:
from html import escape
else:
from cgi import escape
# hrule styles
FRAME = 0
ALL = 1
NONE = 2
HEADER = 3
# Table styles
DEFAULT = 10
MSWORD_FRIENDLY = 11
PLAIN_COLUMNS = 12
RANDOM = 20
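# Illustrative usage sketch (not executed by the module): the constants above
# are intended to be assigned to the ``hrules``/``vrules`` options or passed to
# ``set_style``.  The table ``t`` and its columns are invented for this example.
#
#   t = PrettyTable(["City", "Population"])
#   t.hrules = ALL      # horizontal rule after every row
#   t.vrules = FRAME    # vertical rules only at the table edges
#   t.set_style(MSWORD_FRIENDLY)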
_re = re.compile("\033\\[[0-9;]*m")  # matches ANSI escape sequences such as colour codes
def _get_size(text):
lines = text.split("\n")
height = len(lines)
width = max([_str_block_width(line) for line in lines])
return (width, height)
class PrettyTable(object):
def __init__(self, field_names=None, **kwargs):
"""Return a new PrettyTable instance
Arguments:
encoding - Unicode encoding scheme used to decode any encoded input
field_names - list or tuple of field names
fields - list or tuple of field names to include in displays
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
header - print a header showing field names (True or False)
header_style - stylisation to apply to field names in header ("cap", "title", "upper", "lower" or None)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: FRAME, HEADER, ALL, NONE
vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
vertical_char - single character string used to draw vertical lines
horizontal_char - single character string used to draw horizontal lines
junction_char - single character string used to draw line junctions
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
valign - default valign for each row (None, "t", "m" or "b")
reversesort - True or False to sort in descending or ascending order"""
self.encoding = kwargs.get("encoding", "UTF-8")
# Data
self._field_names = []
self._align = {}
self._valign = {}
self._max_width = {}
self._rows = []
if field_names:
self.field_names = field_names
else:
self._widths = []
# Options
self._options = "start end fields header border sortby reversesort sort_key attributes format hrules vrules".split()
self._options.extend("int_format float_format padding_width left_padding_width right_padding_width".split())
self._options.extend("vertical_char horizontal_char junction_char header_style valign xhtml print_empty".split())
for option in self._options:
if option in kwargs:
self._validate_option(option, kwargs[option])
else:
kwargs[option] = None
self._start = kwargs["start"] or 0
self._end = kwargs["end"] or None
self._fields = kwargs["fields"] or None
if kwargs["header"] in (True, False):
self._header = kwargs["header"]
else:
self._header = True
self._header_style = kwargs["header_style"] or None
if kwargs["border"] in (True, False):
self._border = kwargs["border"]
else:
self._border = True
self._hrules = kwargs["hrules"] or FRAME
self._vrules = kwargs["vrules"] or ALL
self._sortby = kwargs["sortby"] or None
if kwargs["reversesort"] in (True, False):
self._reversesort = kwargs["reversesort"]
else:
self._reversesort = False
self._sort_key = kwargs["sort_key"] or (lambda x: x)
self._int_format = kwargs["int_format"] or {}
self._float_format = kwargs["float_format"] or {}
self._padding_width = kwargs["padding_width"] or 1
self._left_padding_width = kwargs["left_padding_width"] or None
self._right_padding_width = kwargs["right_padding_width"] or None
self._vertical_char = kwargs["vertical_char"] or self._unicode("|")
self._horizontal_char = kwargs["horizontal_char"] or self._unicode("-")
self._junction_char = kwargs["junction_char"] or self._unicode("+")
if kwargs["print_empty"] in (True, False):
self._print_empty = kwargs["print_empty"]
else:
self._print_empty = True
self._format = kwargs["format"] or False
self._xhtml = kwargs["xhtml"] or False
self._attributes = kwargs["attributes"] or {}
def _unicode(self, value):
if not isinstance(value, basestring):
value = str(value)
if not isinstance(value, unicode):
value = unicode(value, self.encoding, "strict")
return value
def _justify(self, text, width, align):
excess = width - _str_block_width(text)
if align == "l":
return text + excess * " "
elif align == "r":
return excess * " " + text
else:
if excess % 2:
# Uneven padding
# Put more space on right if text is of odd length...
if _str_block_width(text) % 2:
return (excess//2)*" " + text + (excess//2 + 1)*" "
# and more space on left if text is of even length
else:
return (excess//2 + 1)*" " + text + (excess//2)*" "
# Why distribute extra space this way? To match the behaviour of
# the inbuilt str.center() method.
else:
# Equal padding on either side
return (excess//2)*" " + text + (excess//2)*" "
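    # Worked example of the centring rule above (values chosen purely for
    # illustration): centring "ab" in a width of 5 leaves an excess of 3, and
    # because "ab" has an even display width the extra space goes on the left:
    #   _justify("ab", 5, "c")  -> "  ab "
    #   _justify("abc", 6, "l") -> "abc   "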
def __getattr__(self, name):
if name == "rowcount":
return len(self._rows)
elif name == "colcount":
if self._field_names:
return len(self._field_names)
elif self._rows:
return len(self._rows[0])
else:
return 0
else:
raise AttributeError(name)
def __getitem__(self, index):
new = PrettyTable()
new.field_names = self.field_names
for attr in self._options:
setattr(new, "_"+attr, getattr(self, "_"+attr))
setattr(new, "_align", getattr(self, "_align"))
if isinstance(index, slice):
for row in self._rows[index]:
new.add_row(row)
elif isinstance(index, int):
new.add_row(self._rows[index])
else:
raise Exception("Index %s is invalid, must be an integer or slice" % str(index))
return new
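    # Sketch of typical __getitem__ use (the table ``t`` and its rows are
    # hypothetical): indexing or slicing returns a new PrettyTable that copies
    # the field names and options but keeps only the selected rows.
    #   first_three = t[0:3]
    #   fifth_row_only = t[4]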
if py3k:
def __str__(self):
return self.__unicode__()
else:
def __str__(self):
return self.__unicode__().encode(self.encoding)
def __unicode__(self):
return self.get_string()
##############################
# ATTRIBUTE VALIDATORS #
##############################
# The method _validate_option is all that should be used elsewhere in the code base to validate options.
# It will call the appropriate validation method for that option. The individual validation methods should
# never need to be called directly (although nothing bad will happen if they *are*).
# Validation happens in TWO places.
    # Firstly, in the property setters defined in the ATTRIBUTE MANAGEMENT section.
# Secondly, in the _get_options method, where keyword arguments are mixed with persistent settings
def _validate_option(self, option, val):
        if option in ("field_names",):
            self._validate_field_names(val)
        elif option in ("start", "end", "max_width", "padding_width", "left_padding_width", "right_padding_width", "format"):
            self._validate_nonnegative_int(option, val)
        elif option in ("sortby",):
            self._validate_field_name(option, val)
        elif option in ("sort_key",):
            self._validate_function(option, val)
        elif option in ("hrules",):
            self._validate_hrules(option, val)
        elif option in ("vrules",):
            self._validate_vrules(option, val)
        elif option in ("fields",):
            self._validate_all_field_names(option, val)
        elif option in ("header", "border", "reversesort", "xhtml", "print_empty"):
            self._validate_true_or_false(option, val)
        elif option in ("header_style",):
            self._validate_header_style(val)
        elif option in ("int_format",):
            self._validate_int_format(option, val)
        elif option in ("float_format",):
            self._validate_float_format(option, val)
        elif option in ("vertical_char", "horizontal_char", "junction_char"):
            self._validate_single_char(option, val)
        elif option in ("attributes",):
            self._validate_attributes(option, val)
else:
raise Exception("Unrecognised option: %s!" % option)
def _validate_field_names(self, val):
# Check for appropriate length
if self._field_names:
try:
assert len(val) == len(self._field_names)
except AssertionError:
raise Exception("Field name list has incorrect number of values, (actual) %d!=%d (expected)" % (len(val), len(self._field_names)))
if self._rows:
try:
assert len(val) == len(self._rows[0])
except AssertionError:
raise Exception("Field name list has incorrect number of values, (actual) %d!=%d (expected)" % (len(val), len(self._rows[0])))
# # Check for uniqueness
# try:
# assert len(val) == len(set(val))
# except AssertionError:
# raise Exception("Field names must be unique!")
def _validate_header_style(self, val):
try:
assert val in ("cap", "title", "upper", "lower", None)
except AssertionError:
raise Exception("Invalid header style, use cap, title, upper, lower or None!")
def _validate_align(self, val):
try:
assert val in ["l","c","r"]
except AssertionError:
raise Exception("Alignment %s is invalid, use l, c or r!" % val)
def _validate_valign(self, val):
try:
assert val in ["t","m","b",None]
except AssertionError:
raise Exception("Alignment %s is invalid, use t, m, b or None!" % val)
def _validate_nonnegative_int(self, name, val):
try:
assert int(val) >= 0
except AssertionError:
raise Exception("Invalid value for %s: %s!" % (name, self._unicode(val)))
def _validate_true_or_false(self, name, val):
try:
assert val in (True, False)
except AssertionError:
raise Exception("Invalid value for %s! Must be True or False." % name)
def _validate_int_format(self, name, val):
if val == "":
return
try:
assert type(val) in (str, unicode)
assert val.isdigit()
except AssertionError:
raise Exception("Invalid value for %s! Must be an integer format string." % name)
def _validate_float_format(self, name, val):
if val == "":
return
try:
assert type(val) in (str, unicode)
assert "." in val
bits = val.split(".")
assert len(bits) <= 2
assert bits[0] == "" or bits[0].isdigit()
assert bits[1] == "" or bits[1].isdigit()
except AssertionError:
raise Exception("Invalid value for %s! Must be a float format string." % name)
def _validate_function(self, name, val):
try:
assert hasattr(val, "__call__")
except AssertionError:
raise Exception("Invalid value for %s! Must be a function." % name)
def _validate_hrules(self, name, val):
try:
assert val in (ALL, FRAME, HEADER, NONE)
except AssertionError:
raise Exception("Invalid value for %s! Must be ALL, FRAME, HEADER or NONE." % name)
def _validate_vrules(self, name, val):
try:
assert val in (ALL, FRAME, NONE)
except AssertionError:
raise Exception("Invalid value for %s! Must be ALL, FRAME, or NONE." % name)
def _validate_field_name(self, name, val):
try:
assert (val in self._field_names) or (val is None)
except AssertionError:
raise Exception("Invalid field name: %s!" % val)
def _validate_all_field_names(self, name, val):
try:
for x in val:
self._validate_field_name(name, x)
except AssertionError:
raise Exception("fields must be a sequence of field names!")
def _validate_single_char(self, name, val):
try:
assert _str_block_width(val) == 1
except AssertionError:
raise Exception("Invalid value for %s! Must be a string of length 1." % name)
def _validate_attributes(self, name, val):
try:
assert isinstance(val, dict)
except AssertionError:
raise Exception("attributes must be a dictionary of name/value pairs!")
##############################
# ATTRIBUTE MANAGEMENT #
##############################
def _get_field_names(self):
        """The names of the fields
        Arguments:
        fields - list or tuple of field names"""
        return self._field_names
def _set_field_names(self, val):
val = [self._unicode(x) for x in val]
self._validate_option("field_names", val)
if self._field_names:
old_names = self._field_names[:]
self._field_names = val
if self._align and old_names:
for old_name, new_name in zip(old_names, val):
self._align[new_name] = self._align[old_name]
for old_name in old_names:
if old_name not in self._align:
self._align.pop(old_name)
else:
for field in self._field_names:
self._align[field] = "c"
if self._valign and old_names:
for old_name, new_name in zip(old_names, val):
self._valign[new_name] = self._valign[old_name]
for old_name in old_names:
if old_name not in self._valign:
self._valign.pop(old_name)
else:
for field in self._field_names:
self._valign[field] = "t"
field_names = property(_get_field_names, _set_field_names)
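    # Sketch of the renaming behaviour implemented in _set_field_names above
    # (all names are invented): per-column alignment recorded under the old
    # field names is carried over to the corresponding new names.
    #   t.field_names = ["City", "Population"]
    #   t.align["City"] = "l"
    #   t.field_names = ["Town", "Residents"]  # "Town" keeps the "l" alignment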
def _get_align(self):
return self._align
def _set_align(self, val):
self._validate_align(val)
for field in self._field_names:
self._align[field] = val
align = property(_get_align, _set_align)
def _get_valign(self):
return self._valign
def _set_valign(self, val):
self._validate_valign(val)
for field in self._field_names:
self._valign[field] = val
valign = property(_get_valign, _set_valign)
def _get_max_width(self):
return self._max_width
def _set_max_width(self, val):
self._validate_option("max_width", val)
for field in self._field_names:
self._max_width[field] = val
max_width = property(_get_max_width, _set_max_width)
def _get_fields(self):
"""List or tuple of field names to include in displays
Arguments:
fields - list or tuple of field names to include in displays"""
return self._fields
def _set_fields(self, val):
self._validate_option("fields", val)
self._fields = val
fields = property(_get_fields, _set_fields)
def _get_start(self):
"""Start index of the range of rows to print
Arguments:
start - index of first data row to include in output"""
return self._start
def _set_start(self, val):
self._validate_option("start", val)
self._start = val
start = property(_get_start, _set_start)
def _get_end(self):
"""End index of the range of rows to print
Arguments:
end - index of last data row to include in output PLUS ONE (list slice style)"""
return self._end
def _set_end(self, val):
self._validate_option("end", val)
self._end = val
end = property(_get_end, _set_end)
def _get_sortby(self):
"""Name of field by which to sort rows
Arguments:
sortby - field name to sort by"""
return self._sortby
def _set_sortby(self, val):
self._validate_option("sortby", val)
self._sortby = val
sortby = property(_get_sortby, _set_sortby)
def _get_reversesort(self):
"""Controls direction of sorting (ascending vs descending)
Arguments:
        reversesort - set to True to sort by descending order, or False to sort by ascending order"""
return self._reversesort
def _set_reversesort(self, val):
self._validate_option("reversesort", val)
self._reversesort = val
reversesort = property(_get_reversesort, _set_reversesort)
def _get_sort_key(self):
"""Sorting key function, applied to data points before sorting
Arguments:
sort_key - a function which takes one argument and returns something to be sorted"""
return self._sort_key
def _set_sort_key(self, val):
self._validate_option("sort_key", val)
self._sort_key = val
sort_key = property(_get_sort_key, _set_sort_key)
def _get_header(self):
"""Controls printing of table header with field names
Arguments:
header - print a header showing field names (True or False)"""
return self._header
def _set_header(self, val):
self._validate_option("header", val)
self._header = val
header = property(_get_header, _set_header)
def _get_header_style(self):
"""Controls stylisation applied to field names in header
Arguments:
header_style - stylisation to apply to field names in header ("cap", "title", "upper", "lower" or None)"""
return self._header_style
def _set_header_style(self, val):
self._validate_header_style(val)
self._header_style = val
header_style = property(_get_header_style, _set_header_style)
def _get_border(self):
"""Controls printing of border around table
Arguments:
border - print a border around the table (True or False)"""
return self._border
def _set_border(self, val):
self._validate_option("border", val)
self._border = val
border = property(_get_border, _set_border)
def _get_hrules(self):
"""Controls printing of horizontal rules after rows
Arguments:
hrules - horizontal rules style. Allowed values: FRAME, ALL, HEADER, NONE"""
return self._hrules
def _set_hrules(self, val):
self._validate_option("hrules", val)
self._hrules = val
hrules = property(_get_hrules, _set_hrules)
def _get_vrules(self):
"""Controls printing of vertical rules between columns
Arguments:
vrules - vertical rules style. Allowed values: FRAME, ALL, NONE"""
return self._vrules
def _set_vrules(self, val):
self._validate_option("vrules", val)
self._vrules = val
vrules = property(_get_vrules, _set_vrules)
def _get_int_format(self):
"""Controls formatting of integer data
Arguments:
int_format - integer format string"""
return self._int_format
def _set_int_format(self, val):
# self._validate_option("int_format", val)
for field in self._field_names:
self._int_format[field] = val
int_format = property(_get_int_format, _set_int_format)
def _get_float_format(self):
"""Controls formatting of floating point data
Arguments:
float_format - floating point format string"""
return self._float_format
def _set_float_format(self, val):
# self._validate_option("float_format", val)
for field in self._field_names:
self._float_format[field] = val
float_format = property(_get_float_format, _set_float_format)
def _get_padding_width(self):
"""The number of empty spaces between a column's edge and its content
Arguments:
padding_width - number of spaces, must be a positive integer"""
return self._padding_width
def _set_padding_width(self, val):
self._validate_option("padding_width", val)
self._padding_width = val
padding_width = property(_get_padding_width, _set_padding_width)
def _get_left_padding_width(self):
"""The number of empty spaces between a column's left edge and its content
Arguments:
left_padding - number of spaces, must be a positive integer"""
return self._left_padding_width
def _set_left_padding_width(self, val):
self._validate_option("left_padding_width", val)
self._left_padding_width = val
left_padding_width = property(_get_left_padding_width, _set_left_padding_width)
def _get_right_padding_width(self):
"""The number of empty spaces between a column's right edge and its content
Arguments:
right_padding - number of spaces, must be a positive integer"""
return self._right_padding_width
def _set_right_padding_width(self, val):
self._validate_option("right_padding_width", val)
self._right_padding_width = val
right_padding_width = property(_get_right_padding_width, _set_right_padding_width)
def _get_vertical_char(self):
"""The charcter used when printing table borders to draw vertical lines
Arguments:
vertical_char - single character string used to draw vertical lines"""
return self._vertical_char
def _set_vertical_char(self, val):
val = self._unicode(val)
self._validate_option("vertical_char", val)
self._vertical_char = val
vertical_char = property(_get_vertical_char, _set_vertical_char)
def _get_horizontal_char(self):
"""The charcter used when printing table borders to draw horizontal lines
Arguments:
horizontal_char - single character string used to draw horizontal lines"""
return self._horizontal_char
def _set_horizontal_char(self, val):
val = self._unicode(val)
self._validate_option("horizontal_char", val)
self._horizontal_char = val
horizontal_char = property(_get_horizontal_char, _set_horizontal_char)
def _get_junction_char(self):
"""The charcter used when printing table borders to draw line junctions
Arguments:
junction_char - single character string used to draw line junctions"""
return self._junction_char
def _set_junction_char(self, val):
val = self._unicode(val)
self._validate_option("vertical_char", val)
self._junction_char = val
junction_char = property(_get_junction_char, _set_junction_char)
def _get_format(self):
"""Controls whether or not HTML tables are formatted to match styling options
Arguments:
format - True or False"""
return self._format
def _set_format(self, val):
self._validate_option("format", val)
self._format = val
format = property(_get_format, _set_format)
def _get_print_empty(self):
"""Controls whether or not empty tables produce a header and frame or just an empty string
Arguments:
print_empty - True or False"""
return self._print_empty
def _set_print_empty(self, val):
self._validate_option("print_empty", val)
self._print_empty = val
print_empty = property(_get_print_empty, _set_print_empty)
def _get_attributes(self):
"""A dictionary of HTML attribute name/value pairs to be included in the <table> tag when printing HTML
Arguments:
attributes - dictionary of attributes"""
return self._attributes
def _set_attributes(self, val):
self._validate_option("attributes", val)
self._attributes = val
attributes = property(_get_attributes, _set_attributes)
##############################
# OPTION MIXER #
##############################
def _get_options(self, kwargs):
options = {}
for option in self._options:
if option in kwargs:
self._validate_option(option, kwargs[option])
options[option] = kwargs[option]
else:
options[option] = getattr(self, "_"+option)
return options
##############################
# PRESET STYLE LOGIC #
##############################
def set_style(self, style):
if style == DEFAULT:
self._set_default_style()
elif style == MSWORD_FRIENDLY:
self._set_msword_style()
elif style == PLAIN_COLUMNS:
self._set_columns_style()
elif style == RANDOM:
self._set_random_style()
else:
raise Exception("Invalid pre-set style!")
def _set_default_style(self):
self.header = True
self.border = True
self._hrules = FRAME
self._vrules = ALL
self.padding_width = 1
self.left_padding_width = 1
self.right_padding_width = 1
self.vertical_char = "|"
self.horizontal_char = "-"
self.junction_char = "+"
def _set_msword_style(self):
self.header = True
self.border = True
self._hrules = NONE
self.padding_width = 1
self.left_padding_width = 1
self.right_padding_width = 1
self.vertical_char = "|"
def _set_columns_style(self):
self.header = True
self.border = False
self.padding_width = 1
self.left_padding_width = 0
self.right_padding_width = 8
def _set_random_style(self):
# Just for fun!
self.header = random.choice((True, False))
self.border = random.choice((True, False))
self._hrules = random.choice((ALL, FRAME, HEADER, NONE))
self._vrules = random.choice((ALL, FRAME, NONE))
self.left_padding_width = random.randint(0,5)
self.right_padding_width = random.randint(0,5)
self.vertical_char = random.choice("~!@#$%^&*()_+|-=\{}[];':\",./;<>?")
self.horizontal_char = random.choice("~!@#$%^&*()_+|-=\{}[];':\",./;<>?")
self.junction_char = random.choice("~!@#$%^&*()_+|-=\{}[];':\",./;<>?")
##############################
# DATA INPUT METHODS #
##############################
def add_row(self, row):
"""Add a row to the table
Arguments:
row - row of data, should be a list with as many elements as the table
has fields"""
if self._field_names and len(row) != len(self._field_names):
raise Exception("Row has incorrect number of values, (actual) %d!=%d (expected)" %(len(row),len(self._field_names)))
if not self._field_names:
self.field_names = [("Field %d" % (n+1)) for n in range(0,len(row))]
self._rows.append(list(row))
def del_row(self, row_index):
"""Delete a row to the table
Arguments:
row_index - The index of the row you want to delete. Indexing starts at 0."""
if row_index > len(self._rows)-1:
raise Exception("Cant delete row at index %d, table only has %d rows!" % (row_index, len(self._rows)))
del self._rows[row_index]
def add_column(self, fieldname, column, align="c", valign="t"):
"""Add a column to the table.
Arguments:
fieldname - name of the field to contain the new column of data
column - column of data, should be a list with as many elements as the
table has rows
align - desired alignment for this column - "l" for left, "c" for centre and "r" for right
valign - desired vertical alignment for new columns - "t" for top, "m" for middle and "b" for bottom"""
if len(self._rows) in (0, len(column)):
self._validate_align(align)
self._validate_valign(valign)
self._field_names.append(fieldname)
self._align[fieldname] = align
self._valign[fieldname] = valign
for i in range(0, len(column)):
if len(self._rows) < i+1:
self._rows.append([])
self._rows[i].append(column[i])
else:
raise Exception("Column length %d does not match number of rows %d!" % (len(column), len(self._rows)))
def clear_rows(self):
"""Delete all rows from the table but keep the current field names"""
self._rows = []
def clear(self):
"""Delete all rows and field names from the table, maintaining nothing but styling options"""
self._rows = []
self._field_names = []
self._widths = []
##############################
# MISC PUBLIC METHODS #
##############################
def copy(self):
return copy.deepcopy(self)
##############################
# MISC PRIVATE METHODS #
##############################
def _format_value(self, field, value):
if isinstance(value, int) and field in self._int_format:
value = self._unicode(("%%%sd" % self._int_format[field]) % value)
elif isinstance(value, float) and field in self._float_format:
value = self._unicode(("%%%sf" % self._float_format[field]) % value)
return self._unicode(value)
def _compute_widths(self, rows, options):
if options["header"]:
widths = [_get_size(field)[0] for field in self._field_names]
else:
widths = len(self.field_names) * [0]
for row in rows:
for index, value in enumerate(row):
fieldname = self.field_names[index]
if fieldname in self.max_width:
widths[index] = max(widths[index], min(_get_size(value)[0], self.max_width[fieldname]))
else:
widths[index] = max(widths[index], _get_size(value)[0])
self._widths = widths
def _get_padding_widths(self, options):
if options["left_padding_width"] is not None:
lpad = options["left_padding_width"]
else:
lpad = options["padding_width"]
if options["right_padding_width"] is not None:
rpad = options["right_padding_width"]
else:
rpad = options["padding_width"]
return lpad, rpad
def _get_rows(self, options):
"""Return only those data rows that should be printed, based on slicing and sorting.
Arguments:
options - dictionary of option settings."""
# Make a copy of only those rows in the slice range
rows = copy.deepcopy(self._rows[options["start"]:options["end"]])
# Sort if necessary
if options["sortby"]:
sortindex = self._field_names.index(options["sortby"])
# Decorate
rows = [[row[sortindex]]+row for row in rows]
# Sort
rows.sort(reverse=options["reversesort"], key=options["sort_key"])
# Undecorate
rows = [row[1:] for row in rows]
return rows
def _format_row(self, row, options):
return [self._format_value(field, value) for (field, value) in zip(self._field_names, row)]
def _format_rows(self, rows, options):
return [self._format_row(row, options) for row in rows]
##############################
# PLAIN TEXT STRING METHODS #
##############################
def get_string(self, **kwargs):
"""Return string representation of table in current state.
Arguments:
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
fields - names of fields (columns) to include
header - print a header showing field names (True or False)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE
vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
vertical_char - single character string used to draw vertical lines
horizontal_char - single character string used to draw horizontal lines
junction_char - single character string used to draw line junctions
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
reversesort - True or False to sort in descending or ascending order
print empty - if True, stringify just the header for an empty table, if False return an empty string """
options = self._get_options(kwargs)
lines = []
# Don't think too hard about an empty table
# Is this the desired behaviour? Maybe we should still print the header?
if self.rowcount == 0 and (not options["print_empty"] or not options["border"]):
return ""
# Get the rows we need to print, taking into account slicing, sorting, etc.
rows = self._get_rows(options)
# Turn all data in all rows into Unicode, formatted as desired
formatted_rows = self._format_rows(rows, options)
# Compute column widths
self._compute_widths(formatted_rows, options)
# Add header or top of border
self._hrule = self._stringify_hrule(options)
if options["header"]:
lines.append(self._stringify_header(options))
elif options["border"] and options["hrules"] in (ALL, FRAME):
lines.append(self._hrule)
# Add rows
for row in formatted_rows:
lines.append(self._stringify_row(row, options))
# Add bottom of border
if options["border"] and options["hrules"] == FRAME:
lines.append(self._hrule)
return self._unicode("\n").join(lines)
def _stringify_hrule(self, options):
if not options["border"]:
return ""
lpad, rpad = self._get_padding_widths(options)
if options['vrules'] in (ALL, FRAME):
bits = [options["junction_char"]]
else:
bits = [options["horizontal_char"]]
# For tables with no data or fieldnames
if not self._field_names:
bits.append(options["junction_char"])
return "".join(bits)
for field, width in zip(self._field_names, self._widths):
if options["fields"] and field not in options["fields"]:
continue
bits.append((width+lpad+rpad)*options["horizontal_char"])
if options['vrules'] == ALL:
bits.append(options["junction_char"])
else:
bits.append(options["horizontal_char"])
if options["vrules"] == FRAME:
bits.pop()
bits.append(options["junction_char"])
return "".join(bits)
def _stringify_header(self, options):
bits = []
lpad, rpad = self._get_padding_widths(options)
if options["border"]:
if options["hrules"] in (ALL, FRAME):
bits.append(self._hrule)
bits.append("\n")
if options["vrules"] in (ALL, FRAME):
bits.append(options["vertical_char"])
else:
bits.append(" ")
# For tables with no data or field names
if not self._field_names:
if options["vrules"] in (ALL, FRAME):
bits.append(options["vertical_char"])
else:
bits.append(" ")
for field, width, in zip(self._field_names, self._widths):
if options["fields"] and field not in options["fields"]:
continue
if self._header_style == "cap":
fieldname = field.capitalize()
elif self._header_style == "title":
fieldname = field.title()
elif self._header_style == "upper":
fieldname = field.upper()
elif self._header_style == "lower":
fieldname = field.lower()
else:
fieldname = field
bits.append(" " * lpad + self._justify(fieldname, width, self._align[field]) + " " * rpad)
if options["border"]:
if options["vrules"] == ALL:
bits.append(options["vertical_char"])
else:
bits.append(" ")
# If vrules is FRAME, then we just appended a space at the end
# of the last field, when we really want a vertical character
if options["border"] and options["vrules"] == FRAME:
bits.pop()
bits.append(options["vertical_char"])
if options["border"] and options["hrules"] != NONE:
bits.append("\n")
bits.append(self._hrule)
return "".join(bits)
def _stringify_row(self, row, options):
for index, field, value, width, in zip(range(0,len(row)), self._field_names, row, self._widths):
# Enforce max widths
lines = value.split("\n")
new_lines = []
for line in lines:
if _str_block_width(line) > width:
line = textwrap.fill(line, width)
new_lines.append(line)
lines = new_lines
value = "\n".join(lines)
row[index] = value
row_height = 0
for c in row:
h = _get_size(c)[1]
if h > row_height:
row_height = h
bits = []
lpad, rpad = self._get_padding_widths(options)
for y in range(0, row_height):
bits.append([])
if options["border"]:
if options["vrules"] in (ALL, FRAME):
bits[y].append(self.vertical_char)
else:
bits[y].append(" ")
for field, value, width, in zip(self._field_names, row, self._widths):
valign = self._valign[field]
lines = value.split("\n")
dHeight = row_height - len(lines)
if dHeight:
if valign == "m":
lines = [""] * int(dHeight / 2) + lines + [""] * (dHeight - int(dHeight / 2))
elif valign == "b":
lines = [""] * dHeight + lines
else:
lines = lines + [""] * dHeight
y = 0
for l in lines:
if options["fields"] and field not in options["fields"]:
continue
bits[y].append(" " * lpad + self._justify(l, width, self._align[field]) + " " * rpad)
if options["border"]:
if options["vrules"] == ALL:
bits[y].append(self.vertical_char)
else:
bits[y].append(" ")
y += 1
# If vrules is FRAME, then we just appended a space at the end
# of the last field, when we really want a vertical character
for y in range(0, row_height):
if options["border"] and options["vrules"] == FRAME:
bits[y].pop()
bits[y].append(options["vertical_char"])
if options["border"] and options["hrules"]== ALL:
bits[row_height-1].append("\n")
bits[row_height-1].append(self._hrule)
for y in range(0, row_height):
bits[y] = "".join(bits[y])
return "\n".join(bits)
##############################
# HTML STRING METHODS #
##############################
def get_html_string(self, **kwargs):
"""Return string representation of HTML formatted version of table in current state.
Arguments:
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
fields - names of fields (columns) to include
header - print a header showing field names (True or False)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE
vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
attributes - dictionary of name/value pairs to include as HTML attributes in the <table> tag
xhtml - print <br/> tags if True, <br> tags if false"""
options = self._get_options(kwargs)
if options["format"]:
string = self._get_formatted_html_string(options)
else:
string = self._get_simple_html_string(options)
return string
def _get_simple_html_string(self, options):
lines = []
if options["xhtml"]:
linebreak = "<br/>"
else:
linebreak = "<br>"
open_tag = []
open_tag.append("<table")
if options["attributes"]:
for attr_name in options["attributes"]:
open_tag.append(" %s=\"%s\"" % (attr_name, options["attributes"][attr_name]))
open_tag.append(">")
lines.append("".join(open_tag))
# Headers
if options["header"]:
lines.append(" <tr>")
for field in self._field_names:
if options["fields"] and field not in options["fields"]:
continue
lines.append(" <th>%s</th>" % escape(field).replace("\n", linebreak))
lines.append(" </tr>")
# Data
rows = self._get_rows(options)
formatted_rows = self._format_rows(rows, options)
for row in formatted_rows:
lines.append(" <tr>")
for field, datum in zip(self._field_names, row):
if options["fields"] and field not in options["fields"]:
continue
lines.append(" <td>%s</td>" % escape(datum).replace("\n", linebreak))
lines.append(" </tr>")
lines.append("</table>")
return self._unicode("\n").join(lines)
def _get_formatted_html_string(self, options):
lines = []
lpad, rpad = self._get_padding_widths(options)
if options["xhtml"]:
linebreak = "<br/>"
else:
linebreak = "<br>"
open_tag = []
open_tag.append("<table")
if options["border"]:
if options["hrules"] == ALL and options["vrules"] == ALL:
open_tag.append(" frame=\"box\" rules=\"all\"")
elif options["hrules"] == FRAME and options["vrules"] == FRAME:
open_tag.append(" frame=\"box\"")
elif options["hrules"] == FRAME and options["vrules"] == ALL:
open_tag.append(" frame=\"box\" rules=\"cols\"")
elif options["hrules"] == FRAME:
open_tag.append(" frame=\"hsides\"")
elif options["hrules"] == ALL:
open_tag.append(" frame=\"hsides\" rules=\"rows\"")
elif options["vrules"] == FRAME:
open_tag.append(" frame=\"vsides\"")
elif options["vrules"] == ALL:
open_tag.append(" frame=\"vsides\" rules=\"cols\"")
if options["attributes"]:
for attr_name in options["attributes"]:
open_tag.append(" %s=\"%s\"" % (attr_name, options["attributes"][attr_name]))
open_tag.append(">")
lines.append("".join(open_tag))
# Headers
if options["header"]:
lines.append(" <tr>")
for field in self._field_names:
if options["fields"] and field not in options["fields"]:
continue
lines.append(" <th style=\"padding-left: %dem; padding-right: %dem; text-align: center\">%s</th>" % (lpad, rpad, escape(field).replace("\n", linebreak)))
lines.append(" </tr>")
# Data
rows = self._get_rows(options)
formatted_rows = self._format_rows(rows, options)
aligns = []
valigns = []
for field in self._field_names:
aligns.append({ "l" : "left", "r" : "right", "c" : "center" }[self._align[field]])
valigns.append({"t" : "top", "m" : "middle", "b" : "bottom"}[self._valign[field]])
for row in formatted_rows:
lines.append(" <tr>")
for field, datum, align, valign in zip(self._field_names, row, aligns, valigns):
if options["fields"] and field not in options["fields"]:
continue
lines.append(" <td style=\"padding-left: %dem; padding-right: %dem; text-align: %s; vertical-align: %s\">%s</td>" % (lpad, rpad, align, valign, escape(datum).replace("\n", linebreak)))
lines.append(" </tr>")
lines.append("</table>")
return self._unicode("\n").join(lines)
##############################
# UNICODE WIDTH FUNCTIONS #
##############################
def _char_block_width(char):
# Basic Latin, which is probably the most common case
#if char in xrange(0x0021, 0x007e):
#if char >= 0x0021 and char <= 0x007e:
if 0x0021 <= char <= 0x007e:
return 1
# Chinese, Japanese, Korean (common)
if 0x4e00 <= char <= 0x9fff:
return 2
# Hangul
if 0xac00 <= char <= 0xd7af:
return 2
# Combining?
if unicodedata.combining(uni_chr(char)):
return 0
# Hiragana and Katakana
if 0x3040 <= char <= 0x309f or 0x30a0 <= char <= 0x30ff:
return 2
# Full-width Latin characters
if 0xff01 <= char <= 0xff60:
return 2
# CJK punctuation
if 0x3000 <= char <= 0x303e:
return 2
# Backspace and delete
if char in (0x0008, 0x007f):
return -1
# Other control characters
elif char in (0x0000, 0x001f):
return 0
# Take a guess
return 1
def _str_block_width(val):
return sum(itermap(_char_block_width, itermap(ord, _re.sub("", val))))
##############################
# TABLE FACTORIES #
##############################
def from_csv(fp, field_names = None, **kwargs):
dialect = csv.Sniffer().sniff(fp.read(1024))
fp.seek(0)
reader = csv.reader(fp, dialect)
table = PrettyTable(**kwargs)
if field_names:
table.field_names = field_names
else:
if py3k:
table.field_names = [x.strip() for x in next(reader)]
else:
table.field_names = [x.strip() for x in reader.next()]
for row in reader:
table.add_row([x.strip() for x in row])
return table
def from_db_cursor(cursor, **kwargs):
if cursor.description:
table = PrettyTable(**kwargs)
table.field_names = [col[0] for col in cursor.description]
for row in cursor.fetchall():
table.add_row(row)
return table
class TableHandler(HTMLParser):
def __init__(self, **kwargs):
HTMLParser.__init__(self)
self.kwargs = kwargs
self.tables = []
self.last_row = []
self.rows = []
self.max_row_width = 0
self.active = None
self.last_content = ""
self.is_last_row_header = False
def handle_starttag(self,tag, attrs):
self.active = tag
if tag == "th":
self.is_last_row_header = True
def handle_endtag(self,tag):
if tag in ["th", "td"]:
stripped_content = self.last_content.strip()
self.last_row.append(stripped_content)
if tag == "tr":
self.rows.append(
(self.last_row, self.is_last_row_header))
self.max_row_width = max(self.max_row_width, len(self.last_row))
self.last_row = []
self.is_last_row_header = False
if tag == "table":
table = self.generate_table(self.rows)
self.tables.append(table)
self.rows = []
self.last_content = " "
self.active = None
def handle_data(self, data):
self.last_content += data
def generate_table(self, rows):
"""
Generates from a list of rows a PrettyTable object.
"""
table = PrettyTable(**self.kwargs)
for row in self.rows:
if len(row[0]) < self.max_row_width:
appends = self.max_row_width - len(row[0])
for i in range(1,appends):
row[0].append("-")
if row[1] == True:
self.make_fields_unique(row[0])
table.field_names = row[0]
else:
table.add_row(row[0])
return table
def make_fields_unique(self, fields):
"""
iterates over the row and make each field unique
"""
for i in range(0, len(fields)):
for j in range(i+1, len(fields)):
if fields[i] == fields[j]:
fields[j] += "'"
def from_html(html_code, **kwargs):
"""
Generates a list of PrettyTables from a string of HTML code. Each <table> in
the HTML becomes one PrettyTable object.
"""
parser = TableHandler(**kwargs)
parser.feed(html_code)
return parser.tables
def from_html_one(html_code, **kwargs):
"""
Generates a PrettyTables from a string of HTML code which contains only a
single <table>
"""
tables = from_html(html_code, **kwargs)
try:
assert len(tables) == 1
except AssertionError:
raise Exception("More than one <table> in provided HTML code! Use from_html instead.")
return tables[0]
##############################
# MAIN (TEST FUNCTION) #
##############################
def main():
x = PrettyTable(["City name", "Area", "Population", "Annual Rainfall"])
x.sortby = "Population"
x.reversesort = True
x.int_format["Area"] = "04d"
x.float_format = "6.1f"
x.align["City name"] = "l" # Left align city names
x.add_row(["Adelaide", 1295, 1158259, 600.5])
x.add_row(["Brisbane", 5905, 1857594, 1146.4])
x.add_row(["Darwin", 112, 120900, 1714.7])
x.add_row(["Hobart", 1357, 205556, 619.5])
x.add_row(["Sydney", 2058, 4336374, 1214.8])
x.add_row(["Melbourne", 1566, 3806092, 646.9])
x.add_row(["Perth", 5386, 1554769, 869.4])
print(x)
if __name__ == "__main__":
main()
| mit | 8,584,577,250,778,788,000 | 35.755254 | 207 | 0.569927 | false | 4.066762 | false | false | false |
croxis/SpaceDrive | spacedrive/renderpipeline/rpplugins/vxgi/voxelization_stage.py | 1 | 8394 | """
RenderPipeline
Copyright (c) 2014-2016 tobspr <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import division
from rpcore.globals import Globals
from rpcore.image import Image
from rpcore.render_stage import RenderStage
from panda3d.core import Camera, OrthographicLens, NodePath, CullFaceAttrib
from panda3d.core import DepthTestAttrib, Vec4, PTALVecBase3, Vec3, SamplerState
from panda3d.core import ColorWriteAttrib
class VoxelizationStage(RenderStage):
""" This stage voxelizes the whole scene """
required_inputs = ["DefaultEnvmap", "AllLightsData", "maxLightIndex"]
required_pipes = []
# The different states of voxelization
S_disabled = 0
S_voxelize_x = 1
S_voxelize_y = 2
S_voxelize_z = 3
S_gen_mipmaps = 4
def __init__(self, pipeline):
RenderStage.__init__(self, pipeline)
self.voxel_resolution = 256
self.voxel_world_size = -1
self.state = self.S_disabled
self.create_ptas()
def set_grid_position(self, pos):
self.pta_next_grid_pos[0] = pos
def create_ptas(self):
self.pta_next_grid_pos = PTALVecBase3.empty_array(1)
self.pta_grid_pos = PTALVecBase3.empty_array(1)
@property
def produced_inputs(self):
return {"voxelGridPosition": self.pta_grid_pos}
@property
def produced_pipes(self):
return {"SceneVoxels": self.voxel_grid}
def create(self):
# Create the voxel grid used to generate the voxels
self.voxel_temp_grid = Image.create_3d(
"VoxelsTemp", self.voxel_resolution, self.voxel_resolution,
self.voxel_resolution, "RGBA8")
self.voxel_temp_grid.set_clear_color(Vec4(0))
self.voxel_temp_nrm_grid = Image.create_3d(
"VoxelsTemp", self.voxel_resolution, self.voxel_resolution,
self.voxel_resolution, "R11G11B10")
self.voxel_temp_nrm_grid.set_clear_color(Vec4(0))
# Create the voxel grid which is a copy of the temporary grid, but stable
self.voxel_grid = Image.create_3d(
"Voxels", self.voxel_resolution, self.voxel_resolution, self.voxel_resolution, "RGBA8")
self.voxel_grid.set_clear_color(Vec4(0))
self.voxel_grid.set_minfilter(SamplerState.FT_linear_mipmap_linear)
# Create the camera for voxelization
self.voxel_cam = Camera("VoxelizeCam")
self.voxel_cam.set_camera_mask(self._pipeline.tag_mgr.get_voxelize_mask())
self.voxel_cam_lens = OrthographicLens()
self.voxel_cam_lens.set_film_size(
-2.0 * self.voxel_world_size, 2.0 * self.voxel_world_size)
self.voxel_cam_lens.set_near_far(0.0, 2.0 * self.voxel_world_size)
self.voxel_cam.set_lens(self.voxel_cam_lens)
self.voxel_cam_np = Globals.base.render.attach_new_node(self.voxel_cam)
self._pipeline.tag_mgr.register_camera("voxelize", self.voxel_cam)
# Create the voxelization target
self.voxel_target = self.create_target("VoxelizeScene")
self.voxel_target.size = self.voxel_resolution
self.voxel_target.prepare_render(self.voxel_cam_np)
# Create the target which copies the voxel grid
self.copy_target = self.create_target("CopyVoxels")
self.copy_target.size = self.voxel_resolution
self.copy_target.prepare_buffer()
# TODO! Does not work with the new render target yet - maybe add option
# to post process region for instances?
self.copy_target.instance_count = self.voxel_resolution
self.copy_target.set_shader_input("SourceTex", self.voxel_temp_grid)
self.copy_target.set_shader_input("DestTex", self.voxel_grid)
# Create the target which generates the mipmaps
self.mip_targets = []
mip_size, mip = self.voxel_resolution, 0
while mip_size > 1:
mip_size, mip = mip_size // 2, mip + 1
mip_target = self.create_target("GenMipmaps:" + str(mip))
mip_target.size = mip_size
mip_target.prepare_buffer()
mip_target.instance_count = mip_size
mip_target.set_shader_input("SourceTex", self.voxel_grid)
mip_target.set_shader_input("sourceMip", mip - 1)
mip_target.set_shader_input("DestTex", self.voxel_grid, False, True, -1, mip, 0)
self.mip_targets.append(mip_target)
# Create the initial state used for rendering voxels
initial_state = NodePath("VXGIInitialState")
initial_state.set_attrib(CullFaceAttrib.make(CullFaceAttrib.M_cull_none), 100000)
initial_state.set_attrib(DepthTestAttrib.make(DepthTestAttrib.M_none), 100000)
initial_state.set_attrib(ColorWriteAttrib.make(ColorWriteAttrib.C_off), 100000)
self.voxel_cam.set_initial_state(initial_state.get_state())
Globals.base.render.set_shader_input("voxelGridPosition", self.pta_next_grid_pos)
Globals.base.render.set_shader_input("VoxelGridDest", self.voxel_temp_grid)
def update(self):
self.voxel_cam_np.show()
self.voxel_target.active = True
self.copy_target.active = False
for target in self.mip_targets:
target.active = False
# Voxelization disable
if self.state == self.S_disabled:
self.voxel_cam_np.hide()
self.voxel_target.active = False
# Voxelization from X-Axis
elif self.state == self.S_voxelize_x:
# Clear voxel grid
self.voxel_temp_grid.clear_image()
self.voxel_cam_np.set_pos(
self.pta_next_grid_pos[0] + Vec3(self.voxel_world_size, 0, 0))
self.voxel_cam_np.look_at(self.pta_next_grid_pos[0])
# Voxelization from Y-Axis
elif self.state == self.S_voxelize_y:
self.voxel_cam_np.set_pos(
self.pta_next_grid_pos[0] + Vec3(0, self.voxel_world_size, 0))
self.voxel_cam_np.look_at(self.pta_next_grid_pos[0])
# Voxelization from Z-Axis
elif self.state == self.S_voxelize_z:
self.voxel_cam_np.set_pos(
self.pta_next_grid_pos[0] + Vec3(0, 0, self.voxel_world_size))
self.voxel_cam_np.look_at(self.pta_next_grid_pos[0])
# Generate mipmaps
elif self.state == self.S_gen_mipmaps:
self.voxel_target.active = False
self.copy_target.active = True
self.voxel_cam_np.hide()
for target in self.mip_targets:
target.active = True
# As soon as we generate the mipmaps, we need to update the grid position
# as well
self.pta_grid_pos[0] = self.pta_next_grid_pos[0]
def reload_shaders(self):
self.copy_target.shader = self.load_plugin_shader(
"/$$rp/shader/default_post_process_instanced.vert.glsl", "copy_voxels.frag.glsl")
mip_shader = self.load_plugin_shader(
"/$$rp/shader/default_post_process_instanced.vert.glsl", "generate_mipmaps.frag.glsl")
for target in self.mip_targets:
target.shader = mip_shader
def set_shader_input(self, *args):
Globals.render.set_shader_input(*args)
| mit | -8,389,214,982,361,238,000 | 40.826531 | 99 | 0.644508 | false | 3.399757 | false | false | false |
rameshg87/pyremotevbox | pyremotevbox/ZSI/twisted/WSsecurity.py | 1 | 13760 | ###########################################################################
# Joshua R. Boverhof, LBNL
# See Copyright for copyright notice!
# $Id: WSsecurity.py 1134 2006-02-24 00:23:06Z boverhof $
###########################################################################
import sys, time, warnings
import sha, base64
# twisted & related imports
from zope.interface import classProvides, implements, Interface
from twisted.python import log, failure
from twisted.web.error import NoResource
from twisted.web.server import NOT_DONE_YET
from twisted.internet import reactor
import twisted.web.http
import twisted.web.resource
# ZSI imports
from pyremotevbox.ZSI import _get_element_nsuri_name, EvaluateException, ParseException
from pyremotevbox.ZSI.parse import ParsedSoap
from pyremotevbox.ZSI.writer import SoapWriter
from pyremotevbox.ZSI.TC import _get_global_element_declaration as GED
from pyremotevbox.ZSI import fault
from pyremotevbox.ZSI.wstools.Namespaces import OASIS, DSIG
from WSresource import DefaultHandlerChain, HandlerChainInterface,\
WSAddressCallbackHandler, DataHandler, WSAddressHandler
#
# Global Element Declarations
#
UsernameTokenDec = GED(OASIS.WSSE, "UsernameToken")
SecurityDec = GED(OASIS.WSSE, "Security")
SignatureDec = GED(DSIG.BASE, "Signature")
PasswordDec = GED(OASIS.WSSE, "Password")
NonceDec = GED(OASIS.WSSE, "Nonce")
CreatedDec = GED(OASIS.UTILITY, "Created")
if None in [UsernameTokenDec,SecurityDec,SignatureDec,PasswordDec,NonceDec,CreatedDec]:
raise ImportError, 'required global element(s) unavailable: %s ' %({
(OASIS.WSSE, "UsernameToken"):UsernameTokenDec,
(OASIS.WSSE, "Security"):SecurityDec,
(DSIG.BASE, "Signature"):SignatureDec,
(OASIS.WSSE, "Password"):PasswordDec,
(OASIS.WSSE, "Nonce"):NonceDec,
(OASIS.UTILITY, "Created"):CreatedDec,
})
#
# Stability: Unstable, Untested, Not Finished.
#
class WSSecurityHandler:
"""Web Services Security: SOAP Message Security 1.0
Class Variables:
debug -- If True provide more detailed SOAP:Fault information to clients.
"""
classProvides(HandlerChainInterface)
debug = True
@classmethod
def processRequest(cls, ps, **kw):
if type(ps) is not ParsedSoap:
raise TypeError,'Expecting ParsedSoap instance'
security = ps.ParseHeaderElements([cls.securityDec])
# Assume all security headers are supposed to be processed here.
for pyobj in security or []:
for any in pyobj.Any or []:
if any.typecode is UsernameTokenDec:
try:
ps = cls.UsernameTokenProfileHandler.processRequest(ps, any)
except Exception, ex:
if cls.debug: raise
raise RuntimeError, 'Unauthorized Username/passphrase combination'
continue
if any.typecode is SignatureDec:
try:
ps = cls.SignatureHandler.processRequest(ps, any)
except Exception, ex:
if cls.debug: raise
raise RuntimeError, 'Invalid Security Header'
continue
raise RuntimeError, 'WS-Security, Unsupported token %s' %str(any)
return ps
@classmethod
def processResponse(cls, output, **kw):
return output
class UsernameTokenProfileHandler:
"""Web Services Security UsernameToken Profile 1.0
Class Variables:
targetNamespace --
"""
classProvides(HandlerChainInterface)
# Class Variables
targetNamespace = OASIS.WSSE
sweepInterval = 60*5
nonces = None
# Set to None to disable
PasswordText = targetNamespace + "#PasswordText"
PasswordDigest = targetNamespace + "#PasswordDigest"
# Override passwordCallback
passwordCallback = lambda cls,username: None
@classmethod
def sweep(cls, index):
"""remove nonces every sweepInterval.
Parameters:
index -- remove all nonces up to this index.
"""
if cls.nonces is None:
cls.nonces = []
seconds = cls.sweepInterval
cls.nonces = cls.nonces[index:]
reactor.callLater(seconds, cls.sweep, len(cls.nonces))
@classmethod
def processRequest(cls, ps, token, **kw):
"""
Parameters:
ps -- ParsedSoap instance
token -- UsernameToken pyclass instance
"""
if token.typecode is not UsernameTokenDec:
raise TypeError, 'expecting GED (%s,%s) representation.' %(
UsernameTokenDec.nspname, UsernameTokenDec.pname)
username = token.Username
# expecting only one password
# may have a nonce and a created
password = nonce = timestamp = None
for any in token.Any or []:
if any.typecode is PasswordDec:
password = any
continue
if any.typecode is NonceTypeDec:
nonce = any
continue
if any.typecode is CreatedTypeDec:
timestamp = any
continue
raise TypeError, 'UsernameTokenProfileHander unexpected %s' %str(any)
if password is None:
raise RuntimeError, 'Unauthorized, no password'
# TODO: not yet supporting complexType simpleContent in pyclass_type
attrs = getattr(password, password.typecode.attrs_aname, {})
pwtype = attrs.get('Type', cls.PasswordText)
# Clear Text Passwords
if cls.PasswordText is not None and pwtype == cls.PasswordText:
if password == cls.passwordCallback(username):
return ps
raise RuntimeError, 'Unauthorized, clear text password failed'
if cls.nonces is None: cls.sweep(0)
if nonce is not None:
if nonce in cls.nonces:
raise RuntimeError, 'Invalid Nonce'
# created was 10 seconds ago or sooner
if created is not None and created < time.gmtime(time.time()-10):
raise RuntimeError, 'UsernameToken created is expired'
cls.nonces.append(nonce)
# PasswordDigest, recommended that implemenations
# require a Nonce and Created
if cls.PasswordDigest is not None and pwtype == cls.PasswordDigest:
digest = sha.sha()
for i in (nonce, created, cls.passwordCallback(username)):
if i is None: continue
digest.update(i)
if password == base64.encodestring(digest.digest()).strip():
return ps
raise RuntimeError, 'Unauthorized, digest failed'
raise RuntimeError, 'Unauthorized, contents of UsernameToken unknown'
@classmethod
def processResponse(cls, output, **kw):
return output
@staticmethod
def hmac_sha1(xml):
return
class SignatureHandler:
"""Web Services Security UsernameToken Profile 1.0
"""
digestMethods = {
DSIG.BASE+"#sha1":sha.sha,
}
signingMethods = {
DSIG.BASE+"#hmac-sha1":hmac_sha1,
}
canonicalizationMethods = {
DSIG.C14N_EXCL:lambda node: Canonicalize(node, unsuppressedPrefixes=[]),
DSIG.C14N:lambda node: Canonicalize(node),
}
@classmethod
def processRequest(cls, ps, signature, **kw):
"""
Parameters:
ps -- ParsedSoap instance
signature -- Signature pyclass instance
"""
if token.typecode is not SignatureDec:
raise TypeError, 'expecting GED (%s,%s) representation.' %(
SignatureDec.nspname, SignatureDec.pname)
si = signature.SignedInfo
si.CanonicalizationMethod
calgo = si.CanonicalizationMethod.get_attribute_Algorithm()
for any in si.CanonicalizationMethod.Any:
pass
# Check Digest
si.Reference
context = XPath.Context.Context(ps.dom, processContents={'wsu':OASIS.UTILITY})
exp = XPath.Compile('//*[@wsu:Id="%s"]' %si.Reference.get_attribute_URI())
nodes = exp.evaluate(context)
if len(nodes) != 1:
raise RuntimeError, 'A SignedInfo Reference must refer to one node %s.' %(
si.Reference.get_attribute_URI())
try:
xml = cls.canonicalizeMethods[calgo](nodes[0])
except IndexError:
raise RuntimeError, 'Unsupported canonicalization algorithm'
try:
digest = cls.digestMethods[salgo]
except IndexError:
raise RuntimeError, 'unknown digestMethods Algorithm'
digestValue = base64.encodestring(digest(xml).digest()).strip()
if si.Reference.DigestValue != digestValue:
raise RuntimeError, 'digest does not match'
if si.Reference.Transforms:
pass
signature.KeyInfo
signature.KeyInfo.KeyName
signature.KeyInfo.KeyValue
signature.KeyInfo.RetrievalMethod
signature.KeyInfo.X509Data
signature.KeyInfo.PGPData
signature.KeyInfo.SPKIData
signature.KeyInfo.MgmtData
signature.KeyInfo.Any
signature.Object
# TODO: Check Signature
signature.SignatureValue
si.SignatureMethod
salgo = si.SignatureMethod.get_attribute_Algorithm()
if si.SignatureMethod.HMACOutputLength:
pass
for any in si.SignatureMethod.Any:
pass
# <SignedInfo><Reference URI="">
exp = XPath.Compile('//child::*[attribute::URI = "%s"]/..' %(
si.Reference.get_attribute_URI()))
nodes = exp.evaluate(context)
if len(nodes) != 1:
raise RuntimeError, 'A SignedInfo Reference must refer to one node %s.' %(
si.Reference.get_attribute_URI())
try:
xml = cls.canonicalizeMethods[calgo](nodes[0])
except IndexError:
raise RuntimeError, 'Unsupported canonicalization algorithm'
# TODO: Check SignatureValue
@classmethod
def processResponse(cls, output, **kw):
return output
class X509TokenProfileHandler:
"""Web Services Security UsernameToken Profile 1.0
"""
targetNamespace = DSIG.BASE
# Token Types
singleCertificate = targetNamespace + "#X509v3"
certificatePath = targetNamespace + "#X509PKIPathv1"
setCerticatesCRLs = targetNamespace + "#PKCS7"
@classmethod
def processRequest(cls, ps, signature, **kw):
return ps
"""
<element name="KeyInfo" type="ds:KeyInfoType"/>
<complexType name="KeyInfoType" mixed="true">
<choice maxOccurs="unbounded">
<element ref="ds:KeyName"/>
<element ref="ds:KeyValue"/>
<element ref="ds:RetrievalMethod"/>
<element ref="ds:X509Data"/>
<element ref="ds:PGPData"/>
<element ref="ds:SPKIData"/>
<element ref="ds:MgmtData"/>
<any processContents="lax" namespace="##other"/>
<!-- (1,1) elements from (0,unbounded) namespaces -->
</choice>
<attribute name="Id" type="ID" use="optional"/>
</complexType>
<element name="Signature" type="ds:SignatureType"/>
<complexType name="SignatureType">
<sequence>
<element ref="ds:SignedInfo"/>
<element ref="ds:SignatureValue"/>
<element ref="ds:KeyInfo" minOccurs="0"/>
<element ref="ds:Object" minOccurs="0" maxOccurs="unbounded"/>
</sequence>
<attribute name="Id" type="ID" use="optional"/>
</complexType>
<element name="SignatureValue" type="ds:SignatureValueType"/>
<complexType name="SignatureValueType">
<simpleContent>
<extension base="base64Binary">
<attribute name="Id" type="ID" use="optional"/>
</extension>
</simpleContent>
</complexType>
<!-- Start SignedInfo -->
<element name="SignedInfo" type="ds:SignedInfoType"/>
<complexType name="SignedInfoType">
<sequence>
<element ref="ds:CanonicalizationMethod"/>
<element ref="ds:SignatureMethod"/>
<element ref="ds:Reference" maxOccurs="unbounded"/>
</sequence>
<attribute name="Id" type="ID" use="optional"/>
</complexType>
"""
class WSSecurityHandlerChainFactory:
protocol = DefaultHandlerChain
@classmethod
def newInstance(cls):
return cls.protocol(WSAddressCallbackHandler, DataHandler,
WSSecurityHandler, WSAddressHandler())
| apache-2.0 | 5,371,576,649,039,104,000 | 34.372751 | 90 | 0.566061 | false | 4.688245 | false | false | false |
o5k/openerp-oemedical-v0.1 | openerp/addons/smsclient/smsclient.py | 1 | 17026 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2011 SYLEAM (<http://syleam.fr/>)
# Copyright (C) 2013 Julius Network Solutions SARL <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import urllib
from openerp.osv import fields, orm
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
try:
from SOAPpy import WSDL
except :
_logger.warning("ERROR IMPORTING SOAPpy, if not installed, please install it:"
" e.g.: apt-get install python-soappy")
class partner_sms_send(orm.Model):
_name = "partner.sms.send"
def _default_get_mobile(self, cr, uid, fields, context=None):
if context is None:
context = {}
partner_pool = self.pool.get('res.partner')
active_ids = fields.get('active_ids')
res = {}
i = 0
for partner in partner_pool.browse(cr, uid, active_ids, context=context):
i += 1
res = partner.mobile
if i > 1:
raise orm.except_orm(_('Error'), _('You can only select one partner'))
return res
def _default_get_gateway(self, cr, uid, fields, context=None):
if context is None:
context = {}
sms_obj = self.pool.get('sms.smsclient')
gateway_ids = sms_obj.search(cr, uid, [], limit=1, context=context)
return gateway_ids and gateway_ids[0] or False
def onchange_gateway(self, cr, uid, ids, gateway_id, context=None):
if context is None:
context = {}
sms_obj = self.pool.get('sms.smsclient')
if not gateway_id:
return {}
gateway = sms_obj.browse(cr, uid, gateway_id, context=context)
return {
'value': {
'validity': gateway.validity,
'classes': gateway.classes,
'deferred': gateway.deferred,
'priority': gateway.priority,
'coding': gateway.coding,
'tag': gateway.tag,
'nostop': gateway.nostop,
}
}
_columns = {
'mobile_to': fields.char('To', size=256, required=True),
'app_id': fields.char('API ID', size=256),
'user': fields.char('Login', size=256),
'password': fields.char('Password', size=256),
'text': fields.text('SMS Message', required=True),
'gateway': fields.many2one('sms.smsclient', 'SMS Gateway', required=True),
'validity': fields.integer('Validity',
help='the maximum time -in minute(s)- before the message is dropped'),
'classes': fields.selection([
('0', 'Flash'),
('1', 'Phone display'),
('2', 'SIM'),
('3', 'Toolkit')
], 'Class', help='the sms class: flash(0), phone display(1), SIM(2), toolkit(3)'),
'deferred': fields.integer('Deferred',
help='the time -in minute(s)- to wait before sending the message'),
'priority': fields.selection([
('0','0'),
('1','1'),
('2','2'),
('3','3')
], 'Priority', help='The priority of the message'),
'coding': fields.selection([
('1', '7 bit'),
('2', 'Unicode')
], 'Coding', help='The SMS coding: 1 for 7 bit or 2 for unicode'),
'tag': fields.char('Tag', size=256, help='an optional tag'),
'nostop': fields.boolean('NoStop', help='Do not display STOP clause in the message, this requires that this is not an advertising message'),
}
_defaults = {
'mobile_to': _default_get_mobile,
'gateway': _default_get_gateway,
}
def sms_send(self, cr, uid, ids, context=None):
if context is None:
context = {}
client_obj = self.pool.get('sms.smsclient')
for data in self.browse(cr, uid, ids, context=context):
if not data.gateway:
raise orm.except_orm(_('Error'), _('No Gateway Found'))
else:
client_obj._send_message(cr, uid, data, context=context)
return {}
class SMSClient(orm.Model):
_name = 'sms.smsclient'
_description = 'SMS Client'
_columns = {
'name': fields.char('Gateway Name', size=256, required=True),
'url': fields.char('Gateway URL', size=256,
required=True, help='Base url for message'),
'property_ids': fields.one2many('sms.smsclient.parms',
'gateway_id', 'Parameters'),
'history_line': fields.one2many('sms.smsclient.history',
'gateway_id', 'History'),
'method': fields.selection([
('http', 'HTTP Method'),
('smpp', 'SMPP Method')
], 'API Method', select=True),
'state': fields.selection([
('new', 'Not Verified'),
('waiting', 'Waiting for Verification'),
('confirm', 'Verified'),
], 'Gateway Status', select=True, readonly=True),
'users_id': fields.many2many('res.users',
'res_smsserver_group_rel', 'sid', 'uid', 'Users Allowed'),
'code': fields.char('Verification Code', size=256),
'body': fields.text('Message',
help="The message text that will be send along with the email which is send through this server"),
'validity': fields.integer('Validity',
help='The maximum time -in minute(s)- before the message is dropped'),
'classes': fields.selection([
('0', 'Flash'),
('1', 'Phone display'),
('2', 'SIM'),
('3', 'Toolkit')
], 'Class',
help='The SMS class: flash(0),phone display(1),SIM(2),toolkit(3)'),
'deferred': fields.integer('Deferred',
help='The time -in minute(s)- to wait before sending the message'),
'priority': fields.selection([
('0', '0'),
('1', '1'),
('2', '2'),
('3', '3')
], 'Priority', help='The priority of the message '),
'coding': fields.selection([
('1', '7 bit'),
('2', 'Unicode')
],'Coding', help='The SMS coding: 1 for 7 bit or 2 for unicode'),
'tag': fields.char('Tag', size=256, help='an optional tag'),
'nostop': fields.boolean('NoStop', help='Do not display STOP clause in the message, this requires that this is not an advertising message'),
'char_limit' : fields.boolean('Character Limit'),
}
_defaults = {
'state': 'new',
'method': 'http',
'validity': 10,
'classes': '1',
'deferred': 0,
'priority': '3',
'coding': '1',
'nostop': True,
'char_limit' : True,
}
def _check_permissions(self, cr, uid, id, context=None):
cr.execute('select * from res_smsserver_group_rel where sid=%s and uid=%s' % (id, uid))
data = cr.fetchall()
if len(data) <= 0:
return False
return True
def _prepare_smsclient_queue(self, cr, uid, data, name, context=None):
return {
'name': name,
'gateway_id': data.gateway.id,
'state': 'draft',
'mobile': data.mobile_to,
'msg': data.text,
'validity': data.validity,
'classes': data.classes,
'deffered': data.deferred,
'priorirty': data.priority,
'coding': data.coding,
'tag': data.tag,
'nostop': data.nostop,
}
def _send_message(self, cr, uid, data, context=None):
if context is None:
context = {}
gateway = data.gateway
if gateway:
if not self._check_permissions(cr, uid, gateway.id, context=context):
raise orm.except_orm(_('Permission Error!'), _('You have no permission to access %s ') % (gateway.name,))
url = gateway.url
name = url
if gateway.method == 'http':
prms = {}
for p in data.gateway.property_ids:
if p.type == 'user':
prms[p.name] = p.value
elif p.type == 'password':
prms[p.name] = p.value
elif p.type == 'to':
prms[p.name] = data.mobile_to
elif p.type == 'sms':
prms[p.name] = data.text
elif p.type == 'extra':
prms[p.name] = p.value
params = urllib.urlencode(prms)
name = url + "?" + params
queue_obj = self.pool.get('sms.smsclient.queue')
vals = self._prepare_smsclient_queue(cr, uid, data, name, context=context)
queue_obj.create(cr, uid, vals, context=context)
return True
def _check_queue(self, cr, uid, context=None):
if context is None:
context = {}
queue_obj = self.pool.get('sms.smsclient.queue')
history_obj = self.pool.get('sms.smsclient.history')
sids = queue_obj.search(cr, uid, [
('state', '!=', 'send'),
('state', '!=', 'sending')
], limit=30, context=context)
queue_obj.write(cr, uid, sids, {'state': 'sending'}, context=context)
error_ids = []
sent_ids = []
for sms in queue_obj.browse(cr, uid, sids, context=context):
if sms.gateway_id.char_limit:
if len(sms.msg) > 160:
error_ids.append(sms.id)
continue
if sms.gateway_id.method == 'http':
try:
urllib.urlopen(sms.name)
except Exception as e:
raise orm.except_orm('Error', e)
### New Send Process OVH Dedicated ###
## Parameter Fetch ##
if sms.gateway_id.method == 'smpp':
for p in sms.gateway_id.property_ids:
if p.type == 'user':
login = p.value
elif p.type == 'password':
pwd = p.value
elif p.type == 'sender':
sender = p.value
elif p.type == 'sms':
account = p.value
try:
soap = WSDL.Proxy(sms.gateway_id.url)
message = ''
if sms.coding == '2':
message = str(sms.msg).decode('iso-8859-1').encode('utf8')
if sms.coding == '1':
message = str(sms.msg)
result = soap.telephonySmsUserSend(str(login), str(pwd),
str(account), str(sender), str(sms.mobile), message,
int(sms.validity), int(sms.classes), int(sms.deferred),
int(sms.priority), int(sms.coding),str(sms.gateway_id.tag), int(sms.gateway_id.nostop))
### End of the new process ###
except Exception as e:
raise orm.except_orm('Error', e)
history_obj.create(cr, uid, {
'name': _('SMS Sent'),
'gateway_id': sms.gateway_id.id,
'sms': sms.msg,
'to': sms.mobile,
}, context=context)
sent_ids.append(sms.id)
queue_obj.write(cr, uid, sent_ids, {'state': 'send'}, context=context)
queue_obj.write(cr, uid, error_ids, {
'state': 'error',
'error': 'Size of SMS should not be more then 160 char'
}, context=context)
return True
class SMSQueue(orm.Model):
_name = 'sms.smsclient.queue'
_description = 'SMS Queue'
_columns = {
'name': fields.text('SMS Request', size=256,
required=True, readonly=True,
states={'draft': [('readonly', False)]}),
'msg': fields.text('SMS Text', size=256,
required=True, readonly=True,
states={'draft': [('readonly', False)]}),
'mobile': fields.char('Mobile No', size=256,
required=True, readonly=True,
states={'draft': [('readonly', False)]}),
'gateway_id': fields.many2one('sms.smsclient',
'SMS Gateway', readonly=True,
states={'draft': [('readonly', False)]}),
'state': fields.selection([
('draft', 'Queued'),
('sending', 'Waiting'),
('send', 'Sent'),
('error', 'Error'),
], 'Message Status', select=True, readonly=True),
'error': fields.text('Last Error', size=256,
readonly=True,
states={'draft': [('readonly', False)]}),
'date_create': fields.datetime('Date', readonly=True),
'validity': fields.integer('Validity',
help='The maximum time -in minute(s)- before the message is dropped'),
'classes': fields.selection([
('0', 'Flash'),
('1', 'Phone display'),
('2', 'SIM'),
('3', 'Toolkit')
], 'Class', help='The sms class: flash(0), phone display(1), SIM(2), toolkit(3)'),
'deferred': fields.integer('Deferred',
help='The time -in minute(s)- to wait before sending the message'),
'priority': fields.selection([
('0', '0'),
('1', '1'),
('2', '2'),
('3', '3')
], 'Priority', help='The priority of the message '),
'coding': fields.selection([
('1', '7 bit'),
('2', 'Unicode')
], 'Coding', help='The sms coding: 1 for 7 bit or 2 for unicode'),
'tag': fields.char('Tag', size=256,
help='An optional tag'),
'nostop': fields.boolean('NoStop', help='Do not display STOP clause in the message, this requires that this is not an advertising message'),
}
_defaults = {
'date_create': fields.datetime.now,
'state': 'draft',
}
class Properties(orm.Model):
_name = 'sms.smsclient.parms'
_description = 'SMS Client Properties'
_columns = {
'name': fields.char('Property name', size=256,
help='Name of the property whom appear on the URL'),
'value': fields.char('Property value', size=256,
help='Value associate on the property for the URL'),
'gateway_id': fields.many2one('sms.smsclient', 'SMS Gateway'),
'type': fields.selection([
('user', 'User'),
('password', 'Password'),
('sender', 'Sender Name'),
('to', 'Recipient No'),
('sms', 'SMS Message'),
('extra', 'Extra Info')
], 'API Method', select=True,
help='If parameter concern a value to substitute, indicate it'),
}
class HistoryLine(orm.Model):
_name = 'sms.smsclient.history'
_description = 'SMS Client History'
_columns = {
'name': fields.char('Description', size=160, required=True, readonly=True),
'date_create': fields.datetime('Date', readonly=True),
'user_id': fields.many2one('res.users', 'Username', readonly=True, select=True),
'gateway_id': fields.many2one('sms.smsclient', 'SMS Gateway', ondelete='set null', required=True),
'to': fields.char('Mobile No', size=15, readonly=True),
'sms': fields.text('SMS', size=160, readonly=True),
}
_defaults = {
'date_create': fields.datetime.now,
'user_id': lambda obj, cr, uid, context: uid,
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
super(HistoryLine, self).create(cr, uid, vals, context=context)
cr.commit()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,570,020,406,153,420,000 | 40.730392 | 148 | 0.508458 | false | 4.061546 | false | false | false |
bderembl/mitgcm_configs | eddy_airsea/analysis/ode_wave.py | 1 | 1112 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
plt.ion()
f0 = 1e-4
u0 = 1.0
R0 = 40e3 # radius
vmax = -1.0 # m/s
def v1(rr):
v = -vmax*rr/R0*np.exp(-0.5*(rr/R0)**2)
# v = -vmax*np.tanh(rr/R0)/(np.cosh(rr/R0))**2/(np.tanh(1.0)/(np.cosh(1.0))**2)
return v
def dv1(rr):
v = -vmax/R0*np.exp(-0.5*(rr/R0)**2)*(1-(rr/R0)**2)
# v = -vmax*2/R0*np.tanh(rr/R0)/((np.cosh(rr/R0))**2)*(1/(np.cosh(rr/R0))**2 - (np.tanh(rr/R0))**2)/(np.tanh(1.0)/(np.cosh(1.0))**2)
return v
def f(r, t):
omega = np.sqrt((dv1(r)+v1(r)/r + f0)*(2*v1(r)/r + f0))
return u0*np.sin(omega*t)
si_r = 30
si_t = 30000
r0 = np.linspace(1,5*R0,si_r)
t = np.linspace(0, si_t/f0/1000, si_t)
ra = np.zeros((si_t,si_r))
for ni in range(0,si_r):
ra[:,ni] = integrate.odeint(f, r0[ni], t).squeeze()
plt.figure()
plt.plot(t*f0/(2*np.pi),ra/R0,'k',linewidth=1)
plt.xlabel(r'$tf/2\pi$')
plt.ylabel(r'$r_p/R_0$')
plt.xlim([np.min(t*f0/(2*np.pi)), np.max(t*f0/(2*np.pi))])
plt.ylim([np.min(ra/R0), 1.05*np.max(ra/R0)])
plt.savefig("ode_k0.pdf",bbox_inches='tight')
| mit | -6,112,656,027,416,300,000 | 23.173913 | 133 | 0.579137 | false | 1.891156 | false | false | false |
cmc333333/regulations-parser | regparser/tree/paragraph.py | 1 | 6226 | import hashlib
import re
from regparser.tree import struct
from regparser.tree.depth import markers as mtypes
from regparser.search import segments
p_levels = [list(mtypes.lower), list(mtypes.ints), list(mtypes.roman),
list(mtypes.upper), list(mtypes.em_ints), list(mtypes.em_roman)]
def p_level_of(marker):
"""Given a marker(string), determine the possible paragraph levels it
could fall into. This is useful for determining the order of
paragraphs"""
potential_levels = []
for level, markers in enumerate(p_levels):
if marker in markers:
potential_levels.append(level)
return potential_levels
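# Note: a single marker can be ambiguous across levels -- 'i', for example,
# reads as both a lower-case letter and a roman numeral -- so p_level_of()
# may return more than one candidate level for such markers.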
_NONWORDS = re.compile(r'\W+')
def hash_for_paragraph(text):
"""Hash a chunk of text and convert it into an integer for use with a
MARKERLESS paragraph identifier. We'll trim to just 8 hex characters for
legibility. We don't need to fear hash collisions as we'll have 16**8 ~ 4
billion possibilities. The birthday paradox tells us we'd only expect
collisions after ~ 60 thousand entries. We're expecting at most a few
hundred"""
phrase = _NONWORDS.sub('', text.lower())
hashed = hashlib.sha1(phrase).hexdigest()[:8]
return int(hashed, 16)
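# Illustrative usage sketch (not part of the original module): the marker is
# insensitive to case and punctuation, so differently formatted versions of the
# same phrase yield the same MARKERLESS identifier. The helper name below is an
# invention for demonstration only.
def _hash_for_paragraph_example():
    a = hash_for_paragraph('Some Heading')
    b = hash_for_paragraph('some heading!')
    assert a == b   # both reduce to 'someheading' before hashing
    return a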
class ParagraphParser():
def __init__(self, p_regex, node_type):
"""p_regex is the regular expression used when searching through
paragraphs. It should contain a %s for the next paragraph 'part'
(e.g. 'a', 'A', '1', 'i', etc.) inner_label_fn is a function which
takes the current label, and the next paragraph 'part' and produces
a new label."""
self.p_regex = p_regex
self.node_type = node_type
def matching_subparagraph_ids(self, p_level, paragraph):
"""Return a list of matches if this paragraph id matches one of the
subparagraph ids (e.g. letter (i) and roman numeral (i)."""
matches = []
for depth in range(p_level+1, len(p_levels)):
for sub_id, sub in enumerate(p_levels[depth]):
if sub == p_levels[p_level][paragraph]:
matches.append((depth, sub_id))
return matches
def best_start(self, text, p_level, paragraph, starts, exclude=[]):
"""Given a list of potential paragraph starts, pick the best based
on knowledge of subparagraph structure. Do this by checking if the
id following the subparagraph (e.g. ii) is between the first match
and the second. If so, skip it, as that implies the first match was
a subparagraph."""
subparagraph_hazards = self.matching_subparagraph_ids(
p_level, paragraph)
starts = starts + [(len(text), len(text))]
for i in range(1, len(starts)):
_, prev_end = starts[i-1]
next_start, _ = starts[i]
s_text = text[prev_end:next_start]
s_exclude = [
(e_start + prev_end, e_end + prev_end)
for e_start, e_end in exclude]
is_subparagraph = False
for hazard_level, hazard_idx in subparagraph_hazards:
if self.find_paragraph_start_match(
s_text, hazard_level, hazard_idx + 1, s_exclude):
is_subparagraph = True
if not is_subparagraph:
return starts[i-1]
def find_paragraph_start_match(self, text, p_level, paragraph, exclude=[]):
"""Find the positions for the start and end of the requested label.
        p_level is an index into p_levels; paragraph is the index within that level.
Return None if not present. Does not return results in the exclude
list (a list of start/stop indices). """
if len(p_levels) <= p_level or len(p_levels[p_level]) <= paragraph:
return None
match_starts = [(m.start(), m.end()) for m in re.finditer(
self.p_regex % p_levels[p_level][paragraph], text)]
match_starts = [
(start, end) for start, end in match_starts
if all([end < es or start > ee for es, ee in exclude])]
if len(match_starts) == 0:
return None
elif len(match_starts) == 1:
return match_starts[0]
else:
return self.best_start(
text, p_level, paragraph, match_starts, exclude)
def paragraph_offsets(self, text, p_level, paragraph, exclude=[]):
"""Find the start/end of the requested paragraph. Assumes the text
does not just up a p_level -- see build_paragraph_tree below."""
start = self.find_paragraph_start_match(
text, p_level, paragraph, exclude)
if start is None:
return None
id_start, id_end = start
end = self.find_paragraph_start_match(
text[id_end:], p_level, paragraph + 1,
[(e_start - id_end, e_end - id_end)
for e_start, e_end in exclude])
if end is None:
end = len(text)
else:
end = end[0] + id_end
return (id_start, end)
def paragraphs(self, text, p_level, exclude=[]):
"""Return a list of paragraph offsets defined by the level param."""
def offsets_fn(remaining_text, p_idx, exclude):
return self.paragraph_offsets(
remaining_text, p_level, p_idx, exclude)
return segments(text, offsets_fn, exclude)
def build_tree(self, text, p_level=0, exclude=[], label=[],
title=''):
"""
Build a dict to represent the text hierarchy.
"""
subparagraphs = self.paragraphs(text, p_level, exclude)
if subparagraphs:
body_text = text[0:subparagraphs[0][0]]
else:
body_text = text
children = []
for paragraph, (start, end) in enumerate(subparagraphs):
new_text = text[start:end]
new_excludes = [(e[0] - start, e[1] - start) for e in exclude]
new_label = label + [p_levels[p_level][paragraph]]
children.append(
self.build_tree(
new_text, p_level + 1, new_excludes, new_label))
return struct.Node(body_text, children, label, title, self.node_type)
| cc0-1.0 | 749,164,101,625,689,200 | 40.785235 | 79 | 0.596209 | false | 3.905897 | false | false | false |
eamars/webserver | site-package/roster/sql.py | 1 | 4194 | import mysql.connector
SQL_CREATE_TABLE = \
"""
CREATE TABLE `{}` (
`date` date NOT NULL UNIQUE,
`chair` char(64) NOT NULL DEFAULT '',
`minute` char(64) NOT NULL DEFAULT '',
PRIMARY KEY (`date`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8
"""
def create_database(cursor, database_name):
try:
cursor.execute("CREATE DATABASE `{}` DEFAULT CHARACTER SET 'utf8'".format(database_name))
except mysql.connector.Error as e:
print("Error [{}]: failed to create database [{}]".format(e, database_name))
raise Exception("MySQL")
def create_table(cursor, table_name):
try:
cursor.execute(SQL_CREATE_TABLE.format(table_name))
except mysql.connector.Error as e:
print("Error [{}]: failed to create table [{}]".format(e, table_name))
raise Exception("MySQL")
def establish_connection(config):
# Connection to server
connection = mysql.connector.connect(**config)
return connection
def close_connection(connection):
connection.close()
def connect_database(connection, database_name):
# Connect to database, or create a new one
try:
connection.database = database_name
except mysql.connector.Error as e:
if e.errno == 1049:
# Get cursor
cursor = connection.cursor()
print("Creating database [{}]".format(database_name))
create_database(cursor, database_name)
# Close cursor
cursor.close()
connection.database = database_name
else:
print("Error [{}]: connect database".format(e))
raise Exception("MySQL")
def entry_exists(connection, table_name, condition):
cursor = connection.cursor()
sql = "SELECT COUNT(*) FROM `{}` WHERE {}".format(table_name, condition)
# print(sql)
try:
cursor.execute(sql)
for result in cursor:
if result[0] == 0:
cursor.close()
return False
else:
cursor.close()
return True
except mysql.connector.Error as e:
if e.errno == 1146: # Table doesn't exist
print("Creating table [{}]".format(table_name))
create_table(cursor, table_name)
cursor.close()
return False
else:
print("Error [{}]: entry exists".format(e))
print(sql)
cursor.close()
raise Exception("MySQL")
def fetch_entry(connection, table_name, condition):
cursor = connection.cursor()
sql = "SELECT `chair`, `minute` from `{}` WHERE {}".format(table_name, condition)
try:
cursor.execute(sql)
for result in cursor:
return result[0], result[1]
except mysql.connector.Error as e:
if e.errno == 1146: # Table doesn't exist
print("Creating table [{}]".format(table_name))
create_table(cursor, table_name)
cursor.close()
return False
else:
print("Error [{}]: entry exists".format(e))
print(sql)
cursor.close()
raise Exception("MySQL")
def insert_entry(connection, table_name, value):
cursor = connection.cursor()
sql = "INSERT INTO `{}` {}".format(table_name, value)
# print(sql)
try:
cursor.execute(sql)
cursor.close()
except mysql.connector.Error as e:
if e.errno == 1146: # Table doesn't exist
print("Creating table [{}]".format(table_name))
create_table(cursor, table_name)
# Try to execute again
cursor.execute(sql)
cursor.close()
else:
print("Error [{}]: insert entry".format(e))
print(sql)
cursor.close()
raise Exception("MySQL")
def main():
SQL_CONFIG = {
"host": "192.168.2.5",
"user": "eamars",
"password": "931105",
"autocommit": True
}
connection = establish_connection(SQL_CONFIG)
connect_database(connection, "test")
    print(entry_exists(connection, "roster", "chair='Ran Bao'"))
close_connection(connection)
if __name__ == "__main__":
main()
| mit | -4,404,401,249,818,260,000 | 27.147651 | 97 | 0.572246 | false | 4.083739 | false | false | false |
shear/rppy | rppy/fluid.py | 2 | 5952 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# rppy - a geophysical library for Python
# Copyright (c) 2014, Sean M. Contenti
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
def ciz_shapiro(K0, Kdry, Kf, u0, udry, uf, phi, Kphi=None, uphi=None):
"""
Generalized form of Gassmann's equation to perform fluid substitution to
allow for a solid (non-zero shear modulus) pore-filling material.
"""
    if Kphi is None:
        Kphi = K0
    if uphi is None:
        uphi = u0
    # The Ciz-Shapiro relations are expressed in compliance (1/modulus) form,
    # so invert the result to return the saturated bulk and shear moduli.
    Ksat = 1/(1/Kdry - (1/Kdry - 1/K0)**2 /
              (phi*(1/Kf - 1/Kphi) + (1/Kdry - 1/K0)))
    usat = 1/(1/udry - (1/udry - 1/u0)**2 /
              (phi*(1/uf - 1/uphi) + (1/udry - 1/u0)))
return(Ksat, usat)
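# Illustrative usage sketch (not part of the original module): example call showing
# the argument order of ciz_shapiro(). The moduli (GPa) and porosity below are
# made-up demonstration values for a solid-like (e.g. heavy-oil) pore fill.
def _ciz_shapiro_example():
    K0, u0 = 37.0, 44.0      # mineral moduli
    Kdry, udry = 12.0, 10.0  # dry-frame moduli
    Kf, uf = 2.0, 0.5        # pore-fill moduli (non-zero shear)
    phi = 0.25               # porosity
    return ciz_shapiro(K0, Kdry, Kf, u0, udry, uf, phi)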
def gassmann(K0, Kin, Kfin, Kfout, phi):
"""
Use Gassmann's equation to perform fluid substitution. Use the bulk modulus
of a rock saturated with one fluid (or dry frame, Kfin=0) to preduct the
bulk modulus of a rock second with a second fluid.
:param K0: Frame mineral modulus (Gpa)
:param Kin: Input rock modulus (can be fluid saturated or dry)
:param Kfin: Bulk modulus of the pore-filling fluid of the inital rock
(0 if input is the dry-rock modulus)
:param Kfout: Bulk modulus of the pore-filling fluid of the output
(0 if output is dry-rock modulus)
:param phi: Porosity of the rock
"""
A = Kfout / (phi*(K0 - Kfout))
B = Kin / (K0 - Kin)
C = Kfin / (phi*(K0 - Kfin))
D = A + B - C
Kout = K0*D / (1 + D)
return(Kout)
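# Illustrative usage sketch (not part of the original module): substitute brine
# into a dry frame with gassmann(). The moduli (GPa) and porosity are made-up
# demonstration values, not recommended constants.
def _gassmann_example():
    K0 = 37.0     # frame mineral modulus
    Kdry = 12.0   # dry-rock modulus, so Kfin = 0 below
    Kbrine = 2.8  # bulk modulus of the new pore fluid
    phi = 0.25    # porosity
    return gassmann(K0, Kdry, 0.0, Kbrine, phi)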
def batzle_wang(P, T, fluid, S=None, G=None, api=None):
"""
Calculate the elastic properties of reservoir fluids using the
Batzle & Wang [1992] equations.
:param P: Pressure (MPa)
:param T: Temperature {deg C)
:param fluid: Fluid type to calculate: brine, gas, or oil
:param S: Salinity (brine only, in ppm)
:param G: Gas gravity (gas mode only, ratio of gas density to air density
at 15.6C and atmospheric pressure)
:param api: American Petroleum Insitute (API) oil gravity
"""
if fluid == 'brine':
S = S / (10**6) # ppm to fraction of one
w = np.array([
[1402.85, 1.524, 3.437e-3, -1.197e-5],
[4.871, -0.0111, 1.739e-4, -1.628e-6],
[-0.04783, 2.747e-4, -2.135e-6, 1.237e-8],
[1.487e-4, -6.503e-7, -1.455e-8, 1.327e-10],
[-2.197e-7, 7.987e-10, 5.230e-11, -4.614e-13],
])
rhow = (1 + (10**-6)*(-80*T - 3.3*(T**2) + 0.00175*(T**3) +
489*P - 2*T*P + 0.016*(T**2)*P - (1.3e-5)*(T**3)*P -
0.333*(P**2) - 0.002*T*(P**2)))
rhob = rhow + S*(0.668 + 0.44*S + (10**-6)*(300*P - 2400*P*S +
T*(80 + 3*T - 3300*S - 13*P + 47*P*S)))
Vw = 0
        for i in range(5):
            for j in range(4):
Vw = Vw + w[i][j]*T**i*P**j
Vb = (Vw + S*(1170 - 9.8*T + 0.055*T**2 - 8.5e-5*T**3 + 2.6*P -
0.0029*T*P - 0.0476*P**2) + S**(3/2)*(780 - 10*P + 0.16*P**2) -
1820*S**2)
out = {'rho': rhob, 'Vp': Vb}
elif fluid == 'oil':
Rg = 2.03*G*(P*np.exp(0.02878*api - 0.00377*T))**1.205
rho0 = 141.5 / (api + 131.5)
B0 = 0.972 + 0.00038*(2.4*Rg*(G/rho0)**0.5 + T + 17.8)**(1.175)
rho_r = (rho0/B0)*(1 + 0.001*Rg)**-1 # pseudo-density of oil
rhog = (rho0 + 0.0012*G*Rg)/B0 # density of oil with gas
rhop = (rhog + (0.00277*P - # correct for pressure
1.71e-7*P**3)*(rhog - 1.15)**2 + 3.49e-4*P)
rho = rhop / (0.972 + 3.81e-4*(T + 17.78)**1.175) # correct for temp
Vp = 2096*(rho_r / (2.6 - rho_r))**0.5 - 3.7*T + 4.64*P + 0.0115*(
4.12*(1.08/rho_r - 1)**0.5 -1)*T*P
out = {'rho': rho, 'Vp': Vp}
elif fluid == 'gas':
Ta = T + 273.15 # absolute temperature
Pr = P / (4.892 - 0.4048*G) # pseudo-pressure
Tr = Ta / (94.72 + 170.75*G) # pseudo-temperature
R = 8.31441
d = np.exp(-(0.45 + 8*(0.56 - 1/Tr)**2)*Pr**1.2/Tr)
c = 0.109*(3.85 - Tr)**2
b = 0.642*Tr - 0.007*Tr**4 - 0.52
a = 0.03 + 0.00527*(3.5 - Tr)**3
m = 1.2*(-(0.45 + 8*(0.56 - 1/Tr)**2)*Pr**0.2/Tr)
y = (0.85 + 5.6/(Pr + 2) + 27.1/(Pr + 3.5)**2 -
8.7*np.exp(-0.65*(Pr + 1)))
f = c*d*m + a
E = c*d
Z = a*Pr + b + E
rhog = (28.8*G*P) / (Z*R*Ta)
Kg = P*y / (1 - Pr*f/Z)
out = {'rho': rhog, 'K': Kg}
else:
out = None
return(out)
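# Illustrative usage sketch (not part of the original module): example calls covering
# the three fluid branches of batzle_wang(). Pressure (MPa), temperature (deg C),
# salinity (ppm), gas gravity and API values are made-up demonstration inputs.
def _batzle_wang_example():
    brine = batzle_wang(20.0, 60.0, 'brine', S=35000)
    oil = batzle_wang(20.0, 60.0, 'oil', G=0.6, api=30.0)
    gas = batzle_wang(20.0, 60.0, 'gas', G=0.6)
    return brine, oil, gas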
| bsd-2-clause | -4,058,591,948,400,436,700 | 36.670886 | 79 | 0.544859 | false | 2.808872 | false | false | false |
cmacmackin/ford | ford/graphs.py | 1 | 48315 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# graphs.py
# This file is part of FORD.
#
# Copyright 2015 Christopher MacMackin <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from __future__ import print_function
import os
import shutil
import re
import copy
import colorsys
from graphviz import Digraph
from ford.sourceform import FortranFunction, FortranSubroutine, FortranInterface, FortranProgram, FortranType, FortranModule, FortranSubmodule, FortranSubmoduleProcedure, FortranSourceFile, FortranBlockData
_coloured_edges = False
def set_coloured_edges(val):
'''
Public accessor to set whether to use coloured edges in graph or just
use black ones.
'''
global _coloured_edges
_coloured_edges = val
_parentdir = ''
def set_graphs_parentdir(val):
'''
Public accessor to set the parent directory of the graphs.
Needed for relative paths.
'''
global _parentdir
_parentdir = val
def rainbowcolour(depth, maxd):
if _coloured_edges:
(r, g, b) = colorsys.hsv_to_rgb(float(depth) / maxd, 1.0, 1.0)
R, G, B = int(255 * r), int(255 * g), int(255 * b)
return R, G, B
else:
return 0, 0, 0
HYPERLINK_RE = re.compile("^\s*<\s*a\s+.*href=(\"[^\"]+\"|'[^']+').*>(.*)</\s*a\s*>\s*$",re.IGNORECASE)
WIDTH_RE = re.compile('width="(.*?)pt"',re.IGNORECASE)
HEIGHT_RE = re.compile('height="(.*?)pt"',re.IGNORECASE)
EM_RE = re.compile('<em>(.*)</em>',re.IGNORECASE)
graphviz_installed = True
def newdict(old,key,val):
new = copy.copy(old)
new[key] = val
return new
def is_module(obj,cls):
return isinstance(obj,FortranModule) or issubclass(cls,FortranModule)
def is_submodule(obj,cls):
return isinstance(obj,FortranSubmodule) or issubclass(cls,FortranSubmodule)
def is_type(obj,cls):
return isinstance(obj,FortranType) or issubclass(cls,FortranType)
def is_proc(obj,cls):
return (isinstance(obj,(FortranFunction,FortranSubroutine,
FortranInterface,FortranSubmoduleProcedure))
or issubclass(cls,(FortranFunction,FortranSubroutine,
FortranInterface,FortranSubmoduleProcedure)))
def is_program(obj, cls):
return isinstance(obj,FortranProgram) or issubclass(cls,FortranProgram)
def is_sourcefile(obj, cls):
return isinstance(obj,FortranSourceFile) or issubclass(cls,FortranSourceFile)
def is_blockdata(obj, cls):
return isinstance(obj,FortranBlockData) or issubclass(cls,FortranBlockData)
class GraphData(object):
"""
Contains all of the nodes which may be displayed on a graph.
"""
def __init__(self):
self.submodules = {}
self.modules = {}
self.types = {}
self.procedures = {}
self.programs = {}
self.sourcefiles = {}
self.blockdata = {}
def register(self,obj,cls=type(None),hist={}):
"""
Takes a FortranObject and adds it to the appropriate list, if
not already present.
"""
#~ ident = getattr(obj,'ident',obj)
if is_submodule(obj,cls):
if obj not in self.submodules: self.submodules[obj] = SubmodNode(obj,self)
elif is_module(obj,cls):
if obj not in self.modules: self.modules[obj] = ModNode(obj,self)
elif is_type(obj,cls):
if obj not in self.types: self.types[obj] = TypeNode(obj,self,hist)
elif is_proc(obj,cls):
if obj not in self.procedures: self.procedures[obj] = ProcNode(obj,self,hist)
elif is_program(obj,cls):
if obj not in self.programs: self.programs[obj] = ProgNode(obj,self)
elif is_sourcefile(obj,cls):
if obj not in self.sourcefiles: self.sourcefiles[obj] = FileNode(obj,self)
elif is_blockdata(obj,cls):
if obj not in self.blockdata: self.blockdata[obj] = BlockNode(obj,self)
else:
raise BadType("Object type {} not recognized by GraphData".format(type(obj).__name__))
def get_node(self,obj,cls=type(None),hist={}):
"""
Returns the node corresponding to obj. If does not already exist
then it will create it.
"""
#~ ident = getattr(obj,'ident',obj)
if obj in self.modules and is_module(obj,cls):
return self.modules[obj]
elif obj in self.submodules and is_submodule(obj,cls):
return self.submodules[obj]
elif obj in self.types and is_type(obj,cls):
return self.types[obj]
elif obj in self.procedures and is_proc(obj,cls):
return self.procedures[obj]
elif obj in self.programs and is_program(obj,cls):
return self.programs[obj]
elif obj in self.sourcefiles and is_sourcefile(obj,cls):
return self.sourcefiles[obj]
elif obj in self.blockdata and is_blockdata(obj,cls):
return self.blockdata[obj]
else:
self.register(obj,cls,hist)
return self.get_node(obj,cls,hist)
class BaseNode(object):
colour = '#777777'
def __init__(self,obj):
self.attribs = {'color':self.colour,
'fontcolor':'white',
'style':'filled'}
self.fromstr = type(obj) is str
self.url = None
if self.fromstr:
m = HYPERLINK_RE.match(obj)
if m:
self.url = m.group(1)[1:-1]
self.name = m.group(2)
else:
self.name = obj
self.ident = self.name
else:
d = obj.get_dir()
if not d: d = 'none'
self.ident = d + '~' + obj.ident
self.name = obj.name
m = EM_RE.search(self.name)
if m: self.name = '<<i>'+m.group(1).strip()+'</i>>'
self.url = obj.get_url()
self.attribs['label'] = self.name
if self.url and getattr(obj,'visible',True):
if self.fromstr:
self.attribs['URL'] = self.url
else:
self.attribs['URL'] = _parentdir + self.url
self.afferent = 0
self.efferent = 0
def __eq__(self, other):
return self.ident == other.ident
def __hash__(self):
return hash(self.ident)
class ModNode(BaseNode):
colour = '#337AB7'
def __init__(self,obj,gd):
super(ModNode,self).__init__(obj)
self.uses = set()
self.used_by = set()
self.children = set()
if not self.fromstr:
for u in obj.uses:
n = gd.get_node(u,FortranModule)
n.used_by.add(self)
n.afferent += 1
self.uses.add(n)
self.efferent += n.efferent
class SubmodNode(ModNode):
colour = '#5bc0de'
def __init__(self,obj,gd):
super(SubmodNode,self).__init__(obj,gd)
del self.used_by
if not self.fromstr:
if obj.ancestor:
self.ancestor = gd.get_node(obj.ancestor,FortranSubmodule)
else:
self.ancestor = gd.get_node(obj.ancestor_mod,FortranModule)
self.ancestor.children.add(self)
self.efferent += 1
self.ancestor.afferent += 1
class TypeNode(BaseNode):
colour = '#5cb85c'
def __init__(self,obj,gd,hist={}):
super(TypeNode,self).__init__(obj)
self.ancestor = None
self.children = set()
self.comp_types = dict()
self.comp_of = dict()
if not self.fromstr:
if obj.extends:
if obj.extends in hist:
self.ancestor = hist[obj.extends]
else:
self.ancestor = gd.get_node(obj.extends,FortranType,newdict(hist,obj,self))
self.ancestor.children.add(self)
self.ancestor.visible = getattr(obj.extends,'visible',True)
for var in obj.local_variables:
if (var.vartype == 'type' or var.vartype == 'class') and var.proto[0] != '*':
if var.proto[0] == obj:
n = self
elif var.proto[0] in hist:
n = hist[var.proto[0]]
else:
n = gd.get_node(var.proto[0],FortranType,newdict(hist,obj,self))
n.visible = getattr(var.proto[0],'visible',True)
if self in n.comp_of:
n.comp_of[self] += ', ' + var.name
else:
n.comp_of[self] = var.name
if n in self.comp_types:
self.comp_types[n] += ', ' + var.name
else:
self.comp_types[n] = var.name
class ProcNode(BaseNode):
@property
def colour(self):
if self.proctype.lower() == 'subroutine':
return '#d9534f'
elif self.proctype.lower() == 'function':
return '#d94e8f'
elif self.proctype.lower() == 'interface':
return '#A7506F'
#~ return '#c77c25'
else:
return super(ProcNode,self).colour
def __init__(self,obj,gd,hist={}):
#ToDo: Figure out appropriate way to handle interfaces to routines in submodules.
self.proctype = getattr(obj,'proctype','')
super(ProcNode,self).__init__(obj)
self.uses = set()
self.calls = set()
self.called_by = set()
self.interfaces = set()
self.interfaced_by = set()
if not self.fromstr:
for u in getattr(obj,'uses',[]):
n = gd.get_node(u,FortranModule)
n.used_by.add(self)
self.uses.add(n)
for c in getattr(obj,'calls',[]):
if getattr(c,'visible',True):
if c == obj:
n = self
elif c in hist:
n = hist[c]
else:
n = gd.get_node(c,FortranSubroutine,newdict(hist,obj,self))
n.called_by.add(self)
self.calls.add(n)
if obj.proctype.lower() == 'interface':
for m in getattr(obj,'modprocs',[]):
if m.procedure and getattr(m.procedure,'visible',True):
if m.procedure in hist:
n = hist[m.procedure]
else:
n = gd.get_node(m.procedure,FortranSubroutine,newdict(hist,obj,self))
n.interfaced_by.add(self)
self.interfaces.add(n)
if hasattr(obj,'procedure') and obj.procedure.module and obj.procedure.module != True and getattr(obj.procedure.module,'visible',True):
if obj.procedure.module in hist:
n = hist[obj.procedure.module]
else:
n = gd.get_node(obj.procedure.module,FortranSubroutine,newdict(hist,obj,self))
n.interfaced_by.add(self)
self.interfaces.add(n)
class ProgNode(BaseNode):
colour = '#f0ad4e'
def __init__(self,obj,gd):
super(ProgNode,self).__init__(obj)
self.uses = set()
self.calls = set()
if not self.fromstr:
for u in obj.uses:
n = gd.get_node(u,FortranModule)
n.used_by.add(self)
self.uses.add(n)
for c in obj.calls:
if getattr(c,'visible',True):
n = gd.get_node(c,FortranSubroutine)
n.called_by.add(self)
self.calls.add(n)
class BlockNode(BaseNode):
colour = '#5cb85c'
def __init__(self,obj,gd):
super(BlockNode,self).__init__(obj)
self.uses = set()
if not self.fromstr:
for u in obj.uses:
n = gd.get_node(u,FortranModule)
n.used_by.add(self)
self.uses.add(n)
class FileNode(BaseNode):
colour = '#f0ad4e'
def __init__(self,obj,gd,hist={}):
super(FileNode,self).__init__(obj)
self.afferent = set() # Things depending on this file
self.efferent = set() # Things this file depends on
if not self.fromstr:
for mod in obj.modules:
for dep in mod.deplist:
if dep.hierarchy[0] == obj:
continue
elif dep.hierarchy[0] in hist:
n = hist[dep.hierarchy[0]]
else:
n = gd.get_node(dep.hierarchy[0],FortranSourceFile,newdict(hist,obj,self))
n.afferent.add(self)
self.efferent.add(n)
for mod in obj.submodules:
for dep in mod.deplist:
if dep.hierarchy[0] == obj:
continue
elif dep.hierarchy[0] in hist:
n = hist[dep.hierarchy[0]]
else:
n = gd.get_node(dep.hierarchy[0],FortranSourceFile,newdict(hist,obj,self))
n.afferent.add(self)
self.efferent.add(n)
for proc in obj.functions + obj.subroutines:
for dep in proc.deplist:
if dep.hierarchy[0] == obj:
continue
elif dep.hierarchy[0] in hist:
n = hist[dep.hierarchy[0]]
else:
n = gd.get_node(dep.hierarchy[0],FortranSourceFile,newdict(hist,obj,self))
n.afferent.add(self)
self.efferent.add(n)
for prog in obj.programs:
for dep in prog.deplist:
if dep.hierarchy[0] == obj:
continue
elif dep.hierarchy[0] in hist:
n = hist[dep.hierarchy[0]]
else:
n = gd.get_node(dep.hierarchy[0],FortranSourceFile,newdict(hist,obj,self))
n.afferent.add(self)
self.efferent.add(n)
for block in obj.blockdata:
for dep in block.deplist:
if dep.hierarchy[0] == obj:
continue
elif dep.hierarchy[0] in hist:
n = hist[dep.hierarchy[0]]
else:
n = gd.get_node(dep.hierarchy[0],FortranSourceFile,newdict(hist,obj,self))
n.afferent.add(self)
self.efferent.add(n)
class FortranGraph(object):
"""
Object used to construct the graph for some particular entity in the code.
"""
data = GraphData()
RANKDIR = 'RL'
def __init__(self,root,webdir='',ident=None):
"""
        Initialize the graph; root is the object or list of objects
        for which the graph is to be constructed.
The webdir is the url where the graph should be stored, and
        ident can be provided to override the default identification
of the graph that will be used to construct the name of the
imagefile. It has to be provided if there are multiple root
nodes.
"""
self.root = [] # root nodes
self.hopNodes = [] # nodes of the hop which exceeded the maximum
self.hopEdges = [] # edges of the hop which exceeded the maximum
self.added = set() # nodes added to the graph
        self.max_nesting = 0  # maximum number of hops allowed
        self.max_nodes = 1    # maximum number of nodes allowed
self.warn = False # should warnings be written?
self.truncated = -1 # nesting where the graph was truncated
try:
for r in root:
self.root.append(self.data.get_node(r))
self.max_nesting = max(self.max_nesting,
int(r.meta['graph_maxdepth']))
self.max_nodes = max(self.max_nodes,
int(r.meta['graph_maxnodes']))
self.warn = self.warn or (r.settings['warn'].lower() == 'true')
except TypeError:
self.root.append(self.data.get_node(root))
self.max_nesting = int(root.meta['graph_maxdepth'])
self.max_nodes = max(self.max_nodes,
int(root.meta['graph_maxnodes']))
self.warn = root.settings['warn'].lower() == 'true'
self.webdir = webdir
if ident:
self.ident = ident + '~~' + self.__class__.__name__
else:
self.ident = root.get_dir() + '~~' + root.ident + '~~' + self.__class__.__name__
self.imgfile = self.ident
self.dot = Digraph(self.ident,
graph_attr={'size':'8.90625,1000.0',
'rankdir':self.RANKDIR,
'concentrate':'true',
'id':self.ident},
node_attr={'shape':'box',
'height':'0.0',
'margin':'0.08',
'fontname':'Helvetica',
'fontsize':'10.5'},
edge_attr={'fontname':'Helvetica',
'fontsize':'9.5'},
format='svg', engine='dot')
# add root nodes to the graph
for n in self.root:
if len(self.root) == 1:
self.dot.node(n.ident, label=n.name)
else:
self.dot.node(n.ident, **n.attribs)
self.added.add(n)
# add nodes and edges depending on the root nodes to the graph
self.add_nodes(self.root)
#~ self.linkmap = self.dot.pipe('cmapx').decode('utf-8')
if graphviz_installed:
self.svg_src = self.dot.pipe().decode('utf-8')
self.svg_src = self.svg_src.replace('<svg ','<svg id="' + re.sub('[^\w]','',self.ident) + '" ')
w = int(WIDTH_RE.search(self.svg_src).group(1))
if isinstance(self,(ModuleGraph,CallGraph,TypeGraph)):
self.scaled = (w >= 855)
else:
self.scaled = (w >= 641)
else:
self.svg_src = ''
self.scaled = False
def add_to_graph(self, nodes, edges, nesting):
"""
Adds nodes and edges to the graph as long as the maximum number
of nodes is not exceeded.
All edges are expected to have a reference to an entry in nodes.
        If the nodes are not added in the first hop due to graph
        size limitations, they are stored in hopNodes.
If the graph was extended the function returns True, otherwise the
result will be False.
"""
if (len(nodes) + len(self.added)) > self.max_nodes:
if nesting < 2:
self.hopNodes = nodes
self.hopEdges = edges
self.truncated = nesting
return False
else:
for n in nodes:
self.dot.node(n.ident, **n.attribs)
for e in edges:
if len(e) == 5:
self.dot.edge(e[0].ident, e[1].ident, style=e[2],
color=e[3], label=e[4])
else:
self.dot.edge(e[0].ident, e[1].ident, style=e[2],
color=e[3])
self.added.update(nodes)
return True
def __str__(self):
"""
The string of the graph is its HTML representation.
It will only be created if it is not too large.
If the graph is overly large but can represented by a single node
with many dependencies it will be shown as a table instead to ease
the rendering in browsers.
"""
graph_as_table = len(self.hopNodes) > 0 and len(self.root) == 1
# Do not render empty graphs
if len(self.added) <= 1 and not graph_as_table:
return ''
# Do not render overly large graphs.
if len(self.added) > self.max_nodes:
if self.warn:
print('Warning: Not showing graph {0} as it would exceed the maximal number of {1} nodes.'
.format(self.ident,self.max_nodes))
# Only warn once about this
self.warn = False
return ''
# Do not render incomplete graphs.
if len(self.added) < len(self.root):
if self.warn:
print('Warning: Not showing graph {0} as it would be incomplete.'.format(self.ident))
# Only warn once about this
self.warn = False
return ''
if self.warn and self.truncated > 0:
print('Warning: Graph {0} is truncated after {1} hops.'.format(self.ident,self.truncated))
# Only warn once about this
self.warn = False
zoomName = ''
svgGraph = ''
rettext = ''
if graph_as_table:
# generate a table graph if maximum number of nodes gets exceeded in
# the first hop and there is only one root node.
root = '<td class="root" rowspan="{0}">{1}</td>'.format(
len(self.hopNodes) * 2 + 1, self.root[0].attribs['label'])
if self.hopEdges[0][0].ident == self.root[0].ident:
key = 1
root_on_left = (self.RANKDIR == 'LR')
if root_on_left:
arrowtemp = ('<td class="{0}{1}">{2}</td><td rowspan="2"'
+ 'class="triangle-right"></td>')
else:
arrowtemp = ('<td rowspan="2" class="triangle-left">'
+ '</td><td class="{0}{1}">{2}</td>')
else:
key = 0
root_on_left = (self.RANKDIR == 'RL')
if root_on_left:
arrowtemp = ('<td rowspan="2" class="triangle-left">'
+ '</td><td class="{0}{1}">{2}</td>')
else:
arrowtemp = ('<td class="{0}{1}">{2}</td><td rowspan="2"'
+ 'class="triangle-right"></td>')
# sort nodes in alphabetical order
self.hopEdges.sort(key=lambda x: x[key].attribs['label'].lower())
rows = ''
for i in range(len(self.hopEdges)):
e = self.hopEdges[i]
n = e[key]
if len(e) == 5:
arrow = arrowtemp.format(e[2], 'Text', e[4])
else:
arrow = arrowtemp.format(e[2], 'Bottom', 'w')
node = '<td rowspan="2" class="node" bgcolor="{0}">'.format(
n.attribs['color'])
try:
node += '<a href="{0}">{1}</a></td>'.format(
n.attribs['URL'], n.attribs['label'])
except:
node += n.attribs['label'] + '</td>'
if root_on_left:
rows += '<tr>' + root + arrow + node + '</tr>\n'
else:
rows += '<tr>' + node + arrow + root + '</tr>\n'
rows += '<tr><td class="{0}Top">w</td></tr>\n'.format(e[2])
root = ''
rettext += '<table class="graph">\n' + rows + '</table>\n'
# generate svg graph
else:
rettext += '<div class="depgraph">{0}</div>'
svgGraph = self.svg_src
# add zoom ability for big graphs
if self.scaled:
zoomName = re.sub('[^\w]', '', self.ident)
rettext += ('<script>var pan{1} = svgPanZoom(\'#{1}\', '
'{{zoomEnabled: true,controlIconsEnabled: true, '
'fit: true, center: true,}}); </script>')
rettext += ('<div><a type="button" class="graph-help" '
'data-toggle="modal" href="#graph-help-text">Help</a>'
'</div><div class="modal fade" id="graph-help-text" '
'tabindex="-1" role="dialog"><div class="modal-dialog '
'modal-lg" role="document"><div class="modal-content">'
'<div class="modal-header"><button type="button" '
'class="close" data-dismiss="modal" aria-label="Close">'
'<span aria-hidden="true">×</span></button><h4 class'
'="modal-title" id="-graph-help-label">Graph Key</h4>'
'</div><div class="modal-body">{2}</div></div></div>'
'</div>')
return rettext.format(svgGraph, zoomName, self.get_key())
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
return(bool(self.__str__()))
@classmethod
def reset(cls):
cls.data = GraphData()
def create_svg(self, out_location):
if len(self.added) > len(self.root):
self._create_image_file(os.path.join(out_location, self.imgfile))
def _create_image_file(self,filename):
if graphviz_installed:
self.dot.render(filename,cleanup=False)
shutil.move(filename,os.path.join(os.path.dirname(filename),
os.path.basename(filename)+'.gv'))
class ModuleGraph(FortranGraph):
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return MOD_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
Adds nodes and edges for generating the graph showing the relationship
between modules and submodules listed in nodes.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for nu in n.uses:
if nu not in self.added:
hopNodes.add(nu)
hopEdges.append((n, nu, 'dashed', colour))
if hasattr(n, 'ancestor'):
if n.ancestor not in self.added:
hopNodes.add(n.ancestor)
hopEdges.append((n, n.ancestor, 'solid', colour))
# add nodes, edges and attributes to the graph if maximum number of
# nodes is not exceeded
if self.add_to_graph(hopNodes, hopEdges, nesting):
self.dot.attr('graph', size='11.875,1000.0')
class UsesGraph(FortranGraph):
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return MOD_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
Adds nodes for the modules used by those listed in nodes. Adds
edges between them. Also does this for ancestor (sub)modules.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for nu in n.uses:
if nu not in self.added:
hopNodes.add(nu)
hopEdges.append((n, nu, 'dashed', colour))
if hasattr(n, 'ancestor'):
if n.ancestor not in self.added:
hopNodes.add(n.ancestor)
hopEdges.append((n, n.ancestor, 'solid', colour))
# add nodes and edges for this hop to the graph if maximum number of
# nodes is not exceeded
if not self.add_to_graph(hopNodes, hopEdges, nesting):
return
elif len(hopNodes) > 0:
if nesting < self.max_nesting:
self.add_nodes(hopNodes, nesting=nesting+1)
else:
self.truncated = nesting
class UsedByGraph(FortranGraph):
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return MOD_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
Adds nodes for modules using or descended from those listed in
nodes. Adds appropriate edges between them.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for nu in getattr(n, 'used_by', []):
if nu not in self.added:
hopNodes.add(nu)
hopEdges.append((nu, n, 'dashed', colour))
for c in getattr(n, 'children', []):
if c not in self.added:
hopNodes.add(c)
hopEdges.append((c, n, 'solid', colour))
# add nodes and edges for this hop to the graph if maximum number of
# nodes is not exceeded
if not self.add_to_graph(hopNodes, hopEdges, nesting):
return
elif len(hopNodes) > 0:
if nesting < self.max_nesting:
self.add_nodes(hopNodes, nesting=nesting+1)
else:
self.truncated = nesting
class FileGraph(FortranGraph):
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return FILE_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
Adds edges showing dependencies between source files listed in
the nodes.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for ne in n.efferent:
if ne not in self.added:
hopNodes.add(ne)
hopEdges.append((ne, n, 'solid', colour))
# add nodes and edges to the graph if maximum number of nodes is not
# exceeded
self.add_to_graph(hopNodes, hopEdges, nesting)
class EfferentGraph(FortranGraph):
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return FILE_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
Adds nodes for the files which this one depends on. Adds
edges between them.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for ne in n.efferent:
if ne not in self.added:
hopNodes.add(ne)
hopEdges.append((n, ne, 'dashed', colour))
# add nodes and edges for this hop to the graph if maximum number of
# nodes is not exceeded
if not self.add_to_graph(hopNodes, hopEdges, nesting):
return
elif len(hopNodes) > 0:
if nesting < self.max_nesting:
self.add_nodes(hopNodes, nesting=nesting+1)
else:
self.truncated = nesting
class AfferentGraph(FortranGraph):
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return FILE_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
Adds nodes for files which depend upon this one. Adds appropriate
edges between them.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for na in n.afferent:
if na not in self.added:
hopNodes.add(na)
hopEdges.append((na, n, 'dashed', colour))
# add nodes and edges for this hop to the graph if maximum number of
# nodes is not exceeded
if not self.add_to_graph(hopNodes, hopEdges, nesting):
return
elif len(hopNodes) > 0:
if nesting < self.max_nesting:
self.add_nodes(hopNodes, nesting=nesting+1)
else:
self.truncated = nesting
class TypeGraph(FortranGraph):
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return TYPE_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
Adds edges showing inheritance and composition relationships
between derived types listed in the nodes.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for keys in n.comp_types.keys():
if keys not in self.added:
hopNodes.add(keys)
for c in n.comp_types:
if c not in self.added:
hopNodes.add(c)
hopEdges.append((n, c, 'dashed', colour, n.comp_types[c]))
if n.ancestor:
if n.ancestor not in self.added:
hopNodes.add(n.ancestor)
hopEdges.append((n, n.ancestor, 'solid', colour))
# add nodes, edges and attributes to the graph if maximum number of
# nodes is not exceeded
if self.add_to_graph(hopNodes, hopEdges, nesting):
self.dot.attr('graph', size='11.875,1000.0')
class InheritsGraph(FortranGraph):
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return TYPE_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
        Adds nodes for the types which those listed in nodes extend or
        contain as components. Adds appropriate edges between them.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for c in n.comp_types:
if c not in self.added:
hopNodes.add(c)
hopEdges.append((n, c, 'dashed', colour, n.comp_types[c]))
if n.ancestor:
if n.ancestor not in self.added:
hopNodes.add(n.ancestor)
hopEdges.append((n, n.ancestor, 'solid', colour))
# add nodes and edges for this hop to the graph if maximum number of
# nodes is not exceeded
if not self.add_to_graph(hopNodes, hopEdges, nesting):
return
elif len(hopNodes) > 0:
if nesting < self.max_nesting:
self.add_nodes(hopNodes, nesting=nesting+1)
else:
self.truncated = nesting
class InheritedByGraph(FortranGraph):
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return TYPE_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
        Adds nodes for the types which extend those listed in nodes or
        contain them as components. Adds appropriate edges between them.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for c in n.comp_of:
if c not in self.added:
hopNodes.add(c)
hopEdges.append((c, n, 'dashed', colour, n.comp_of[c]))
for c in n.children:
if c not in self.added:
hopNodes.add(c)
hopEdges.append((c, n, 'solid', colour))
# add nodes and edges for this hop to the graph if maximum number of
# nodes is not exceeded
if not self.add_to_graph(hopNodes, hopEdges, nesting):
return
elif len(hopNodes) > 0:
if nesting < self.max_nesting:
self.add_nodes(hopNodes, nesting=nesting+1)
else:
self.truncated = nesting
class CallGraph(FortranGraph):
RANKDIR = 'LR'
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return CALL_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
Adds edges indicating the call-tree for the procedures listed in
the nodes.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for p in n.calls:
if p not in hopNodes:
hopNodes.add(p)
hopEdges.append((n, p, 'solid', colour))
for p in getattr(n, 'interfaces', []):
if p not in hopNodes:
hopNodes.add(p)
hopEdges.append((n, p, 'dashed', colour))
# add nodes, edges and attributes to the graph if maximum number of
# nodes is not exceeded
if self.add_to_graph(hopNodes, hopEdges, nesting):
self.dot.attr('graph', size='11.875,1000.0')
self.dot.attr('graph', concentrate='false')
class CallsGraph(FortranGraph):
RANKDIR = 'LR'
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return CALL_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
        Adds nodes for the procedures called by those listed in nodes (or
        implementing them, for interfaces). Adds appropriate edges between them.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for p in n.calls:
if p not in self.added:
hopNodes.add(p)
hopEdges.append((n, p, 'solid', colour))
for p in getattr(n, 'interfaces', []):
if p not in self.added:
hopNodes.add(p)
hopEdges.append((n, p, 'dashed', colour))
        # add nodes, edges and attributes for this hop to the graph if
# maximum number of nodes is not exceeded
if not self.add_to_graph(hopNodes, hopEdges, nesting):
return
elif len(hopNodes) > 0:
if nesting < self.max_nesting:
self.dot.attr('graph', concentrate='false')
self.add_nodes(hopNodes, nesting=nesting+1)
else:
self.truncated = nesting
class CalledByGraph(FortranGraph):
RANKDIR = 'LR'
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return CALL_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
        Adds nodes for the procedures which call those listed in nodes, and
        for the interfaces they implement. Adds appropriate edges between them.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
if isinstance(n, ProgNode):
continue
for p in n.called_by:
if p not in self.added:
hopNodes.add(p)
hopEdges.append((p, n, 'solid', colour))
for p in getattr(n, 'interfaced_by', []):
if p not in self.added:
hopNodes.add(p)
hopEdges.append((p, n, 'dashed', colour))
        # add nodes, edges and attributes for this hop to the graph if
# maximum number of nodes is not exceeded
if not self.add_to_graph(hopNodes, hopEdges, nesting):
return
elif len(hopNodes) > 0:
if nesting < self.max_nesting:
self.dot.attr('graph', concentrate='false')
self.add_nodes(hopNodes, nesting=nesting+1)
else:
self.truncated = nesting
class BadType(Exception):
"""
Raised when a type is passed to GraphData.register() which is not
accepted.
"""
def __init__(self,value):
self.value = value
def __str__(self):
return repr(self.value)
# Generate graph keys
gd = GraphData()
class Proc(object):
def __init__(self,name,proctype):
self.name = name
self.proctype = proctype
self.ident = ''
def get_url(self):
return ''
def get_dir(self):
return ''
sub = Proc('Subroutine','Subroutine')
func = Proc('Function','Function')
intr = Proc('Interface','Interface')
gd.register('Module',FortranModule)
gd.register('Submodule',FortranSubmodule)
gd.register('Type',FortranType)
gd.register(sub,FortranSubroutine)
gd.register(func,FortranFunction)
gd.register(intr,FortranInterface)
gd.register('Unknown Procedure Type',FortranSubroutine)
gd.register('Program',FortranProgram)
gd.register('Source File',FortranSourceFile)
try:
# Generate key for module graph
dot = Digraph('Graph Key',graph_attr={'size':'8.90625,1000.0',
'concentrate':'false'},
node_attr={'shape':'box',
'height':'0.0',
'margin':'0.08',
'fontname':'Helvetica',
'fontsize':'10.5'},
edge_attr={'fontname':'Helvetica',
'fontsize':'9.5'},
format='svg', engine='dot')
for n in [('Module',FortranModule),('Submodule',FortranSubmodule),(sub,FortranSubroutine),(func,FortranFunction),('Program', FortranProgram)]:
dot.node(getattr(n[0],'name',n[0]),**gd.get_node(n[0],cls=n[1]).attribs)
dot.node('This Page\'s Entity')
mod_svg = dot.pipe().decode('utf-8')
# Generate key for type graph
dot = Digraph('Graph Key',graph_attr={'size':'8.90625,1000.0',
'concentrate':'false'},
node_attr={'shape':'box',
'height':'0.0',
'margin':'0.08',
'fontname':'Helvetica',
'fontsize':'10.5'},
edge_attr={'fontname':'Helvetica',
'fontsize':'9.5'},
format='svg', engine='dot')
dot.node('Type',**gd.get_node('Type',cls=FortranType).attribs)
dot.node('This Page\'s Entity')
type_svg = dot.pipe().decode('utf-8')
# Generate key for call graph
dot = Digraph('Graph Key',graph_attr={'size':'8.90625,1000.0',
'concentrate':'false'},
node_attr={'shape':'box',
'height':'0.0',
'margin':'0.08',
'fontname':'Helvetica',
'fontsize':'10.5'},
edge_attr={'fontname':'Helvetica',
'fontsize':'9.5'},
format='svg', engine='dot')
for n in [(sub,FortranSubroutine),(func,FortranFunction),(intr, FortranInterface),('Unknown Procedure Type',FortranFunction),('Program', FortranProgram)]:
dot.node(getattr(n[0],'name',n[0]),**gd.get_node(n[0],cls=n[1]).attribs)
dot.node('This Page\'s Entity')
call_svg = dot.pipe().decode('utf-8')
# Generate key for file graph
dot = Digraph('Graph Key',graph_attr={'size':'8.90625,1000.0',
'concentrate':'false'},
node_attr={'shape':'box',
'height':'0.0',
'margin':'0.08',
'fontname':'Helvetica',
'fontsize':'10.5'},
edge_attr={'fontname':'Helvetica',
'fontsize':'9.5'},
format='svg', engine='dot')
dot.node('Source File',**gd.get_node('Source File',cls=FortranSourceFile).attribs)
dot.node('This Page\'s Entity')
file_svg = dot.pipe().decode('utf-8')
except RuntimeError:
graphviz_installed = False
if graphviz_installed:
NODE_DIAGRAM = """
<p>Nodes of different colours represent the following: </p>
{}
"""
MOD_GRAPH_KEY = (NODE_DIAGRAM + """
<p>Solid arrows point from a submodule to the (sub)module which it is
descended from. Dashed arrows point from a module or program unit to
modules which it uses.{{}}
</p>
""").format(mod_svg)
TYPE_GRAPH_KEY = (NODE_DIAGRAM + """
<p>Solid arrows point from a derived type to the parent type which it
extends. Dashed arrows point from a derived type to the other
types it contains as a components, with a label listing the name(s) of
said component(s).{{}}
</p>
""").format(type_svg)
CALL_GRAPH_KEY = (NODE_DIAGRAM + """
<p>Solid arrows point from a procedure to one which it calls. Dashed
arrows point from an interface to procedures which implement that interface.
This could include the module procedures in a generic interface or the
implementation in a submodule of an interface in a parent module.{{}}
</p>
""").format(call_svg)
FILE_GRAPH_KEY = (NODE_DIAGRAM + """
<p>Solid arrows point from a file to a file which it depends on. A file
is dependent upon another if the latter must be compiled before the former
can be.{{}}
</p>
""").format(file_svg)
COLOURED_NOTICE = " Where possible, edges connecting nodes are given " \
"different colours to make them easier to distinguish " \
"in large graphs."
del call_svg
del file_svg
del type_svg
del mod_svg
del dot
del sub
del func
del intr
| gpl-3.0 | 925,564,929,856,560,600 | 39.329716 | 206 | 0.522881 | false | 3.937653 | false | false | false |
fedora-desktop-tests/evolution | features/steps/calendar_event_editor.py | 1 | 22024 | # -*- coding: UTF-8 -*-
from behave import step, then
from dogtail.predicate import GenericPredicate
from dogtail.tree import root
from dogtail.rawinput import keyCombo, typeText
from time import sleep
from behave_common_steps import wait_until
import datetime
import os
@step(u'Create new appointment')
def create_new_appointment(context):
context.app.instance.menu('File').click()
context.app.instance.menu('File').menu('New').point()
context.app.instance.menu('File').menu('New').menuItem('Appointment').click()
context.execute_steps(u"""
* Event editor with title "Appointment - No Summary" is displayed
""")
@step(u'Create new all day appointment')
def create_new_all_day_appointment(context):
context.app.instance.menu('File').click()
context.app.instance.menu('File').menu('New').point()
context.app.instance.menu('File').menu('New').menuItem('All Day Appointment').click()
context.execute_steps(u"""
* Event editor with title "Appointment - No Summary" is displayed
""")
@step(u'Create new meeting')
def create_new_meeting(context):
context.app.instance.menu('File').click()
context.app.instance.menu('File').menu('New').point()
context.app.instance.menu('File').menu('New').menuItem('Meeting').click()
context.execute_steps(u"""
* Event editor with title "Meeting - No Summary" is displayed
""")
@step(u'Event editor with title "{name}" is displayed')
def event_editor_with_name_displayed(context, name):
context.app.event_editor = context.app.instance.window(name)
@step(u'Save the meeting and choose not to send meeting invitations')
def save_meeting(context):
save_meeting_and_send_notifications(context, send=False)
@step(u'Save the meeting and send meeting invitations')
def save_meeting_and_send_notifications(context, send=True):
context.app.event_editor.button('Save and Close').click()
sleep(3)
if context.app.instance.findChildren(GenericPredicate(roleName='dialog', name='')):
dialog = context.app.instance.dialog(' ')
dialog.grabFocus()
if send:
dialog.button('Send').doActionNamed('click')
else:
dialog.button('Do not Send').doActionNamed('click')
assert wait_until(lambda x: x.dead, dialog),\
"Meeting invitations dialog was not closed"
assert wait_until(lambda x: x.dead, context.app.event_editor),\
"Meeting editor was not closed"
@step(u'Save the event and close the editor')
def save_event(context):
context.app.event_editor.button('Save and Close').click()
assert wait_until(lambda x: x.dead and not x.showing, context.app.event_editor),\
"Meeting editor is still visible"
@step(u'Set "{field}" field in event editor to "{value}"')
def set_field_in_event_editor(context, field, value):
if field == 'Calendar:':
# This cmb has no 'click' action, so use a custom set of actions
cmb = context.app.event_editor.childLabelled('Calendar:')
cmb.doActionNamed('press')
# Calendars have 4 spaces before the actual name
        cmb.menuItem('    %s' % value).click()
text_fields = ['Summary:', 'Location:', 'Description:']
if field in text_fields:
context.app.event_editor.childLabelled(field).text = value
if field == 'Time:':
if ' ' in value:
(day, time) = value.split(' ')
context.app.event_editor.\
childLabelled('Time:').textentry('').text = time
else:
day = value
context.app.event_editor.child('Date').text = day
if field in ["For:", "Until:"]:
combo = context.app.event_editor.\
child(name='for', roleName='menu item').\
findAncestor(GenericPredicate(roleName='combo box'))
field_combovalue = field.lower()[:-1]
if combo.combovalue != field_combovalue:
combo.combovalue = field_combovalue
if field_combovalue == 'for':
(hours, minutes) = value.split(':')
spins = context.app.event_editor.findChildren(
GenericPredicate(roleName='spin button'))
spins[0].text = hours
spins[0].grab_focus()
keyCombo('<Enter>')
spins[1].text = minutes
spins[1].grab_focus()
keyCombo('<Enter>')
else:
filler = context.app.event_editor.child('until').parent.\
findChildren(GenericPredicate(roleName='filler'))[-2]
if ' ' in value:
(day, time) = value.split(' ')
filler.child(roleName='combo box').textentry('').text = time
else:
day = value
filler.child('Date').text = day
if field == 'Timezone:':
context.app.event_editor.button('Select Timezone').click()
dlg = context.app.instance.dialog('Select a Time Zone')
dlg.child('Timezone drop-down combination box').combovalue = value
dlg.button('OK').click()
assert wait_until(lambda x: x.dead, dlg),\
"'Select Time Zone' dialog was not closed"
if field == 'Categories:':
context.app.event_editor.button('Categories...').click()
context.app.categories = context.app.instance.dialog('Categories')
for category in value.split(','):
context.execute_steps(u'* Check "%s" category' % category.strip())
context.execute_steps(u'* Close categories dialog')
@step(u'Set the following fields in event editor')
def set_several_fields(context):
for row in context.table:
set_field_in_event_editor(context, row['Field'], row['Value'])
@step(u'"{field}" field is set to "{value}"')
def field_is_set_to(context, field, value):
value = value.strip()
text_fields = ['Summary:', 'Location:', 'Description:']
if field in text_fields:
actual = context.app.event_editor.childLabelled(field).text
context.assertion.assertEquals(actual, value)
if field == 'Time:':
day = context.app.event_editor.child('Date').text
if ' ' in value:
time = context.app.event_editor.\
childLabelled('Time:').textentry('').text
actual = '%s %s' % (day, time)
context.assertion.assertEquals(actual.lower(), value.lower())
else:
# All day event
context.assertion.assertEquals(day, value)
time_showing = context.app.event_editor.childLabelled('Time:').showing
context.assertion.assertFalse(
time_showing, "Time controls are displayed in all day event")
if field == 'For:':
# Ensure that correct value is set in combobox
combo = context.app.event_editor.child(name='for', roleName='combo box')
spins = context.app.event_editor.findChildren(GenericPredicate(roleName='spin button'))
if ' ' in value:
            actual = '%s:%s' % (spins[0].text, spins[1].text)
context.assertion.assertEquals(actual.lower(), value.lower())
else:
context.assertion.assertFalse(
spins[0].showing, "Time controls are displayed in all day event")
context.assertion.assertFalse(
spins[1].showing, "Time controls are displayed in all day event")
if field == 'Until:':
combo = context.app.event_editor.child(name='until', roleName='combo box')
filler = combo.parent.findChildren(GenericPredicate(roleName='filler'))[-2]
day = filler.child('Date').text
if ' ' in value:
time = filler.child(roleName='combo box').textentry('').text
actual = '%s %s' % (day, time)
context.assertion.assertEquals(actual.lower(), value.lower())
else:
# All day event
context.assertion.assertEquals(day, value)
time_showing = filler.child(roleName='combo box').textentry('').showing
context.assertion.assertFalse(
time_showing, "Time controls are displayed in all day event")
if field == 'Calendar:':
cmb = context.app.event_editor.childLabelled('Calendar:')
actual = cmb.combovalue.strip()
context.assertion.assertEquals(actual, value)
if field == 'Timezone:':
actual = context.app.event_editor.childLabelled('Time zone:').text
context.assertion.assertEquals(actual, value)
if field == 'Categories:':
actual = context.app.event_editor.textentry('Categories').text
context.assertion.assertEquals(actual, value)
@step(u'Event has the following details')
def event_has_fields_set(context):
for row in context.table:
context.execute_steps(u"""
* "%s" field is set to "%s"
""" % (row['Field'], row['Value']))
@step(u'Add "{name}" as attendee')
def add_user_as_attendee_with_role(context, name):
context.app.event_editor.button('Add').click()
# Input user name
typeText(name)
keyCombo('<Enter>')
# Evolution doesn't have a11y set for cell renderers, so role cannot be set
#table = context.app.event_editor.child(roleName='table')
# User will be added as a last row, so last cell is user role selector
#cell = table.findChildren(GenericPredicate(roleName='table cell'))[-1]
#cell.click()
@step(u'Remove "{name}" from attendee list')
def remove_user_from_attendee_list(context, name):
context.app.event_editor.child(name=name, roleName='table cell').click()
context.app.event_editor.button('Remove').click()
@step(u'Select first suggestion as attendee typing "{name}"')
def select_first_suggestion_as_attendee(context, name):
context.app.event_editor.button('Add').click()
typeText(name)
sleep(1)
    # Again, cell renderer is not available here
keyCombo("<Down>")
keyCombo("<Enter>")
sleep(0.5)
@then(u'"{user}" as "{role}" is present in attendees list')
def user_with_role_is_present_in_attendees_list(context, user, role):
table = context.app.event_editor.child(roleName='table')
cells = table.findChildren(GenericPredicate(roleName='table cell'))
found_indexes = [cells.index(c) for c in cells if c.text == user]
if found_indexes == []:
raise AssertionError("User '%s' was not found in attendees list" % user)
role_cell_index = found_indexes[0] + 1
if role_cell_index > len(cells):
raise AssertionError("Cannot find role cell for user '%s'" % user)
actual = cells[role_cell_index].text
context.assertion.assertEquals(actual, role)
@step(u'The following attendees are present in the list')
def verify_attendees_list_presence(context):
for row in context.table:
context.execute_steps(u"""
Then "%s" as "%s" is present in attendees list
""" % (row['Name'], row['Role']))
@step(u'Open attendees dialog')
def open_attendees_dialog(context):
context.app.event_editor.button('Attendees...').click()
context.app.attendees = context.app.instance.dialog('Attendees')
@step(u'Close attendees dialog')
def close_attendees_dialog(context):
context.app.attendees.button('Close').click()
assert wait_until(lambda x: not x.showing, context.app.attendees),\
"Attendees dialog was not closed"
@step(u'Change addressbook to "{name}" in attendees dialog')
def change_addressbook_in_attendees_dialog(context, name):
context.app.attendees.childLabelled('Address Book:').combovalue = ' %s' % name
@step(u'Add "{name}" contact as "{role}" in attendees dialog')
def add_contact_as_role_in_attendees_dialog(context, name, role):
contacts = context.app.attendees.childLabelled('Contacts').child(roleName='table')
contact = contacts.child(name)
contact.select()
btn = context.app.attendees.child('%ss' % role).parent.parent.parent.button('Add')
btn.click()
@step(u'Add "{user}" as "{role}" using Attendees dialog')
def add_contact_as_role_using_attendees_dialog(context, user, role):
context.execute_steps(u"""
* Open attendees dialog
* Add "%s" contact as "%s" in attendees dialog
* Close attendees dialog
""" % (user, role))
@step(u'Add "{user}" as "{role}" using Attendees dialog from "{addressbook}" addressbook')
def add_contact_from_addressbook_as_role_using_attendees_dialog(context, user, role, addressbook):
context.execute_steps(u"""
* Open attendees dialog
* Change addressbook to "%s" in attendees dialog
* Add "%s" contact as "%s" in attendees dialog
* Close attendees dialog
""" % (addressbook, user, role))
@step(u'Search for "{username}" in Attendees dialog in "{addressbook}" addressbook')
def search_for_user_in_attendees_dialog(context, username, addressbook):
context.execute_steps(u"""
* Open attendees dialog
* Change addressbook to "%s" in attendees dialog
""" % addressbook)
context.app.attendees.childLabelled('Search:').text = username
sleep(1)
@step(u'Show time zone in event editor')
def show_timezone(context):
if not context.app.event_editor.child('Time zone:').showing:
context.app.event_editor.menu('View').click()
context.app.event_editor.menu('View').menuItem('Time Zone').click()
@step(u'Show categories in event editor')
def show_categories(context):
if not context.app.event_editor.textentry('Categories').showing:
context.app.event_editor.menu('View').click()
context.app.event_editor.menu('View').menuItem('Categories').click()
@step(u'Set event start time in {num} minute')
@step(u'Set event start time in {num} minutes')
def set_event_start_time_in(context, num):
time = context.app.event_editor.childLabelled('Time:').textentry('').text
time_object = datetime.datetime.strptime(time.strip(), '%H:%M %p')
new_time_object = time_object + datetime.timedelta(minutes=int(num))
new_time = new_time_object.strftime('%H:%M %p')
context.app.event_editor.childLabelled('Time:').textentry('').text = new_time
context.app.event_editor.childLabelled('Time:').textentry('').keyCombo('<Enter>')
@step(u'Set event start date in {num} day')
@step(u'Set event start date in {num} days')
def set_event_start_date_in(context, num):
date = context.app.event_editor.child('Date').text
date_object = datetime.datetime.strptime(date, '%m/%d/%Y')
new_date_object = date_object + datetime.timedelta(days=int(num))
new_date = new_date_object.strftime('%m/%d/%Y')
context.app.event_editor.child('Date').text = ''
context.app.event_editor.child('Date').typeText(new_date)
context.app.event_editor.childLabelled('Time:').textentry('').click()
@step(u'Open reminders window')
def open_reminders_window(context):
context.app.event_editor.button('Reminders').click()
context.app.reminders = context.app.instance.dialog('Reminders')
@step(u'Select predefined reminder "{name}"')
def select_predefined_reminder(context, name):
context.app.reminders.child(roleName='combo box').combovalue = name
@step(u'Select custom reminder')
def select_custom_reminder(context):
context.app.reminders.child(roleName='combo box').combovalue = 'Customize'
@step(u'Add new reminder with "{action}" {num} {period} {before_after} "{start_end}"')
def add_new_custom_reminder(context, action, num, period, before_after, start_end):
context.app.reminders.button('Add').click()
dialog = context.app.instance.dialog('Add Reminder')
for value in [action, period, before_after, start_end]:
combo = dialog.child(value, roleName='menu item').parent.parent
if combo.combovalue != value:
combo.combovalue = value
spin_button = dialog.child(roleName='spin button')
spin_button.text = num
spin_button.grab_focus()
keyCombo('<Enter>')
dialog.button('OK').click()
assert wait_until(lambda x: x.dead, dialog), "Add Reminder dialog was not closed"
@step(u'Add new reminder with the following options')
def add_new_reminder_with_following_options(context):
context.app.reminders.button('Add').click()
dialog = context.app.instance.dialog('Add Reminder')
for row in context.table:
if row['Field'] in ['Action', 'Period', 'Before/After', 'Start/End']:
value = row['Value']
combo = dialog.child(value, roleName='menu item').parent.parent
if combo.combovalue != value:
combo.combovalue = value
elif row['Field'] == 'Num':
spin_button = dialog.child(roleName='spin button')
spin_button.text = row['Value']
spin_button.grab_focus()
keyCombo('<Enter>')
elif row['Field'] == 'Message':
dialog.child('Custom message').click()
# dialog.childLabelled('Message:').text = row['Value']
dialog.child(roleName='text').text = row['Value']
else:
dialog.childLabelled(row['Field']).text = row['Value']
dialog.button('OK').click()
assert wait_until(lambda x: x.dead, dialog), "Add Reminder dialog was not closed"
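# Illustrative feature-file usage of the step above (values are hypothetical; the
# 'Field' column accepts Action, Num, Period, Before/After, Start/End and Message):
#
#   * Add new reminder with the following options
#     | Field        | Value   |
#     | Num          | 15      |
#     | Period       | minutes |
#     | Before/After | before  |
#     | Start/End    | start   |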
@step(u'Close reminders window')
def close_reminders_window(context):
context.app.reminders.button('Close').click()
assert wait_until(lambda x: not x.showing, context.app.reminders),\
"Reminders dialog was not closed"
@step(u'Appointment reminders window pops up in {num:d} minute')
@step(u'Appointment reminders window pops up in {num:d} minutes')
def appointment_reminders_window_pops_up(context, num):
alarm_notify = root.application('evolution-alarm-notify')
assert wait_until(
lambda x: x.findChildren(GenericPredicate(name='Appointments')) != [],
element=alarm_notify, timeout=60 * int(num)),\
"Appointments window didn't appear"
context.app.alarm_notify = alarm_notify.child(name='Appointments')
@step(u'Appointment reminders window contains reminder for "{name}" event')
def alarm_notify_contains_event(context, name):
reminders = context.app.alarm_notify.findChildren(
GenericPredicate(roleName='table cell'))
matching_reminders = [x for x in reminders if name in x.text]
assert matching_reminders != [], "Cannot find reminder '%s'" % name
@step(u'Application trigger warning pops up in {num} minutes')
def application_trigger_warning_pops_up(context, num):
alarm_notify = root.application('evolution-alarm-notify')
assert wait_until(
lambda x: x.findChildren(GenericPredicate(name='Warning', roleName='dialog')) != [],
element=alarm_notify, timeout=60 * int(num)),\
"Warning window didn't appear"
@step(u'{action} to run the specified program in application trigger warning window')
def action_to_run_specified_program(context, action):
alarm_notify = root.application('evolution-alarm-notify')
dialog = alarm_notify.dialog('Warning')
if action == 'Agree':
dialog.button('Yes').click()
else:
dialog.button('No').click()
@step(u'"{app}" is present in process list')
def app_is_present_in_process_list(context, app):
try:
assert root.application(app)
finally:
os.system("killall gnome-screenshot")
@step(u'"{app}" is not present in process list')
def app_is_not_present_in_process_list(context, app):
try:
app_names = map(lambda x: x.name, root.applications())
assert app not in app_names
finally:
os.system("killall %s" % app)
@step(u'Add "{filepath}" attachment in event editor')
def add_attachement_in_event_editor(context, filepath):
context.app.event_editor.button("Add Attachment...").click()
context.execute_steps(u"""
* file select dialog with name "Add Attachment" is displayed
* in file select dialog I select "%s"
""" % filepath)
@step(u'Save attachment "{name}" in event editor to "{file}"')
def save_attachment_to_file(context, name, file):
# Switch to List View
combo = context.app.event_editor.child(roleName='menu item', name='List View').parent.parent
if combo.name != 'List View':
combo.combovalue = 'List View'
# Right-click on the cell
cells = context.app.event_editor.findChildren(GenericPredicate(roleName='table cell'))
matching_cells = [x for x in cells if name in x.name]
if matching_cells == []:
raise RuntimeError("Cannot find attachment containing '%s'" % name)
cell = matching_cells[0]
cell.click(button=3)
# Get popup menu
popup_menu = context.app.instance.child(name='Add Attachment...', roleName='menu item').parent
popup_menu.child('Save As').click()
context.execute_steps(u"""
* Save attachment "%s" in mail viewer to "%s"
""" % (name, file))
@step(u'Display attendee {field}')
def show_attendee_field(context, field):
context.app.event_editor.menu('View').click()
menuItem = context.app.event_editor.menu('View').menuItem('%s Field' % field.capitalize())
if not menuItem.checked:
menuItem.click()
else:
keyCombo('<Esc>')
def get_contact_parameter_by_name(context, contact_name, column):
# Get attendees table
table = context.app.event_editor.child(roleName='table')
# Get header offset
headers = table.findChildren(GenericPredicate(roleName='table column header'))
header_names = [x.name for x in headers]
offset = header_names.index(column)
# Get table cells
cells = table.findChildren(GenericPredicate(roleName='table cell'))
found_indexes = [cells.index(c) for c in cells if c.text == str(contact_name)]
if found_indexes == []:
raise AssertionError("User '%s' was not found in attendees list" % contact_name)
cell_index = found_indexes[0] + offset
if cell_index > len(cells):
raise AssertionError("Cannot find '%s' cell for user '%s'" % (column, contact_name))
return cells[cell_index]
@step(u'Attendee "{name}" has "{status}" status')
def attendee_has_status(context, name, status):
actual = get_contact_parameter_by_name(context, name, 'Status').text
context.assertion.assertEquals(actual, status)
| gpl-2.0 | -8,544,367,351,086,763,000 | 38.188612 | 98 | 0.65583 | false | 3.618203 | false | false | false |
elioth010/lugama | src/model/orm/Model.py | 1 | 1482 | '''
Created on Jan 8, 2016
@author: elioth010
'''
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.session import sessionmaker
from sqlalchemy.sql.expression import text
from model.orm.DB import DB
class Model(DB):
'''
classdocs
'''
base = None
SessionFactory = None
session = None
def __init__(self):
'''
Constructor
'''
self.base = declarative_base()
self.SessionFactory = sessionmaker(bind=self.engine)
self.session = self.SessionFactory()
def save(self):
self.session = self.SessionFactory()
try:
self.session.add(self)
self.session.commit()
except:
self.session.rollback()
raise
def where(self, *args):
self.session = self.SessionFactory()
try:
            return self.session.query(type(self)).filter(*args).all()
except:
self.session.rollback()
raise
def find(self, id_table):
self.session = self.SessionFactory()
try:
            return self.session.query(type(self)).filter(text('id='+id_table)).all()
except:
self.session.rollback()
raise
def delete(self):
self.session = self.SessionFactory()
try:
self.session.delete(self)
self.session.commit()
except:
self.session.rollback()
raise
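# Minimal usage sketch (illustrative; assumes a concrete subclass mapped through
# SQLAlchemy's declarative machinery -- class and column names are made up):
#   class User(Model):
#       __tablename__ = 'users'
#       ...
#   user = User()
#   user.save()              # INSERT + commit, rolls back on error
#   rows = user.find('42')   # SELECT ... WHERE id=42
#   user.delete()            # DELETE + commit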
| gpl-2.0 | -388,907,434,122,378,400 | 22.903226 | 78 | 0.557355 | false | 4.384615 | false | false | false |
mushtaqak/edx-platform | lms/envs/devstack.py | 1 | 6327 | """
Specific overrides to the base prod settings to make development easier.
"""
from .aws import * # pylint: disable=wildcard-import, unused-wildcard-import
# Don't use S3 in devstack, fall back to filesystem
del DEFAULT_FILE_STORAGE
MEDIA_ROOT = "/edx/var/edxapp/uploads"
DEBUG = True
USE_I18N = True
TEMPLATE_DEBUG = True
SITE_NAME = 'localhost:8000'
PLATFORM_NAME = ENV_TOKENS.get('PLATFORM_NAME', 'Devstack')
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
################################ LOGGERS ######################################
import logging
# Disable noisy loggers
for pkg_name in ['track.contexts', 'track.middleware', 'dd.dogapi']:
logging.getLogger(pkg_name).setLevel(logging.CRITICAL)
################################ EMAIL ########################################
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
FEATURES['ENABLE_INSTRUCTOR_EMAIL'] = True # Enable email for all Studio courses
FEATURES['REQUIRE_COURSE_EMAIL_AUTH'] = False # Give all courses email (don't require django-admin perms)
########################## ANALYTICS TESTING ########################
ANALYTICS_SERVER_URL = "http://127.0.0.1:9000/"
ANALYTICS_API_KEY = ""
# Set this to the dashboard URL in order to display the link from the
# dashboard to the Analytics Dashboard.
ANALYTICS_DASHBOARD_URL = None
################################ DEBUG TOOLBAR ################################
INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo')
MIDDLEWARE_CLASSES += (
'django_comment_client.utils.QueryCountDebugMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar_mongo.panel.MongoDebugPanel',
'debug_toolbar.panels.profiling.ProfilingPanel',
)
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': 'lms.envs.devstack.should_show_debug_toolbar'
}
def should_show_debug_toolbar(_):
return True # We always want the toolbar on devstack regardless of IP, auth, etc.
########################### PIPELINE #################################
PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT)
########################### VERIFIED CERTIFICATES #################################
FEATURES['AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'] = True
FEATURES['ENABLE_PAYMENT_FAKE'] = True
CC_PROCESSOR_NAME = 'CyberSource2'
CC_PROCESSOR = {
'CyberSource2': {
"PURCHASE_ENDPOINT": '/shoppingcart/payment_fake/',
"SECRET_KEY": 'abcd123',
"ACCESS_KEY": 'abcd123',
"PROFILE_ID": 'edx',
}
}
########################### External REST APIs #################################
FEATURES['ENABLE_OAUTH2_PROVIDER'] = True
OAUTH_OIDC_ISSUER = 'http://127.0.0.1:8000/oauth2'
FEATURES['ENABLE_MOBILE_REST_API'] = True
FEATURES['ENABLE_VIDEO_ABSTRACTION_LAYER_API'] = True
########################## SECURITY #######################
FEATURES['ENFORCE_PASSWORD_POLICY'] = False
FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS'] = False
FEATURES['SQUELCH_PII_IN_LOGS'] = False
FEATURES['PREVENT_CONCURRENT_LOGINS'] = False
FEATURES['ADVANCED_SECURITY'] = False
PASSWORD_MIN_LENGTH = None
PASSWORD_COMPLEXITY = {}
########################### Milestones #################################
FEATURES['MILESTONES_APP'] = True
########################### Entrance Exams #################################
FEATURES['ENTRANCE_EXAMS'] = True
################################ COURSE LICENSES ################################
FEATURES['LICENSING'] = True
########################## Courseware Search #######################
FEATURES['ENABLE_COURSEWARE_SEARCH'] = False
SEARCH_ENGINE = "search.elastic.ElasticSearchEngine"
########################## Dashboard Search #######################
FEATURES['ENABLE_DASHBOARD_SEARCH'] = True
########################## Certificates Web/HTML View #######################
FEATURES['CERTIFICATES_HTML_VIEW'] = True
########################## Course Discovery #######################
from django.utils.translation import ugettext as _
LANGUAGE_MAP = {'terms': {lang: display for lang, display in ALL_LANGUAGES}, 'name': _('Language')}
COURSE_DISCOVERY_MEANINGS = {
'org': {
'name': _('Organization'),
},
'modes': {
'name': _('Course Type'),
'terms': {
'honor': _('Honor'),
'verified': _('Verified'),
},
},
'language': LANGUAGE_MAP,
}
FEATURES['ENABLE_COURSE_DISCOVERY'] = True
FEATURES['COURSES_ARE_BROWSEABLE'] = True
HOMEPAGE_COURSE_MAX = 9
# Software secure fake page feature flag
FEATURES['ENABLE_SOFTWARE_SECURE_FAKE'] = True
# Setting for the testing of Software Secure Result Callback
VERIFY_STUDENT["SOFTWARE_SECURE"] = {
"API_ACCESS_KEY": "BBBBBBBBBBBBBBBBBBBB",
"API_SECRET_KEY": "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
}
########################## Shopping cart ##########################
FEATURES['ENABLE_SHOPPING_CART'] = True
FEATURES['STORE_BILLING_INFO'] = True
FEATURES['ENABLE_PAID_COURSE_REGISTRATION'] = True
FEATURES['ENABLE_COSMETIC_DISPLAY_PRICE'] = True
########################## Third Party Auth #######################
if FEATURES.get('ENABLE_THIRD_PARTY_AUTH') and 'third_party_auth.dummy.DummyBackend' not in AUTHENTICATION_BACKENDS:
AUTHENTICATION_BACKENDS = ['third_party_auth.dummy.DummyBackend'] + list(AUTHENTICATION_BACKENDS)
#####################################################################
# See if the developer has any local overrides.
try:
from .private import * # pylint: disable=wildcard-import
except ImportError:
pass
#####################################################################
# Lastly, run any migrations, if needed.
MODULESTORE = convert_module_store_setting_if_needed(MODULESTORE)
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
| agpl-3.0 | 8,309,838,323,921,532,000 | 32.47619 | 126 | 0.595543 | false | 3.825272 | false | false | false |
adbar/htmldate | htmldate/validators.py | 1 | 7114 | # pylint:disable-msg=E0611,I1101
"""
Filters for date parsing and date validators.
"""
## This file is available from https://github.com/adbar/htmldate
## under GNU GPL v3 license
# standard
import datetime
import logging
import time
from collections import Counter
from functools import lru_cache
from .settings import MIN_DATE, MIN_YEAR, LATEST_POSSIBLE, MAX_YEAR
LOGGER = logging.getLogger(__name__)
LOGGER.debug('date settings: %s %s %s', MIN_YEAR, LATEST_POSSIBLE, MAX_YEAR)
@lru_cache(maxsize=32)
def date_validator(date_input, outputformat, earliest=MIN_DATE, latest=LATEST_POSSIBLE):
"""Validate a string w.r.t. the chosen outputformat and basic heuristics"""
# try if date can be parsed using chosen outputformat
if not isinstance(date_input, datetime.date):
# speed-up
try:
if outputformat == '%Y-%m-%d':
dateobject = datetime.datetime(int(date_input[:4]),
int(date_input[5:7]),
int(date_input[8:10]))
# default
else:
dateobject = datetime.datetime.strptime(date_input, outputformat)
except ValueError:
return False
else:
dateobject = date_input
# basic year validation
year = int(datetime.date.strftime(dateobject, '%Y'))
if MIN_YEAR <= year <= MAX_YEAR:
# not newer than today or stored variable
try:
if earliest <= dateobject.date() <= latest:
return True
except AttributeError:
if earliest <= dateobject <= latest:
return True
LOGGER.debug('date not valid: %s', date_input)
return False
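# Illustrative checks (assuming the default MIN_DATE/LATEST_POSSIBLE bounds from settings):
#   date_validator('2017-09-01', '%Y-%m-%d')   # True if 2017 lies within the bounds
#   date_validator('3017-09-01', '%Y-%m-%d')   # False, year above MAX_YEAR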
def output_format_validator(outputformat):
"""Validate the output format in the settings"""
# test in abstracto
    if not isinstance(outputformat, str) or '%' not in outputformat:
logging.error('malformed output format: %s', outputformat)
return False
# test with date object
dateobject = datetime.datetime(2017, 9, 1, 0, 0)
try:
dateobject.strftime(outputformat)
except (NameError, TypeError, ValueError) as err:
logging.error('wrong output format or format type: %s %s', outputformat, err)
return False
return True
@lru_cache(maxsize=32)
def plausible_year_filter(htmlstring, pattern, yearpat, tocomplete=False):
"""Filter the date patterns to find plausible years only"""
# slow!
allmatches = pattern.findall(htmlstring)
occurrences = Counter(allmatches)
toremove = set()
# LOGGER.debug('occurrences: %s', occurrences)
for item in occurrences.keys():
# scrap implausible dates
try:
if tocomplete is False:
potential_year = int(yearpat.search(item).group(1))
else:
lastdigits = yearpat.search(item).group(1)
if lastdigits[0] == '9':
potential_year = int('19' + lastdigits)
else:
potential_year = int('20' + lastdigits)
except AttributeError:
LOGGER.debug('not a year pattern: %s', item)
toremove.add(item)
else:
if potential_year < MIN_YEAR or potential_year > MAX_YEAR:
LOGGER.debug('no potential year: %s', item)
toremove.add(item)
# occurrences.remove(item)
# continue
# preventing dictionary changed size during iteration error
for item in toremove:
del occurrences[item]
return occurrences
def compare_values(reference, attempt, outputformat, original_date):
"""Compare the date expression to a reference"""
timestamp = time.mktime(datetime.datetime.strptime(attempt, outputformat).timetuple())
if original_date is True:
if reference == 0 or timestamp < reference:
reference = timestamp
else:
if timestamp > reference:
reference = timestamp
return reference
@lru_cache(maxsize=32)
def filter_ymd_candidate(bestmatch, pattern, original_date, copyear, outputformat, min_date, max_date):
"""Filter free text candidates in the YMD format"""
if bestmatch is not None:
pagedate = '-'.join([bestmatch.group(1), bestmatch.group(2), bestmatch.group(3)])
if date_validator(pagedate, '%Y-%m-%d', earliest=min_date, latest=max_date) is True:
if copyear == 0 or int(bestmatch.group(1)) >= copyear:
LOGGER.debug('date found for pattern "%s": %s', pattern, pagedate)
return convert_date(pagedate, '%Y-%m-%d', outputformat)
## TODO: test and improve
#if original_date is True:
# if copyear == 0 or int(bestmatch.group(1)) <= copyear:
# LOGGER.debug('date found for pattern "%s": %s', pattern, pagedate)
# return convert_date(pagedate, '%Y-%m-%d', outputformat)
#else:
# if copyear == 0 or int(bestmatch.group(1)) >= copyear:
# LOGGER.debug('date found for pattern "%s": %s', pattern, pagedate)
# return convert_date(pagedate, '%Y-%m-%d', outputformat)
return None
def convert_date(datestring, inputformat, outputformat):
"""Parse date and return string in desired format"""
# speed-up (%Y-%m-%d)
if inputformat == outputformat:
return str(datestring)
# date object (speedup)
if isinstance(datestring, datetime.date):
return datestring.strftime(outputformat)
# normal
dateobject = datetime.datetime.strptime(datestring, inputformat)
return dateobject.strftime(outputformat)
def check_extracted_reference(reference, outputformat, min_date, max_date):
'''Test if the extracted reference date can be returned'''
if reference > 0:
dateobject = datetime.datetime.fromtimestamp(reference)
converted = dateobject.strftime(outputformat)
if date_validator(converted, outputformat, earliest=min_date, latest=max_date) is True:
return converted
return None
def get_min_date(min_date):
'''Validates the minimum date and/or defaults to earliest plausible date'''
if min_date is not None:
try:
# internal conversion from Y-M-D format
min_date = datetime.date(int(min_date[:4]),
int(min_date[5:7]),
int(min_date[8:10]))
except ValueError:
min_date = MIN_DATE
else:
min_date = MIN_DATE
return min_date
def get_max_date(max_date):
'''Validates the maximum date and/or defaults to latest plausible date'''
if max_date is not None:
try:
# internal conversion from Y-M-D format
max_date = datetime.date(int(max_date[:4]),
int(max_date[5:7]),
int(max_date[8:10]))
except ValueError:
max_date = LATEST_POSSIBLE
else:
max_date = LATEST_POSSIBLE
return max_date
| gpl-3.0 | -6,857,535,308,415,212,000 | 36.640212 | 103 | 0.606129 | false | 4.051253 | true | false | false |
openqt/algorithms | extras/kaprekar_number.py | 1 | 1328 | # coding=utf-8
"""
卡布列克数
http://group.jobbole.com/26887/
有一种数被称为卡布列克数,其形式如:45 * 45 = 2025 并且 20+25=45,这样 45 就是一个
卡布列克数。
它标准定义如下:
若正整数X在N进制下的平方可以分割为二个数字,而这二个数字相加后恰等于X,那么X就是
N进制下的卡布列克数。
分解后的数字必须是正整数才可以,例如:10*10=100 并且 10+0=10,因为0不是正整数,
所以10不是卡布列克数。
现在题目的要求是给定你一个范围[a,b](b大于等于a,a大于等于0),你需要把这个范围内的
卡布列克数全部输出。
样例如下:
输入:2 100
输出:9 45 55 99
"""
from __future__ import print_function
def is_kaprekar(n):
level, sq = 10, n * n
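    # Split the square at every decimal position: divmod(sq, level) with
    # level = 10**k yields a left part `a` and a right part `b`; n is a
    # Kaprekar number when some split satisfies b > 0 and a + b == n.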
while level < sq:
a, b = divmod(sq, level)
if b > 0 and a + b == n:
return level
level *= 10
return 0
def kaprekar_number(start, stop=None):
while True:
if is_kaprekar(start):
yield start
if stop and start >= stop:
break
start += 1
if __name__ == '__main__':
print(is_kaprekar(45))
print(is_kaprekar(40))
print(is_kaprekar(100))
print([i for i in kaprekar_number(2, 1000)])
| gpl-3.0 | -7,367,578,442,279,746,000 | 17.88 | 55 | 0.595339 | false | 1.636049 | false | false | false |
google-research/google-research | simulation_research/signal_processing/spherical/spherical_harmonics.py | 1 | 5602 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A library for computing spherical harmonics.
The spherical harmonics are special functions defined on the surface of a
sphere, which are often used to solve partial differential equations in many
scientific applications. A physical field defined on the surface of a sphere can
be written as a linear superposition of the spherical harmonics as the latter
form a complete set of orthogonal basis functions. The set of spherical
harmonics denoted `Y_l^m(θ, φ)` is often called Laplace's spherical
harmonics of degree `l` and order `m` and `θ` and `φ` are colatitude and
longitude, respectively. In addition, the spherical harmonics can be expressed
as `Y_l^m(θ, φ) = P_l^m(θ) \exp(i m φ)`, in which
`P_l^m(θ)` is the associated Legendre function with embedded normalization
constant \sqrt(1 / (4 𝛑)). We refer to the function f(θ, φ) with finite induced
norm as the signal on the sphere, where the colatitude θ ∈ [0, π] and longitude
φ ∈ [0, 2π). The signal on the sphere can be written as a linear superpostiion
of the spherical harmoincs, which form a complete set of orthonormal basis
functions for degree l ≥ 0 and order |m| ≤ l. In this library, θ and φ can be
non-uniformly sampled.
"""
import jax.numpy as jnp
import numpy as np
from simulation_research.signal_processing.spherical import associated_legendre_function
class SphericalHarmonics(object):
"""Computes the spherical harmonics on TPUs."""
def __init__(self,
l_max,
theta,
phi):
"""Constructor.
Args:
l_max: The maximum degree of the associated Legendre function. The degrees
are `[0, 1, 2, ..., l_max]`. The orders `m` are `[-l_max, -l_max+1,
0, 1, ..., l_max]`.
theta: A vector containing the sampling points along the colatitude
dimension. The associated Legendre functions are computed at
`cos(θ)`.
phi: A vector containing the sampling points along the longitude, at which
the Vandermonde matrix is computed.
"""
self.l_max = l_max
self.theta = theta
self._cos_theta = jnp.cos(theta)
self.phi = phi
self._legendre = associated_legendre_function.gen_normalized_legendre(
self.l_max, self._cos_theta)
self._vandermonde = self._gen_vandermonde_mat(self.l_max, self.phi)
def _gen_vandermonde_mat(self, l_max, phi):
"""Generates the Vandermonde matrix exp(i m φ).
The Vandermonde matrix has the first dimension along the degrees of the
spherical harmonics and the second dimension along the longitude.
Args:
l_max: See `init`.
phi: See `init`.
Returns:
A complex matrix.
"""
nonnegative_degrees = jnp.arange(l_max+1)
mat_dim0, mat_dim1 = jnp.meshgrid(nonnegative_degrees, phi, indexing='ij')
num_phi = phi.shape[0]
def vandermonde_fn(mat_dim0, mat_dim1, num_pts):
coeff = 1j / num_pts
return jnp.exp(coeff * jnp.multiply(mat_dim0, mat_dim1))
return vandermonde_fn(mat_dim0, mat_dim1, num_phi)
def harmonics_nonnegative_order(self):
"""Computes the spherical harmonics of nonnegative orders.
Returns:
A 4D complex tensor of shape `(l_max + 1, l_max + 1, num_theta, num_phi)`,
where the dimensions are in the sequence of degree, order, colatitude, and
longitude.
"""
return jnp.einsum('ijk,jl->ijkl', self._legendre, self._vandermonde)
def _gen_mask(self):
"""Generates the mask of (-1)^m, m = [0, 1, ..., l_max]."""
mask = np.empty((self.l_max + 1,))
mask[::2] = 1
mask[1::2] = -1
return jnp.asarray((mask))
def harmonics_nonpositive_order(
self, harmonics_nonnegative_order = None):
"""Computes the spherical harmonics of nonpositive orders.
With normalization, the nonnegative order Associated Legendre functions are
`P_l^{-m}(x) = (−1)^m P_l^m(x)`, which implies that
`Y_l^{-m}(θ, φ) = (−1)^m conjugate(Y_l^m(θ, φ))`.
Args:
harmonics_nonnegative_order: A 4D complex tensor representing the
harmonics of nonnegative orders, the shape of which is
        `(l_max + 1, l_max + 1, num_theta, num_phi)` and the dimensions are in
the sequence of degree, order, colatitude, and longitude.
Returns:
A 4D complex tensor of the same shape as `harmonics_nonnegative_order`
representing the harmonics of nonpositive orders.
"""
if harmonics_nonnegative_order is None:
harmonics_nonnegative_order = self.harmonics_nonnegative_order()
mask = self._gen_mask()
return jnp.einsum(
'j,ijkl->ijkl', mask, jnp.conjugate(harmonics_nonnegative_order))
@property
def associated_legendre_fn(self):
"""Associated Legendre function values.
Returns:
A 3D tensor of shape `(l_max + 1, l_max + 1, num_theta)` containing the
      values of the associated Legendre functions, the dimensions of which are in
the sequence of degree, order, and colatitude.
"""
return self._legendre
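# Minimal usage sketch (illustrative; the grid sizes and l_max are arbitrary choices):
#   import jax.numpy as jnp
#   theta = jnp.linspace(0.0, jnp.pi, 64)                       # colatitude samples
#   phi = jnp.linspace(0.0, 2.0 * jnp.pi, 128, endpoint=False)  # longitude samples
#   sph = SphericalHarmonics(l_max=8, theta=theta, phi=phi)
#   y_pos = sph.harmonics_nonnegative_order()    # shape (9, 9, 64, 128)
#   y_neg = sph.harmonics_nonpositive_order(y_pos)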
| apache-2.0 | -9,107,182,008,301,480,000 | 38.183099 | 88 | 0.687275 | false | 3.409314 | false | false | false |
tensorflow/ecosystem | data_service/tf_std_data_server.py | 1 | 2000 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run a tf.data service server."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
flags = tf.compat.v1.app.flags
flags.DEFINE_integer("port", 0, "Port to listen on")
flags.DEFINE_bool("is_dispatcher", False, "Whether to start a dispatcher (as opposed to a worker server")
flags.DEFINE_string("dispatcher_address", "", "The address of the dispatcher. This is only needed when starting a worker server.")
flags.DEFINE_string("worker_address", "", "The address of the worker server. This is only needed when starting a worker server.")
FLAGS = flags.FLAGS
def main(unused_argv):
if FLAGS.is_dispatcher:
print("Starting tf.data service dispatcher")
server = tf.data.experimental.service.DispatchServer(
tf.data.experimental.service.DispatcherConfig(
port=FLAGS.port,
protocol="grpc"))
else:
print("Starting tf.data service worker")
server = tf.data.experimental.service.WorkerServer(
tf.data.experimental.service.WorkerConfig(
port=FLAGS.port,
protocol="grpc",
dispatcher_address=FLAGS.dispatcher_address,
worker_address=FLAGS.worker_address))
server.join()
if __name__ == "__main__":
tf.compat.v1.app.run()
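# Example invocations (illustrative; ports and host names are placeholders):
#   python tf_std_data_server.py --port=5050 --is_dispatcher=True
#   python tf_std_data_server.py --port=5051 \
#       --dispatcher_address=dispatcher-host:5050 \
#       --worker_address=worker-host:5051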
| apache-2.0 | 4,136,536,005,482,894,300 | 38.215686 | 130 | 0.688 | false | 4.192872 | false | false | false |
abramhindle/slowdraw | slowdraw.py | 1 | 5288 | #!/usr/bin/env python
''' Slowdraw watches an image file and makes animations out of the changes
'''
import sys
import cv2
import cv
import numpy as np
import logging
import time
import argparse
import watchdog
import os.path
import pickle
import math
from watchdog.observers import Observer
parser = argparse.ArgumentParser(description='slowdraw')
parser.add_argument('-W', default=1024, help='Width of window')
parser.add_argument('-H', default=768, help='Height of window')
parser.add_argument('-strict', default=False, help='Strictness')
parser.add_argument('path', help='Path of file to watch')
args = parser.parse_args()
full_w = int(args.W)
full_h = int(args.H)
strictness = bool(args.strict)
def new_rgb(width,height):
return np.zeros((height,width,3), np.uint8)
fullscreen_buffer = new_rgb(full_w,full_h)
logging.basicConfig(stream = sys.stderr, level=logging.INFO)
load_queue = []
class ModListener(watchdog.events.FileSystemEventHandler):
def __init__(self, handler):
super(ModListener, self).__init__()
self.queue = []
self.handler = handler;
def on_modified(self, event):
logging.info("Modified: "+event.src_path)
if ((not strictness and
os.path.dirname(args.path) == os.path.dirname(event.src_path))
or event.src_path == args.path):
logging.info( "Recorded Modified: " + event.src_path )
self.queue.append( event.src_path )
self.handler( event.src_path )
window_name = "slowdraw"
fullscreen = False
cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN | cv2.WINDOW_OPENGL)
def start_fullscreen():
global fullscreen
global window_name
if not fullscreen:
cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
fullscreen = True
else:
cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, 0)
fullscreen = False
frame1 = cv2.imread(args.path)
w,h,_ = frame1.shape
frames = [frame1]
curr_frame = 0
done = False
def handle_frame(fname):
if (len(fname) > 4 and fname[-4:] == ".png"):
newframe = cv2.imread(fname)
frames.append(newframe)
mod_listener = ModListener(handle_frame)
observer = Observer()
directory = os.path.dirname(args.path)
observer.schedule(mod_listener, directory, recursive=True)
observer.start()
maxtime = 1000/2
mintime = 1000/30
# 2 4 8 16 32 64 128 256 512
maxtimes = [2000,2000,2000, 1000, 1000, 1000, 1000, 1000, 1000, 1000]
mintimes = [1000,1000,1000, 1000, 500, 200, 100, 50, 50, 50]
def get_times(nframes):
index = int(math.ceil(math.log(nframes) / math.log(2)))
if index >= len(maxtimes):
return maxtimes[-1], mintimes[-1]
else:
return maxtimes[index], mintimes[index]
def scalexp(v,mint,maxt,scale=5):
mine = math.exp(1.0)/math.exp(scale)
maxe = 1.0
vs = math.exp(1 + (scale-1)*v)/math.exp(scale)
vs = (vs - mine)/(maxe - mine)
return vs * (maxt - mint) + mint
def linscale(v,mint,maxt):
return v*(maxt-mint) + mint
def maintain_aspect(maxx,maxy,x,y):
wr = maxx/float(x)
hr = maxy/float(y)
if hr*y <= maxy or hr*x <= maxx:
return (int(hr*x),int(hr*y))
else:
return (int(wr*x),int(wr*y))
# maintain_aspect(1024,768,640,480)==(1024,768)
# maintain_aspect(1024,768,608,472)==(989,768)
# maintain_aspect(1024,768,random.randint(1,1324),random.randint(1,1324))
fourcc = cv2.cv.FOURCC(*'XVID')
writer = cv2.VideoWriter("slowdraw.avi",fourcc,30,(h,w),1)
frametime = 1000.0/30.0
resized_frame = None
fs_offset_x = 0
fs_offset_y = 0
cv2.imshow('slowdraw', fullscreen_buffer )
try:
while not done:
framen = curr_frame % len(frames)
frame = frames[curr_frame % len(frames)]
#if resized_frame == None:
# (lh,lw,depth) = frame.shape
# ratio = float(full_h)/float(lh)
# (resized_w,resized_h) = maintain_aspect(full_w,full_h,lw,lh)
# resized_frame = new_rgb(resized_w,resized_h)
# fs_offset_x = (full_w - resized_w)/2
# fs_offset_y = (full_h - resized_h)/2
# print "%s %s %s %s" % (resized_w,resized_h,fs_offset_x, fs_offset_y)
#resized_frame[:,:] = cv2.resize(frame,(resized_w,resized_h))
#fullscreen_buffer[fs_offset_y:fs_offset_y+resized_h , fs_offset_x:fs_offset_x+resized_w] = resized_frame
cv2.imshow('slowdraw', frame )
#print "%s,%s,%s" % fullscreen_buffer.shape
#cv2.imshow('slowdraw', fullscreen_buffer )
tmaxtime, tmintime = get_times(len(frames))
wait = scalexp( (framen + 1.0) / len(frames) , tmintime,tmaxtime)
print(wait,tmaxtime,tmintime)
curr_frame += 1
for i in range(0,max(1,int(wait/frametime))):
# print("Writing frame %s %s %s" % (i,wait,wait/frametime))
writer.write(frame)
# TODO: fix the wait time
k = cv2.waitKey(int(wait)) & 0xff
if k == 27:
done = True
continue
if k == ord('f'):
start_fullscreen()
except KeyboardInterrupt:
observer.stop()
# pickle.dump(frames,file('slowdraw.pkl','wb'))
writer.release()
observer.stop()
observer.join()
| gpl-3.0 | 8,090,950,691,469,998,000 | 28.707865 | 114 | 0.628404 | false | 2.980834 | false | false | false |
rhelmer/socorro-lib | socorro/unittest/external/postgresql/test_backfill.py | 1 | 12534 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from .unittestbase import PostgreSQLTestCase
from nose.plugins.attrib import attr
from nose.tools import eq_, assert_raises
import datetime
from socorro.external.postgresql.backfill import Backfill
from socorro.external.postgresql import staticdata, fakedata
from socorro.external import MissingArgumentError
from socorro.lib import datetimeutil
#==============================================================================
@attr(integration='postgres')
class TestBackfill(PostgreSQLTestCase):
"""Tests the calling of all backfill functions"""
#--------------------------------------------------------------------------
def setUp(self):
""" Populate tables with fake data """
super(TestBackfill, self).setUp()
cursor = self.connection.cursor()
self.tables = []
for table in staticdata.tables + fakedata.tables:
# staticdata has no concept of duration
if table.__module__ == 'socorro.external.postgresql.staticdata':
table = table()
else:
table = table(days=1)
table.releases = {
'WaterWolf': {
'channels': {
'Nightly': {
'versions': [{
'number': '18.0',
'probability': 0.5,
'buildid': '%s000020'
}],
'adu': '10',
'repository': 'nightly',
'throttle': '1',
'update_channel': 'nightly',
},
},
'crashes_per_hour': '5',
'guid': '{[email protected]}'
},
'B2G': {
'channels': {
'Nightly': {
'versions': [{
'number': '18.0',
'probability': 0.5,
'buildid': '%s000020'
}],
'adu': '10',
'repository': 'nightly',
'throttle': '1',
'update_channel': 'nightly',
},
},
'crashes_per_hour': '5',
'guid': '{[email protected]}'
}
}
table_name = table.table
table_columns = table.columns
values = str(tuple(["%(" + i + ")s" for i in table_columns]))
columns = str(tuple(table_columns))
self.tables.append(table_name)
# TODO: backfill_reports_clean() sometimes tries to insert a
# os_version_id that already exists
            if table_name != "os_versions":
for rows in table.generate_rows():
data = dict(zip(table_columns, rows))
query = "INSERT INTO %(table)s " % {'table': table_name}
query = query + columns.replace("'", "").replace(",)", ")")
query = query + " VALUES "
query = query + values.replace(",)", ")").replace("'", "")
cursor.execute(query, data)
self.connection.commit()
#--------------------------------------------------------------------------
def tearDown(self):
""" Cleanup the database, delete tables and functions """
cursor = self.connection.cursor()
tables = str(self.tables).replace("[", "").replace("]", "")
cursor.execute("TRUNCATE " + tables.replace("'", "") + " CASCADE;")
self.connection.commit()
self.connection.close()
super(TestBackfill, self).tearDown()
#--------------------------------------------------------------------------
def setup_data(self):
self.now = datetimeutil.utc_now()
now = self.now.date()
yesterday = now - datetime.timedelta(days=1)
lastweek = now - datetime.timedelta(days=7)
now_str = datetimeutil.date_to_string(now)
yesterday_str = datetimeutil.date_to_string(yesterday)
lastweek_str = datetimeutil.date_to_string(lastweek)
self.test_source_data = {
# Test backfill_adu
'adu': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_all_dups
'all_dups': {
'params': {
"start_date": yesterday_str,
"end_date": now_str,
},
'res_expected': [(True,)],
},
# Test backfill_build_adu
'build_adu': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_correlations
'correlations': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_crashes_by_user_build
'crashes_by_user_build': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_crashes_by_user
'crashes_by_user': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# TODO: Test backfill_daily_crashes tries to insert into a table
# that do not exists. It can be fixed by creating a temporary one.
#'daily_crashes': {
# 'params': {
# "update_day": now_str,
# },
# 'res_expected': [(True,)],
# },
# Test backfill_exploitability
'exploitability': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_explosiveness
'explosiveness': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_home_page_graph_build
'home_page_graph_build': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_home_page_graph
'home_page_graph': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_matviews
'matviews': {
'params': {
"start_date": yesterday_str,
"reports_clean": 'false',
},
'res_expected': [(True,)],
},
# Test backfill_nightly_builds
'nightly_builds': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_rank_compare
'rank_compare': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_reports_clean
'reports_clean': {
'params': {
"start_date": yesterday_str,
"end_date": now_str,
},
'res_expected': [(True,)],
},
# TODO: Test backfill_reports_duplicates tries to insert into a
# table that do not exists. It can be fixed by using the update
# function inside of the backfill.
#'reports_duplicates': {
# 'params': {
# "start_date": yesterday_str,
# "end_date": now_str,
# },
# 'res_expected': [(True,)],
# },
# TODO: Test backfill_signature_counts tries to insert into
# tables and to update functions that does not exist.
#'signature_counts': {
# 'params': {
# "start_date": yesterday_str,
# "end_date": now_str,
# },
# 'res_expected': [(True,)],
# },
# Test backfill_tcbs_build
'tcbs_build': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_tcbs
'tcbs': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_weekly_report_partitions
'weekly_report_partitions': {
'params': {
"start_date": lastweek_str,
"end_date": now_str,
"table_name": 'raw_crashes',
},
'res_expected': [(True,)],
},
# TODO: Update Backfill to support signature_summary backfill
# through the API
#'signature_summary_products': {
# 'params': {
# "update_day": yesterday_str,
# },
# 'res_expected': [(True,)],
#},
#'signature_summary_installations': {
# 'params': {
# "update_day": yesterday_str,
# },
# 'res_expected': [(True,)],
#},
#'signature_summary_uptime': {
# 'params': {
# "update_day": yesterday_str,
# },
# 'res_expected': [(True,)],
#},
#'signature_summary_os': {
# 'params': {
# "update_day": yesterday_str,
# },
# 'res_expected': [(True,)],
#},
#'signature_summary_process_type': {
# 'params': {
# "update_day": yesterday_str,
# },
# 'res_expected': [(True,)],
#},
#'signature_summary_architecture': {
# 'params': {
# "update_day": yesterday_str,
# },
# 'res_expected': [(True,)],
#},
#'signature_summary_flash_version': {
# 'params': {
# "update_day": yesterday_str,
# },
# 'res_expected': [(True,)],
#},
#'signature_summary_device': {
# 'params': {
# "update_day": yesterday_str,
# },
# 'res_expected': [(True,)],
#},
#'signature_summary_graphics': {
# 'params': {
# "update_day": yesterday_str,
# },
# 'res_expected': [(True,)],
#},
}
#--------------------------------------------------------------------------
def test_get(self):
backfill = Backfill(config=self.config)
#......................................................................
# Test raise error if kind of backfill is not passed
params = {"backfill_type": ''}
assert_raises(MissingArgumentError, backfill.get, **params)
#......................................................................
# Test all the backfill functions
self.setup_data()
for test, data in self.test_source_data.items():
data['params']['backfill_type'] = str(test)
res = backfill.get(**data['params'])
eq_(res[0], data['res_expected'][0])
| mpl-2.0 | 5,271,359,420,006,023,000 | 35.017241 | 79 | 0.390618 | false | 4.763968 | true | false | false |
kylewray/nova | python/nova/nova_pomdp.py | 1 | 6240 | """ The MIT License (MIT)
Copyright (c) 2015 Kyle Hollins Wray, University of Massachusetts
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import ctypes as ct
import platform
import os.path
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__))))
import nova_pomdp_alpha_vectors as npav
import pomdp_alpha_vectors as pav
# Check if we need to create the nova variable. If so, import the correct library
# file depending on the platform.
#try:
# _nova
#except NameError:
_nova = None
if platform.system() == "Windows":
_nova = ct.CDLL(os.path.join(os.path.dirname(os.path.realpath(__file__)),
"..", "..", "lib", "libnova.dll"))
else:
_nova = ct.CDLL(os.path.join(os.path.dirname(os.path.realpath(__file__)),
"..", "..", "lib", "libnova.so"))
class NovaPOMDP(ct.Structure):
""" The C struct POMDP object. """
_fields_ = [("n", ct.c_uint),
("ns", ct.c_uint),
("m", ct.c_uint),
("z", ct.c_uint),
("r", ct.c_uint),
("rz", ct.c_uint),
("gamma", ct.c_float),
("horizon", ct.c_uint),
("S", ct.POINTER(ct.c_int)),
("T", ct.POINTER(ct.c_float)),
("O", ct.POINTER(ct.c_float)),
("R", ct.POINTER(ct.c_float)),
("Z", ct.POINTER(ct.c_int)),
("B", ct.POINTER(ct.c_float)),
("d_S", ct.POINTER(ct.c_int)),
("d_T", ct.POINTER(ct.c_float)),
("d_O", ct.POINTER(ct.c_float)),
("d_R", ct.POINTER(ct.c_float)),
("d_Z", ct.POINTER(ct.c_int)),
("d_B", ct.POINTER(ct.c_float)),
]
# Functions from 'pomdp_model_cpu.h'.
_nova.pomdp_initialize_cpu.argtypes = (ct.POINTER(NovaPOMDP),
ct.c_uint, # n
ct.c_uint, # ns
ct.c_uint, # m
ct.c_uint, # z
ct.c_uint, # r
ct.c_uint, # rz
ct.c_float, # gamma
ct.c_uint) # horizon
_nova.pomdp_belief_update_cpu.argtypes = (ct.POINTER(NovaPOMDP),
ct.POINTER(ct.c_float), # b
ct.c_uint, # a
ct.c_uint, # o
ct.POINTER(ct.POINTER(ct.c_float))) # bp
_nova.pomdp_add_new_raw_beliefs_cpu.argtypes = (ct.POINTER(NovaPOMDP),
ct.c_uint, # numBeliefPointsToAdd
ct.POINTER(ct.c_float)) # Bnew
_nova.pomdp_uninitialize_cpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
# Functions from 'pomdp_expand_cpu.h'.
_nova.pomdp_expand_random_cpu.argtypes = (ct.POINTER(NovaPOMDP),
ct.c_uint) # numBeliefsToAdd
_nova.pomdp_expand_distinct_beliefs_cpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_expand_pema_cpu.argtypes = (ct.POINTER(NovaPOMDP),
ct.POINTER(pav.POMDPAlphaVectors)) # policy
# Functions from 'pomdp_sigma_cpu.h'.
_nova.pomdp_sigma_cpu.argtypes = (ct.POINTER(NovaPOMDP),
ct.c_uint, # numDesiredNonZeroValues
ct.POINTER(ct.c_float)) # sigma
# Functions from 'pomdp_model_gpu.h'.
_nova.pomdp_initialize_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_initialize_successors_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_successors_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_initialize_state_transitions_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_state_transitions_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_initialize_observation_transitions_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_observation_transitions_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_initialize_rewards_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_rewards_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_initialize_nonzero_beliefs_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_nonzero_beliefs_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_initialize_belief_points_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_belief_points_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
# Functions from 'pomdp_expand_gpu.h'.
_nova.pomdp_expand_random_gpu.argtypes = (ct.POINTER(NovaPOMDP),
ct.c_uint, # numThreads
ct.c_uint) # numBeliefsToAdd
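# Minimal call sketch (illustrative; the sizes are placeholders and the integer
# return value is assumed to be a nova error code):
#   pomdp = NovaPOMDP()
#   err = _nova.pomdp_initialize_cpu(ct.byref(pomdp), 10, 4, 3, 2, 1, 1, 0.95, 100)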
| mit | 2,637,093,861,809,474,000 | 47.372093 | 98 | 0.565705 | false | 3.604853 | false | false | false |
googleads/googleads-python-lib | examples/ad_manager/v202011/team_service/update_teams.py | 1 | 2271 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates teams by changing its description.
To determine which teams exist, run get_all_teams.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
TEAM_ID = 'INSERT_TEAM_ID_HERE'
def main(client, team_id):
# Initialize appropriate service.
team_service = client.GetService('TeamService', version='v202011')
# Create a filter statement to select a single team by ID.
statement = (ad_manager.StatementBuilder(version='v202011')
.Where('id = :teamId')
.WithBindVariable('teamId', int(team_id)))
# Get teams by statement.
response = team_service.getTeamsByStatement(statement.ToStatement())
if 'results' in response and len(response['results']):
updated_teams = []
# Update each local team object by changing its description.
for team in response['results']:
team['description'] = 'this team is great!'
updated_teams.append(team)
# Update teams on the server.
teams = team_service.updateTeams(updated_teams)
# Display results.
for team in teams:
print('Team with id "%s" and name "%s" was updated.'
% (team['id'], team['name']))
else:
print('No teams found to update.')
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, TEAM_ID)
| apache-2.0 | -2,277,132,433,655,368,400 | 32.397059 | 77 | 0.712902 | false | 3.942708 | false | false | false |
starius/wt-classes | examples/make-all.py | 1 | 1140 | #!/usr/bin/python
import sys
import re
from optparse import OptionParser
entrypoints = []
anchors = []
parser = OptionParser()
parser.add_option("--cpp", dest="cpp")
parser.add_option("--template", dest="template")
parser.add_option("--wrasterimage", dest="wrasterimage", action="store_true")
(options, args) = parser.parse_args()
remove_main = re.compile("int main.+\}", re.DOTALL)
for cpp in options.cpp.split():
if not cpp.endswith('all.cpp'):
sys.stdout.write(remove_main.sub("", open(cpp).read()))
low = re.split(r'[/\\]', cpp)[-1].split('.')[0]
if not options.wrasterimage and low == 'captcha':
continue
Cap = re.search(r"create([^\s]+)App", open(cpp).read()).groups()[0]
args = {'low': low, 'Cap': Cap}
entrypoints.append('''
addEntryPoint(Wt::Application, create%(Cap)sApp, "/%(low)s");
''' % args)
anchors.append('''
new WAnchor("%(low)s", "%(Cap)s", root());
new WBreak(root());
''' % args)
sys.stdout.write(open(options.template).read() %
{'entrypoints': ''.join(entrypoints), 'anchors': ''.join(anchors)})
| gpl-2.0 | 7,167,563,787,169,136,000 | 31.571429 | 77 | 0.592105 | false | 3.392857 | false | false | false |
SMTorg/smt | smt/applications/mfk.py | 1 | 27540 | # -*- coding: utf-8 -*-
"""
Created on Fri May 04 10:26:49 2018
@author: Mostafa Meliani <[email protected]>
Multi-Fidelity co-Kriging: recursive formulation with autoregressive model of
order 1 (AR1)
Adapted on January 2021 by Andres Lopez-Lopera to the new SMT version
"""
from copy import deepcopy
import numpy as np
from scipy.linalg import solve_triangular
from scipy import linalg
from scipy.spatial.distance import cdist
from packaging import version
from sklearn import __version__ as sklversion
if version.parse(sklversion) < version.parse("0.22"):
from sklearn.cross_decomposition.pls_ import PLSRegression as pls
else:
from sklearn.cross_decomposition import PLSRegression as pls
from smt.surrogate_models.krg_based import KrgBased
from smt.sampling_methods import LHS
from smt.utils.kriging_utils import (
cross_distances,
componentwise_distance,
standardization,
differences,
)
class NestedLHS(object):
def __init__(self, nlevel, xlimits, random_state=None):
"""
Constructor where values of options can be passed in.
Parameters
----------
nlevel : integer.
The number of design of experiments to be built
xlimits : ndarray
The interval of the domain in each dimension with shape (nx, 2)
random_state : Numpy RandomState object or seed number which controls random draws
"""
self.nlevel = nlevel
self.xlimits = xlimits
self.random_state = random_state
def __call__(self, nb_samples_hifi):
"""
        Builds nlevel nested designs of experiments of dimension dim and size n_samples.
        Each DoE is built with the optimized LHS procedure.
Builds the highest level first; nested properties are ensured by deleting
the nearest neighbours in lower levels of fidelity.
Parameters
----------
nb_samples_hifi: The number of samples of the highest fidelity model.
nb_samples_fi(n-1) = 2 * nb_samples_fi(n)
Returns
------
        list of length nlevel of designs of experiments from low to high fidelity level.
"""
nt = []
for i in range(self.nlevel, 0, -1):
nt.append(pow(2, i - 1) * nb_samples_hifi)
if len(nt) != self.nlevel:
raise ValueError("nt must be a list of nlevel elements")
if np.allclose(np.sort(nt)[::-1], nt) == False:
raise ValueError("nt must be a list of decreasing integers")
doe = []
p0 = LHS(xlimits=self.xlimits, criterion="ese", random_state=self.random_state)
doe.append(p0(nt[0]))
for i in range(1, self.nlevel):
p = LHS(
xlimits=self.xlimits, criterion="ese", random_state=self.random_state
)
doe.append(p(nt[i]))
for i in range(1, self.nlevel)[::-1]:
ind = []
d = cdist(doe[i], doe[i - 1], "euclidean")
for j in range(doe[i].shape[0]):
dj = np.sort(d[j, :])
k = dj[0]
l = (np.where(d[j, :] == k))[0][0]
m = 0
while l in ind:
m = m + 1
k = dj[m]
l = (np.where(d[j, :] == k))[0][0]
ind.append(l)
doe[i - 1] = np.delete(doe[i - 1], ind, axis=0)
doe[i - 1] = np.vstack((doe[i - 1], doe[i]))
return doe
class MFK(KrgBased):
def _initialize(self):
super(MFK, self)._initialize()
declare = self.options.declare
declare(
"rho_regr",
"constant",
values=("constant", "linear", "quadratic"),
desc="Regression function type for rho",
)
declare(
"optim_var",
False,
types=bool,
values=(True, False),
desc="If True, the variance at HF samples is forced to zero",
)
declare(
"propagate_uncertainty",
True,
types=bool,
values=(True, False),
            desc="If True, the variance contribution of lower fidelity levels is considered",
)
self.name = "MFK"
def _differences(self, X, Y):
"""
Compute the distances
"""
return differences(X, Y)
def _check_list_structure(self, X, y):
"""
checks if the data structure is compatible with MFK.
sets class attributes such as (number of levels of Fidelity, training points in each level, ...)
Arguments :
X : list of arrays, each array corresponds to a fidelity level. starts from lowest to highest
y : same as X
"""
if type(X) is not list:
nlevel = 1
X = [X]
else:
nlevel = len(X)
if type(y) is not list:
y = [y]
if len(X) != len(y):
raise ValueError("X and y must have the same length.")
n_samples = np.zeros(nlevel, dtype=int)
n_features = np.zeros(nlevel, dtype=int)
n_samples_y = np.zeros(nlevel, dtype=int)
for i in range(nlevel):
n_samples[i], n_features[i] = X[i].shape
            if i > 0 and n_features[i] != n_features[i - 1]:
raise ValueError("All X must have the same number of columns.")
y[i] = np.asarray(y[i]).ravel()[:, np.newaxis]
n_samples_y[i] = y[i].shape[0]
if n_samples[i] != n_samples_y[i]:
raise ValueError("X and y must have the same number of rows.")
self.nx = n_features[0]
self.nt_all = n_samples
self.nlvl = nlevel
self.ny = y[0].shape[1]
self.X = X[:]
self.y = y[:]
def _new_train(self):
"""
Overrides KrgBased implementation
Trains the Multi-Fidelity model
"""
self._new_train_init()
theta0 = self.options["theta0"].copy()
noise0 = self.options["noise0"].copy()
for lvl in range(self.nlvl):
self._new_train_iteration(lvl)
self.options["theta0"] = theta0
self.options["noise0"] = noise0
self._new_train_finalize(lvl)
def _new_train_init(self):
if self.name in ["MFKPLS", "MFKPLSK"]:
_pls = pls(self.options["n_comp"])
# As of sklearn 0.24.1 PLS with zeroed outputs raises an exception while sklearn 0.23 returns zeroed x_rotations
# For now the try/except below is a workaround to restore the 0.23 behaviour
try:
# PLS is done on the highest fidelity identified by the key None
self.m_pls = _pls.fit(
self.training_points[None][0][0].copy(),
self.training_points[None][0][1].copy(),
)
self.coeff_pls = self.m_pls.x_rotations_
except StopIteration:
                self.coeff_pls = np.zeros(
                    (self.training_points[None][0][0].shape[1], self.options["n_comp"])
                )
xt = []
yt = []
i = 0
while self.training_points.get(i, None) is not None:
xt.append(self.training_points[i][0][0])
yt.append(self.training_points[i][0][1])
i = i + 1
xt.append(self.training_points[None][0][0])
yt.append(self.training_points[None][0][1])
self._check_list_structure(xt, yt)
self._check_param()
X = self.X
y = self.y
_, _, self.X_offset, self.y_mean, self.X_scale, self.y_std = standardization(
np.concatenate(xt, axis=0), np.concatenate(yt, axis=0)
)
nlevel = self.nlvl
# initialize lists
self.optimal_noise_all = nlevel * [0]
self.D_all = nlevel * [0]
self.F_all = nlevel * [0]
self.p_all = nlevel * [0]
self.q_all = nlevel * [0]
self.optimal_rlf_value = nlevel * [0]
self.optimal_par = nlevel * [{}]
self.optimal_theta = nlevel * [0]
self.X_norma_all = [(x - self.X_offset) / self.X_scale for x in X]
self.y_norma_all = [(f - self.y_mean) / self.y_std for f in y]
def _new_train_iteration(self, lvl):
n_samples = self.nt_all
self.options["noise0"] = np.array([self.options["noise0"][lvl]]).flatten()
self.options["theta0"] = self.options["theta0"][lvl, :]
self.X_norma = self.X_norma_all[lvl]
self.y_norma = self.y_norma_all[lvl]
if self.options["eval_noise"]:
if self.options["use_het_noise"]:
# hetGP works with unique design variables
(
self.X_norma,
self.index_unique, # do we need to store it?
self.nt_reps, # do we need to store it?
) = np.unique(
self.X_norma, return_inverse=True, return_counts=True, axis=0
)
self.nt_all[lvl] = self.X_norma.shape[0]
# computing the mean of the output per unique design variable (see Binois et al., 2018)
y_norma_unique = []
for i in range(self.nt_all[lvl]):
y_norma_unique.append(np.mean(self.y_norma[self.index_unique == i]))
y_norma_unique = np.array(y_norma_unique).reshape(-1, 1)
# pointwise sensible estimates of the noise variances (see Ankenman et al., 2010)
self.optimal_noise = self.options["noise0"] * np.ones(self.nt_all[lvl])
for i in range(self.nt_all[lvl]):
diff = self.y_norma[self.index_unique == i] - y_norma_unique[i]
if np.sum(diff ** 2) != 0.0:
self.optimal_noise[i] = np.std(diff, ddof=1) ** 2
self.optimal_noise = self.optimal_noise / self.nt_reps
self.optimal_noise_all[lvl] = self.optimal_noise
self.y_norma = y_norma_unique
self.X_norma_all[lvl] = self.X_norma
self.y_norma_all[lvl] = self.y_norma
else:
self.optimal_noise = self.options["noise0"] / self.y_std ** 2
self.optimal_noise_all[lvl] = self.optimal_noise
# Calculate matrix of distances D between samples
self.D_all[lvl] = cross_distances(self.X_norma)
# Regression matrix and parameters
self.F_all[lvl] = self._regression_types[self.options["poly"]](self.X_norma)
self.p_all[lvl] = self.F_all[lvl].shape[1]
# Concatenate the autoregressive part for levels > 0
if lvl > 0:
F_rho = self._regression_types[self.options["rho_regr"]](self.X_norma)
self.q_all[lvl] = F_rho.shape[1]
self.F_all[lvl] = np.hstack(
(
F_rho
* np.dot(
self._predict_intermediate_values(
self.X_norma, lvl, descale=False
),
np.ones((1, self.q_all[lvl])),
),
self.F_all[lvl],
)
)
else:
self.q_all[lvl] = 0
n_samples_F_i = self.F_all[lvl].shape[0]
if n_samples_F_i != n_samples[lvl]:
raise Exception(
"Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model."
)
if int(self.p_all[lvl] + self.q_all[lvl]) >= n_samples_F_i:
raise Exception(
                (
                    "Ordinary least squares problem is underdetermined: "
"n_samples=%d must be greater than the regression"
" model size p+q=%d."
)
% (n_samples_F_i, self.p_all[lvl] + self.q_all[lvl])
)
# Determine Gaussian Process model parameters
self.F = self.F_all[lvl]
D, self.ij = self.D_all[lvl]
self._lvl = lvl
self.nt = self.nt_all[lvl]
self.q = self.q_all[lvl]
self.p = self.p_all[lvl]
(
self.optimal_rlf_value[lvl],
self.optimal_par[lvl],
self.optimal_theta[lvl],
) = self._optimize_hyperparam(D)
if self.options["eval_noise"] and not self.options["use_het_noise"]:
tmp_list = self.optimal_theta[lvl]
self.optimal_theta[lvl] = tmp_list[:-1]
self.optimal_noise = tmp_list[-1]
self.optimal_noise_all[lvl] = self.optimal_noise
del self.y_norma, self.D, self.optimal_noise
def _new_train_finalize(self, lvl):
if self.options["eval_noise"] and self.options["optim_var"]:
X = self.X
for lvl in range(self.nlvl - 1):
self.set_training_values(
X[lvl], self._predict_intermediate_values(X[lvl], lvl + 1), name=lvl
)
self.set_training_values(
X[-1], self._predict_intermediate_values(X[-1], self.nlvl)
)
self.options["eval_noise"] = False
self._new_train()
def _componentwise_distance(self, dx, opt=0):
d = componentwise_distance(dx, self.options["corr"], self.nx)
return d
def _predict_intermediate_values(self, X, lvl, descale=True):
"""
Evaluates the model at a set of points.
Used for training the model at level lvl.
        Allows the ordering problem to be relaxed.
Arguments
---------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
lvl : level at which the prediction is made
Returns
-------
y : np.ndarray
Evaluation point output variable values
"""
n_eval, _ = X.shape
# if n_features_X != self.n_features:
# raise ValueError("Design must be an array of n_features columns.")
# Calculate kriging mean and variance at level 0
mu = np.zeros((n_eval, lvl))
if descale:
X = (X - self.X_offset) / self.X_scale
f = self._regression_types[self.options["poly"]](X)
f0 = self._regression_types[self.options["poly"]](X)
dx = self._differences(X, Y=self.X_norma_all[0])
d = self._componentwise_distance(dx)
beta = self.optimal_par[0]["beta"]
r_ = self._correlation_types[self.options["corr"]](
self.optimal_theta[0], d
).reshape(n_eval, self.nt_all[0])
gamma = self.optimal_par[0]["gamma"]
# Scaled predictor
mu[:, 0] = (np.dot(f, beta) + np.dot(r_, gamma)).ravel()
# Calculate recursively kriging mean and variance at level i
for i in range(1, lvl):
g = self._regression_types[self.options["rho_regr"]](X)
dx = self._differences(X, Y=self.X_norma_all[i])
d = self._componentwise_distance(dx)
r_ = self._correlation_types[self.options["corr"]](
self.optimal_theta[i], d
).reshape(n_eval, self.nt_all[i])
f = np.vstack((g.T * mu[:, i - 1], f0.T))
beta = self.optimal_par[i]["beta"]
gamma = self.optimal_par[i]["gamma"]
# scaled predictor
mu[:, i] = (np.dot(f.T, beta) + np.dot(r_, gamma)).ravel()
# scaled predictor
if descale:
mu = mu * self.y_std + self.y_mean
return mu[:, -1].reshape((n_eval, 1))
def _predict_values(self, X):
"""
Evaluates the model at a set of points.
Arguments
---------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
Returns
-------
y : np.ndarray
Evaluation point output variable values
"""
return self._predict_intermediate_values(X, self.nlvl)
def _predict_variances(self, X):
"""
Evaluates the model at a set of points.
Arguments
---------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
Returns
-------
y : np.ndarray
Evaluation point output variable values
"""
return self.predict_variances_all_levels(X)[0][:, -1]
def predict_variances_all_levels(self, X):
"""
Evaluates the model at a set of points.
Arguments
---------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
Returns
-------
y : np.ndarray
Evaluation point output variable values
"""
# Initialization X = atleast_2d(X)
nlevel = self.nlvl
sigma2_rhos = []
n_eval, n_features_X = X.shape
# if n_features_X != self.n_features:
# raise ValueError("Design must be an array of n_features columns.")
X = (X - self.X_offset) / self.X_scale
# Calculate kriging mean and variance at level 0
mu = np.zeros((n_eval, nlevel))
f = self._regression_types[self.options["poly"]](X)
f0 = self._regression_types[self.options["poly"]](X)
dx = self._differences(X, Y=self.X_norma_all[0])
d = self._componentwise_distance(dx)
# Get regression function and correlation
F = self.F_all[0]
C = self.optimal_par[0]["C"]
beta = self.optimal_par[0]["beta"]
Ft = solve_triangular(C, F, lower=True)
# yt = solve_triangular(C, self.y_norma_all[0], lower=True)
r_ = self._correlation_types[self.options["corr"]](
self.optimal_theta[0], d
).reshape(n_eval, self.nt_all[0])
gamma = self.optimal_par[0]["gamma"]
# Scaled predictor
mu[:, 0] = (np.dot(f, beta) + np.dot(r_, gamma)).ravel()
self.sigma2_rho = nlevel * [None]
MSE = np.zeros((n_eval, nlevel))
r_t = solve_triangular(C, r_.T, lower=True)
G = self.optimal_par[0]["G"]
u_ = solve_triangular(G.T, f.T - np.dot(Ft.T, r_t), lower=True)
sigma2 = self.optimal_par[0]["sigma2"] / self.y_std ** 2
MSE[:, 0] = sigma2 * (
# 1 + self.optimal_noise_all[0] - (r_t ** 2).sum(axis=0) + (u_ ** 2).sum(axis=0)
1
- (r_t ** 2).sum(axis=0)
+ (u_ ** 2).sum(axis=0)
)
# Calculate recursively kriging variance at level i
for i in range(1, nlevel):
F = self.F_all[i]
C = self.optimal_par[i]["C"]
g = self._regression_types[self.options["rho_regr"]](X)
dx = self._differences(X, Y=self.X_norma_all[i])
d = self._componentwise_distance(dx)
r_ = self._correlation_types[self.options["corr"]](
self.optimal_theta[i], d
).reshape(n_eval, self.nt_all[i])
f = np.vstack((g.T * mu[:, i - 1], f0.T))
Ft = solve_triangular(C, F, lower=True)
yt = solve_triangular(C, self.y_norma_all[i], lower=True)
r_t = solve_triangular(C, r_.T, lower=True)
G = self.optimal_par[i]["G"]
beta = self.optimal_par[i]["beta"]
# scaled predictor
sigma2 = self.optimal_par[i]["sigma2"] / self.y_std ** 2
q = self.q_all[i]
u_ = solve_triangular(G.T, f - np.dot(Ft.T, r_t), lower=True)
sigma2_rho = np.dot(
g,
sigma2 * linalg.inv(np.dot(G.T, G))[:q, :q]
+ np.dot(beta[:q], beta[:q].T),
)
sigma2_rho = (sigma2_rho * g).sum(axis=1)
sigma2_rhos.append(sigma2_rho)
if self.name in ["MFKPLS", "MFKPLSK"]:
p = self.p_all[i]
Q_ = (np.dot((yt - np.dot(Ft, beta)).T, yt - np.dot(Ft, beta)))[0, 0]
MSE[:, i] = (
# sigma2_rho * MSE[:, i - 1]
+Q_ / (2 * (self.nt_all[i] - p - q))
# * (1 + self.optimal_noise_all[i] - (r_t ** 2).sum(axis=0))
* (1 - (r_t ** 2).sum(axis=0))
+ sigma2 * (u_ ** 2).sum(axis=0)
)
else:
MSE[:, i] = sigma2 * (
# 1 + self.optimal_noise_all[i] - (r_t ** 2).sum(axis=0) + (u_ ** 2).sum(axis=0)
1
- (r_t ** 2).sum(axis=0)
+ (u_ ** 2).sum(axis=0)
) # + sigma2_rho * MSE[:, i - 1]
if self.options["propagate_uncertainty"]:
MSE[:, i] = MSE[:, i] + sigma2_rho * MSE[:, i - 1]
# scaled predictor
MSE *= self.y_std ** 2
return MSE, sigma2_rhos
def _predict_derivatives(self, x, kx):
"""
Evaluates the derivatives at a set of points.
Arguments
---------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
kx : int
The 0-based index of the input variable with respect to which derivatives are desired.
Returns
-------
        y : np.ndarray
Derivative values.
"""
lvl = self.nlvl
# Initialization
n_eval, n_features_x = x.shape
x = (x - self.X_offset) / self.X_scale
dy_dx = np.zeros((n_eval, lvl))
if self.options["corr"] != "squar_exp":
raise ValueError(
"The derivative is only available for square exponential kernel"
)
if self.options["poly"] == "constant":
df = np.zeros([n_eval, 1])
elif self.options["poly"] == "linear":
df = np.zeros((n_eval, self.nx + 1))
df[:, 1:] = 1
else:
raise ValueError(
"The derivative is only available for ordinary kriging or "
+ "universal kriging using a linear trend"
)
df0 = deepcopy(df)
if self.options["rho_regr"] != "constant":
raise ValueError(
"The derivative is only available for regression rho constant"
)
# Get pairwise componentwise L1-distances to the input training set
dx = self._differences(x, Y=self.X_norma_all[0])
d = self._componentwise_distance(dx)
# Compute the correlation function
r_ = self._correlation_types[self.options["corr"]](
self.optimal_theta[0], d
).reshape(n_eval, self.nt_all[0])
# Beta and gamma = R^-1(y-FBeta)
beta = self.optimal_par[0]["beta"]
gamma = self.optimal_par[0]["gamma"]
df_dx = np.dot(df, beta)
d_dx = x[:, kx].reshape((n_eval, 1)) - self.X_norma_all[0][:, kx].reshape(
(1, self.nt_all[0])
)
theta = self._get_theta(0)
dy_dx[:, 0] = np.ravel((df_dx - 2 * theta[kx] * np.dot(d_dx * r_, gamma)))
# Calculate recursively derivative at level i
for i in range(1, lvl):
g = self._regression_types[self.options["rho_regr"]](x)
dx = self._differences(x, Y=self.X_norma_all[i])
d = self._componentwise_distance(dx)
r_ = self._correlation_types[self.options["corr"]](
self.optimal_theta[i], d
).reshape(n_eval, self.nt_all[i])
df = np.vstack((g.T * dy_dx[:, i - 1], df0.T))
beta = self.optimal_par[i]["beta"]
gamma = self.optimal_par[i]["gamma"]
df_dx = np.dot(df.T, beta)
d_dx = x[:, kx].reshape((n_eval, 1)) - self.X_norma_all[i][:, kx].reshape(
(1, self.nt_all[i])
)
theta = self._get_theta(i)
# scaled predictor
dy_dx[:, i] = np.ravel(df_dx - 2 * theta[kx] * np.dot(d_dx * r_, gamma))
return dy_dx[:, -1] * self.y_std / self.X_scale[kx]
def _get_theta(self, i):
return self.optimal_theta[i]
def _check_param(self):
"""
Overrides KrgBased implementation
This function checks some parameters of the model.
"""
if self.name in ["MFKPLS", "MFKPLSK"]:
d = self.options["n_comp"]
else:
d = self.nx
if self.options["corr"] == "act_exp":
raise ValueError("act_exp correlation function must be used with MGP")
if self.name in ["MFKPLS"]:
if self.options["corr"] not in ["squar_exp", "abs_exp"]:
raise ValueError(
"MFKPLS only works with a squared exponential or an absolute exponential kernel"
)
elif self.name in ["MFKPLSK"]:
if self.options["corr"] not in ["squar_exp"]:
raise ValueError(
"MFKPLSK only works with a squared exponential kernel (until we prove the contrary)"
)
if isinstance(self.options["theta0"], np.ndarray):
if self.options["theta0"].shape != (self.nlvl, d):
                raise ValueError(
                    "the dimensions of theta0 %s should coincide with the number of dim %s"
% (self.options["theta0"].shape, (self.nlvl, d))
)
else:
if len(self.options["theta0"]) != d:
if len(self.options["theta0"]) == 1:
self.options["theta0"] *= np.ones((self.nlvl, d))
elif len(self.options["theta0"]) == self.nlvl:
self.options["theta0"] = np.array(self.options["theta0"]).reshape(
-1, 1
)
self.options["theta0"] *= np.ones((1, d))
else:
raise ValueError(
"the length of theta0 (%s) should be equal to the number of dim (%s) or levels of fidelity (%s)."
% (len(self.options["theta0"]), d, self.nlvl)
)
else:
self.options["theta0"] *= np.ones((self.nlvl, 1))
if len(self.options["noise0"]) != self.nlvl:
if len(self.options["noise0"]) == 1:
self.options["noise0"] = self.nlvl * [self.options["noise0"]]
else:
raise ValueError(
"the length of noise0 (%s) should be equal to the number of levels of fidelity (%s)."
% (len(self.options["noise0"]), self.nlvl)
)
for i in range(self.nlvl):
if self.options["use_het_noise"]:
if len(self.X[i]) == len(np.unique(self.X[i])):
if len(self.options["noise0"][i]) != self.nt_all[i]:
if len(self.options["noise0"][i]) == 1:
self.options["noise0"][i] *= np.ones(self.nt_all[i])
else:
raise ValueError(
"for the level of fidelity %s, the length of noise0 (%s) should be equal to the number of observations (%s)."
% (i, len(self.options["noise0"][i]), self.nt_all[i])
)
else:
if len(self.options["noise0"][i]) != 1:
raise ValueError(
"for the level of fidelity %s, the length of noise0 (%s) should be equal to one."
% (i, len(self.options["noise0"][i]))
)
| bsd-3-clause | 8,541,336,273,020,419,000 | 35.769025 | 141 | 0.510022 | false | 3.578017 | false | false | false |
EricRahm/log-spam-hell | logspam/bisect.py | 1 | 10157 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from collections import Counter
from logspam import WARNING_RE
from logspam.cli import BaseCommandLineArgs
from logspam.logs import retrieve_test_logs
from mozregression.bisector import (
Bisector, Bisection, NightlyHandler, IntegrationHandler)
from mozregression.dates import parse_date
from mozregression.errors import DateFormatError
from mozregression.fetch_build_info import IntegrationInfoFetcher
from mozregression.fetch_configs import create_config
from mozregression.json_pushes import JsonPushes
from mozregression.log import init_logger
from mozregression.test_runner import TestRunner
import re
class WarningBisector(object):
def __init__(self, good, bad, platform, warning,
warning_limit, warning_re, ignore_lines,
required_test):
init_logger()
self.use_nightly = True
try:
self.good = parse_date(good)
self.bad = parse_date(bad)
except DateFormatError:
            # This is hopefully a revision range. We can bypass nightly and
# go directly to InboundHandler. That itself is a bit of a misnomer,
# it will still bisect m-c builds, but by changeset range, not date
# range.
self.use_nightly = False
self.good = good
self.bad = bad
self.ignore_lines = ignore_lines
self.test_runner = WarningTestRunner(
warning, platform,
ignore_lines=ignore_lines,
warning_re=warning_re,
warning_limit=warning_limit,
required_test=required_test)
# Convert the platform to a mozregression friendly version.
# Also avoid overwriting the os module by *not* using |os| for a
# variable name.
(_os, bits) = re.match(r'([a-zA-Z]+)-?([0-9]+)?', platform).groups()
        if not bits or bits not in ("32", "64"):
bits = 32
# windows7-32
# windows7-32-vm
# win32
# win64
if '64' in platform:
bits = 64
if _os.startswith('win'):
_os = 'win'
print("_os = %s bits = %s" % (_os, bits))
# TODO(ER): We might be able to ditch this.
self.fetch_config = create_config('firefox', _os, int(bits))
# Hardcode to m-c for now.
self.fetch_config.set_repo('mozilla-central')
self.fetch_config.set_build_type('debug')
class FakeDownloadManager:
def focus_download(self, foo):
pass
dm = FakeDownloadManager()
self.bisector = Bisector(self.fetch_config, self.test_runner, dm, False, None)
def bisect(self):
if self.use_nightly:
result = self.bisect_nightly()
else:
result = self.bisect_inbound(self.good, self.bad)
(good, bad) = result
if self.test_runner.check_for_move(self.fetch_config.repo, good):
print("You should probably try bisecting again from the good revision")
print("Done bisecting I guess")
return result
def bisect_nightly(self):
handler = NightlyHandler(ensure_good_and_bad=True)
result = self.bisector.bisect(handler, self.good, self.bad)
if result == Bisection.FINISHED:
print("Got as far as we can go bisecting nightlies...")
handler.print_range()
print("Switching bisection method to taskcluster")
result = self.bisect_inbound(handler.good_revision, handler.bad_revision)
else:
# TODO(ER): maybe this should be an exception...
result = (None, None)
return result
def bisect_inbound(self, good_rev, bad_rev):
# Remember, InboundHandler is just a changeset based bisector. It will
# still potentially bisect m-c first.
handler = InboundHandler()
result = self.bisector.bisect(handler, good_rev, bad_rev, expand=0)
if result == Bisection.FINISHED:
print("No more m-c revisions :(")
handler.print_range()
# Try switching over to the integration branch.
if len(handler.build_range) == 2:
result = handler.handle_merge()
if result:
branch, good_rev, bad_rev = result
self.fetch_config.set_repo(branch)
return self.bisect_inbound(good_rev, bad_rev)
return (handler.good_revision, handler.bad_revision)
class BisectCommandLineArgs(BaseCommandLineArgs):
@staticmethod
def do_bisect(args):
print("do_bisect called")
print(args)
bisector = WarningBisector(args.good, args.bad, args.platform,
args.warning, args.warning_limit,
args.warning_re, args.ignore_lines,
args.required_test)
# TODO(ER): Get the pushlog for bad, check for the file the warning is
# in in the changeset.
(good, bad) = bisector.bisect()
def add_command(self, p):
parser = p.add_parser('bisect',
help='Attempts to find the changeset that introduced a given '
'warning through bisection.')
self.add_arguments(parser)
parser.set_defaults(func=BisectCommandLineArgs.do_bisect)
def add_arguments(self, p):
# TODO(ER): add a date/revision parser
p.add_argument('good', action='store', default=None,
help='Last known good date. Will be validated.')
p.add_argument('bad', action='store', default=None,
help='Last known bad date.')
p.add_argument('warning', nargs='?',
help='The text of a warning you want the full details of.')
super(BisectCommandLineArgs, self).add_arguments(p)
p.add_argument('--ignore-lines', action='store_true', default=False,
help='Ignore line numbers when bisecting warnings. Useful if' \
' the line number of the warning has changed. Not so ' \
'useful if there are a lot of similar warnings in the ' \
'file.')
p.add_argument('--warning-limit', action='store', type=int, default=1000,
help='The threshold of warnings for going from good to ' \
'bad. Default: 1000.')
p.add_argument('--required-test', action='store', default=None,
help='Test that must be present to compare revisions')
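    # Illustrative invocation (the console-script name, dates and warning text are
    # hypothetical; --platform comes from the shared BaseCommandLineArgs options):
    #
    #   <log-spam-tool> bisect 2017-01-01 2017-02-01 "WARNING: foo.cpp, line 123" \
    #       --warning-limit 500 --ignore-lines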
class WarningTestRunner(TestRunner):
"""
TestRunner to use in conjunction with bisection.
"""
def __init__(self, warning, platform='linux64', ignore_lines=False,
warning_re=WARNING_RE, warning_limit=1000,
required_test=None):
TestRunner.__init__(self)
self.warning = warning
self.warning_re = warning_re
self.platform = platform
self.ignore_lines = ignore_lines
self.warning_limit = warning_limit
self.required_test = required_test or ""
def check_for_move(self, repo, changeset):
"""
Checks if the warning has moved lines but still exists.
"""
if self.ignore_lines:
return False
files = retrieve_test_logs(
repo, changeset[:12],
self.platform, warning_re=self.warning_re)
combined_warnings = Counter()
for log in files:
if log:
combined_warnings.update(log.warnings)
possible_move_found = False
normalized = re.match(r'^(.*), line [0-9]+$', self.warning).group(1)
for (k, v) in combined_warnings.items():
if k.startswith(normalized) and v > self.warning_limit:
print("Possible line move:\n %d - %s" % (v, k))
possible_move_found = True
if possible_move_found:
jp = JsonPushes(repo)
push = jp.push(changeset)
print("Try this date: %s" % push.utc_date)
return possible_move_found
def evaluate(self, build_info, allow_back=False):
files = retrieve_test_logs(
build_info.repo_name, build_info.changeset[:12],
self.platform, warning_re=self.warning_re)
# Somewhat arbitrary, but we need to make sure there are enough tests
# run in order to make a reasonable evaluation of the amount of
# warnings present.
if not files or len(files) < 20:
# Tell the bisector to skip this build.
print("Skipping build %s, not enough tests run" % build_info.changeset[:12])
return 's'
combined_warnings = Counter()
found_test = False
for log in files:
if log:
combined_warnings.update(log.warnings)
if not found_test:
found_test = self.required_test in log.job_name
if self.ignore_lines:
normalized = re.match(r'^(.*), line [0-9]+$', self.warning).group(1)
total = 0
for (k, v) in combined_warnings.items():
if k.startswith(normalized):
total += v
print("%d - %s" % (total, normalized))
else:
total = combined_warnings[self.warning]
print("%d - %s" % (total, self.warning))
if not found_test:
print("Skipping build %s, required test %s was not run" % (
build_info.changeset[:12], self.required_test))
return 's'
if total > self.warning_limit:
print("%d > %d" % (total, self.warning_limit))
return 'b'
else:
print("%d <= %d" % (total, self.warning_limit))
return 'g'
def run_once(self, build_info):
return 0 if self.evaluate(build_info) == 'g' else 1
| mpl-2.0 | 8,093,395,756,673,294,000 | 37.184211 | 88 | 0.57773 | false | 4.145714 | true | false | false |
KarrLab/obj_model | tests/fixtures/migrate/wc_lang_fixture/wc_lang/transform/split_reversible_reactions.py | 1 | 5346 | """ Transform models.
:Author: Jonathan Karr <[email protected]>
:Date: 2018-06-19
:Copyright: 2018, Karr Lab
:License: MIT
"""
from .core import Transform
from wc_lang import Model, Reaction, RateLawDirection
from wc_onto import onto
from wc_utils.util.ontology import are_terms_equivalent
import copy
import re
class SplitReversibleReactionsTransform(Transform):
""" Split reversible reactions in non-dFBA submodels into separate forward and backward reactions """
class Meta(object):
id = 'SplitReversibleReactions'
label = 'Split reversible reactions into separate forward and backward reactions'
def run(self, model):
""" Split reversible reactions in non-dFBA submodels into separate forward and backward reactions
Args:
model (:obj:`Model`): model definition
Returns:
:obj:`Model`: same model definition, but with reversible reactions split into separate forward and backward reactions
"""
for submodel in model.submodels:
if not are_terms_equivalent(submodel.framework, onto['WC:dynamic_flux_balance_analysis']):
for rxn in list(submodel.reactions):
if rxn.reversible:
# remove reversible reaction
model.reactions.remove(rxn)
submodel.reactions.remove(rxn)
# create separate forward and reverse reactions
rxn_for = submodel.reactions.create(
model=model,
id='{}_forward'.format(rxn.id),
name='{} (forward)'.format(rxn.name),
reversible=False,
evidence=rxn.evidence,
conclusions=rxn.conclusions,
identifiers=rxn.identifiers,
comments=rxn.comments,
references=rxn.references,
)
rxn_bck = submodel.reactions.create(
model=model,
id='{}_backward'.format(rxn.id),
name='{} (backward)'.format(rxn.name),
reversible=False,
evidence=rxn.evidence,
conclusions=rxn.conclusions,
identifiers=rxn.identifiers,
comments=rxn.comments,
references=rxn.references,
)
rxn.evidence = []
rxn.conclusions = []
rxn.identifiers = []
rxn.references = []
# copy participants and negate for backward reaction
for part in rxn.participants:
rxn_for.participants.append(part)
part_back = part.species.species_coefficients.get_one(coefficient=-1 * part.coefficient)
if part_back:
rxn_bck.participants.append(part_back)
else:
rxn_bck.participants.create(species=part.species, coefficient=-1 * part.coefficient)
rxn.participants = []
# copy rate laws
law_for = rxn.rate_laws.get_one(direction=RateLawDirection.forward)
law_bck = rxn.rate_laws.get_one(direction=RateLawDirection.backward)
if law_for:
law_for.reaction = rxn_for
law_for.direction = RateLawDirection.forward
law_for.id = law_for.gen_id()
if law_bck:
law_bck.reaction = rxn_bck
law_bck.direction = RateLawDirection.forward
law_bck.id = law_bck.gen_id()
# copy dFBA objective: unreachable because only non-dFBA reactions are split
if rxn.dfba_obj_expression:
dfba_obj_expr = rxn.dfba_obj_expression # pragma: no cover
parsed_expr = dfba_obj_expr._parsed_expression # pragma: no cover
dfba_obj_expr.expression = parsed_expr.expression = re.sub(
r'\b' + rxn.id + r'\b',
'({} - {})'.format(rxn_for.id, rxn_bck.id),
dfba_obj_expr.expression) # pragma: no cover
parsed_expr._objs[Reaction].pop(rxn.id) # pragma: no cover
parsed_expr._objs[Reaction][rxn_for.id] = rxn_for # pragma: no cover
parsed_expr._objs[Reaction][rxn_bck.id] = rxn_bck # pragma: no cover
parsed_expr.tokenize() # pragma: no cover
rxn.dfba_obj_expression = None # pragma: no cover
rxn_for.dfba_obj_expression = dfba_obj_expr # pragma: no cover
rxn_bck.dfba_obj_expression = dfba_obj_expr # pragma: no cover
return model
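# Illustrative effect of the transform (hypothetical reaction id "r1"): a reversible
# reaction r1: A <==> B in a non-dFBA submodel is replaced by r1_forward: A ==> B and
# r1_backward: B ==> A, with the forward/backward rate laws reassigned accordingly.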
| mit | -2,379,198,555,471,933,400 | 45.894737 | 129 | 0.491957 | false | 4.74778 | false | false | false |
PaddlePaddle/models | dygraph/mobilenet/imagenet_dataset.py | 1 | 1987 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import math
import random
import numpy as np
from paddle.vision.datasets import DatasetFolder
from paddle.vision.transforms import transforms
from paddle import fluid
class ImageNetDataset(DatasetFolder):
def __init__(self,
path,
mode='train',
image_size=224,
resize_short_size=256):
super(ImageNetDataset, self).__init__(path)
self.mode = mode
normalize = transforms.Normalize(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.120, 57.375])
if self.mode == 'train':
self.transform = transforms.Compose([
transforms.RandomResizedCrop(image_size),
transforms.RandomHorizontalFlip(),
transforms.Transpose(order=(2, 0, 1)), normalize
])
else:
self.transform = transforms.Compose([
transforms.Resize(resize_short_size),
transforms.CenterCrop(image_size),
transforms.Transpose(order=(2, 0, 1)), normalize
])
def __getitem__(self, idx):
img_path, label = self.samples[idx]
img = cv2.imread(img_path).astype(np.float32)
label = np.array([label]).astype(np.int64)
return self.transform(img), label
def __len__(self):
return len(self.samples)
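# Illustrative usage (hypothetical dataset path):
#
#     train_set = ImageNetDataset('/data/ILSVRC2012/train', mode='train')
#     img, label = train_set[0]  # img: float32 CHW array, label: int64 array of shape (1,)
#     # For batching, the dataset can be wrapped in paddle.io.DataLoader.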
| apache-2.0 | 260,625,212,117,591,070 | 33.859649 | 74 | 0.636135 | false | 4.148225 | false | false | false |
lillisgary/shiny-shame | theme/admin.py | 1 | 1252 | from django.contrib import admin
from .models import HomePage, Slide, IconBlurb, Portfolio, PortfolioItemImage, PortfolioItem, PortfolioItemCategory, TextSlider, DocumentListItem, DocumentList, DocumentListItemCategory
from mezzanine.core.admin import TabularDynamicInlineAdmin
from mezzanine.pages.admin import PageAdmin
class SlideInline(TabularDynamicInlineAdmin):
model = Slide
class IconBlurbInline(TabularDynamicInlineAdmin):
model = IconBlurb
class PortfolioItemImageInline(TabularDynamicInlineAdmin):
model = PortfolioItemImage
class TextSliderInline(TabularDynamicInlineAdmin):
model = TextSlider
class HomePageAdmin(PageAdmin):
inlines = (SlideInline, IconBlurbInline, TextSliderInline,)
class PortfolioItemAdmin(PageAdmin):
inlines = (PortfolioItemImageInline,)
class DocumentListItemInline(TabularDynamicInlineAdmin):
model = DocumentListItem
class DocumentListAdmin(PageAdmin):
inlines = (DocumentListItemInline,)
admin.site.register(HomePage, HomePageAdmin)
admin.site.register(Portfolio, PageAdmin)
admin.site.register(PortfolioItem, PortfolioItemAdmin)
admin.site.register(PortfolioItemCategory)
admin.site.register(DocumentList, DocumentListAdmin)
admin.site.register(DocumentListItemCategory)
| gpl-2.0 | 8,358,480,571,464,116,000 | 34.771429 | 185 | 0.835463 | false | 3.888199 | false | false | false |
twz915/django | django/core/serializers/json.py | 1 | 3709 | """
Serialize data to/from JSON
"""
import datetime
import decimal
import json
import sys
import uuid
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import (
Deserializer as PythonDeserializer, Serializer as PythonSerializer,
)
from django.utils import six
from django.utils.duration import duration_iso_string
from django.utils.functional import Promise
from django.utils.timezone import is_aware
class Serializer(PythonSerializer):
"""
Convert a queryset to JSON.
"""
internal_use_only = False
def _init_options(self):
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
self.options.update({'use_decimal': False})
self._current = None
self.json_kwargs = self.options.copy()
self.json_kwargs.pop('stream', None)
self.json_kwargs.pop('fields', None)
if self.options.get('indent'):
# Prevent trailing spaces
self.json_kwargs['separators'] = (',', ': ')
self.json_kwargs.setdefault('cls', DjangoJSONEncoder)
def start_serialization(self):
self._init_options()
self.stream.write("[")
def end_serialization(self):
if self.options.get("indent"):
self.stream.write("\n")
self.stream.write("]")
if self.options.get("indent"):
self.stream.write("\n")
def end_object(self, obj):
# self._current has the field data
indent = self.options.get("indent")
if not self.first:
self.stream.write(",")
if not indent:
self.stream.write(" ")
if indent:
self.stream.write("\n")
json.dump(self.get_dump_object(obj), self.stream, **self.json_kwargs)
self._current = None
def getvalue(self):
# Grand-parent super
return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of JSON data.
"""
if not isinstance(stream_or_string, (bytes, str)):
stream_or_string = stream_or_string.read()
if isinstance(stream_or_string, bytes):
stream_or_string = stream_or_string.decode('utf-8')
try:
objects = json.loads(stream_or_string)
for obj in PythonDeserializer(objects, **options):
yield obj
except GeneratorExit:
raise
except Exception as e:
# Map to deserializer error
six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
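# Illustrative usage of the encoder defined below (hypothetical payload):
#
#     json.dumps({'when': datetime.datetime(2017, 1, 1, 12, 30)}, cls=DjangoJSONEncoder)
#     # -> '{"when": "2017-01-01T12:30:00"}'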
class DjangoJSONEncoder(json.JSONEncoder):
"""
JSONEncoder subclass that knows how to encode date/time, decimal types and UUIDs.
"""
def default(self, o):
# See "Date Time String Format" in the ECMA-262 specification.
if isinstance(o, datetime.datetime):
r = o.isoformat()
if o.microsecond:
r = r[:23] + r[26:]
if r.endswith('+00:00'):
r = r[:-6] + 'Z'
return r
elif isinstance(o, datetime.date):
return o.isoformat()
elif isinstance(o, datetime.time):
if is_aware(o):
raise ValueError("JSON can't represent timezone-aware times.")
r = o.isoformat()
if o.microsecond:
r = r[:12]
return r
elif isinstance(o, datetime.timedelta):
return duration_iso_string(o)
elif isinstance(o, (decimal.Decimal, uuid.UUID, Promise)):
return str(o)
else:
return super(DjangoJSONEncoder, self).default(o)
| bsd-3-clause | -3,236,497,983,382,932,500 | 31.535088 | 85 | 0.60151 | false | 4.139509 | false | false | false |
nexec/vkcopy2mp3p | vkcopy2mp3p.py | 1 | 4785 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
import sqlite3 as db
import sys
import os
import pycurl
import StringIO
import re
import urllib
import json
from random import shuffle
PROFILE = 'default'
argc = len(sys.argv)
if argc < 3 or argc > 4:
sys.stderr.write('Usage: %s /path/to/dir count_of_songs [PROFILE]\n'%sys.argv[0])
sys.exit(1)
PATH_TO_SAVE=sys.argv[1]
count_of_songs = int(sys.argv[2])
if argc==4:
print "update PROFILE"
PROFILE=sys.argv[3]
#sys.exit(0)
# find needed profile dir and cookiesdb from it
cookiedbpath = os.environ['HOME']+'/.mozilla/firefox/'
for name in os.listdir(cookiedbpath):
if os.path.isdir(cookiedbpath+name) and (PROFILE in name):
cookiedbpath=cookiedbpath+name+'/cookies.sqlite'
break
what = '.vk.com'
addHash='undef'
connection = db.connect(cookiedbpath)
cursor = connection.cursor()
contents = "name, value"
cursor.execute("SELECT " +contents+ " FROM moz_cookies WHERE host='" +what+ "'")
cookiemas=[]
for row in cursor.fetchall():
cookiemas.append(row[0]+'='+row[1])
connection.close()
cookiestr='; '.join(cookiemas)
tmpdir = '/tmp/add_audio_vk'
songlist=[]
# this is first run, so lets write hash value
if not os.path.isdir(tmpdir):
mus = pycurl.Curl()
ans = StringIO.StringIO()
# let's figure out our pageid
mus.setopt(pycurl.HTTPHEADER, [str('Cookie: '+cookiestr)])
mus.setopt(pycurl.URL, 'https://vk.com/feed')
mus.setopt(pycurl.FOLLOWLOCATION, 1)
mus.setopt(pycurl.WRITEFUNCTION, ans.write)
mus.setopt(pycurl.USERAGENT, "Mozilla/5.0 (X11; Linux x86_64; rv:20.0) Gecko/20100101 Firefox/20.0")
mus.perform()
mus.close()
data=ans.getvalue()
profile=re.search('<a href=\"/([^\"]+)\" onclick=\"return nav.go\(this, event, {noback: true}\)\" id=\"myprofile\" class=\"left_row\">',data)
pageid=profile.group(1)
# figure out our hash
mus = pycurl.Curl()
ans = StringIO.StringIO()
mus.setopt(pycurl.HTTPHEADER, [str('Cookie: '+cookiestr)])
mus.setopt(pycurl.URL, 'https://vk.com/'+pageid)
mus.setopt(pycurl.FOLLOWLOCATION, 1)
mus.setopt(pycurl.VERBOSE, 0)
mus.setopt(pycurl.WRITEFUNCTION, ans.write)
mus.setopt(pycurl.USERAGENT, "Mozilla/5.0 (X11; Linux x86_64; rv:20.0) Gecko/20100101 Firefox/20.0")
mus.perform()
mus.close()
data=ans.getvalue()
addhash=re.search('Page.audioStatusUpdate\(\'([^\']+)\'\)',data).group(1)
os.mkdir(tmpdir)
fwrite=open(tmpdir+'/addhash','w')
fwrite.write(addhash)
fwrite.close()
fread=open(tmpdir+'/addhash','r')
HASHSUM=fread.read()
fread.close()
# looking for first match
mus = pycurl.Curl()
ans = StringIO.StringIO()
mus.setopt(pycurl.URL, 'https://m.vk.com/audio')
mus.setopt(pycurl.HTTPHEADER, [str('Cookie: '+cookiestr),'X-Requested-With: XMLHttpRequest'])
mus.setopt(pycurl.POST, 0)
mus.setopt(pycurl.VERBOSE, 0)
mus.setopt(pycurl.FOLLOWLOCATION, 1)
mus.setopt(pycurl.WRITEFUNCTION, ans.write)
mus.perform()
mus.close()
data=ans.getvalue()
js = json.loads(data)
if js[1]==False and js[4]==False:
sys.stderr.write('Firefox\'s profile is unauthorized at vk.com\n')
sys.exit(1)
page = js[5]
page1=page
page1 = re.sub(r'cur.au_search = new QuickSearch\(extend\(',r'',page1)
page1 = re.sub(r'\)\);extend\(cur,{module:\'audio\'}\);',r'',page1)
page1 = re.sub(r'\\/',r'/',page1)
page1 = re.sub(r'mp3\?([^"]+)',r'mp3',page1)
page1 = re.sub("(\n|\r).*", '', page1)
page1 = re.sub(',"_new":true\}, \{*','}',page1)
mlist = json.loads(page1)
count=0
for index, mas in mlist['_cache'].iteritems():
#mas[2] - link
#mas[3] - author
#mas[4] - song
songlist.append(dict([('link',mas[2]),('author',mas[3]),('song',mas[4])]))
count=count+1
##
offset=count
if count==200:
while (count>0):
count=0
mus = pycurl.Curl()
ans = StringIO.StringIO()
mus.setopt(pycurl.URL, 'https://m.vk.com/audio')
mus.setopt(pycurl.HTTPHEADER, [str('Cookie: '+cookiestr),'X-Requested-With: XMLHttpRequest'])
req = '_ajax=1&offset=%d'%(offset)
mus.setopt(pycurl.POSTFIELDS, req)
mus.setopt(pycurl.POST, 1)
mus.setopt(pycurl.VERBOSE, 0)
mus.setopt(pycurl.FOLLOWLOCATION, 1)
mus.setopt(pycurl.WRITEFUNCTION, ans.write)
mus.perform()
mus.close()
data=ans.getvalue()
data = re.sub(r'\\/',r'/',data)
data = re.sub(r'mp3\?([^"]+)',r'mp3',data)
mlist = json.loads(data)
mlist=mlist[3][0]
if len(mlist)>0:
for index, mas in mlist.iteritems():
songlist.append(dict([('link',mas[2]),('author',mas[3]),('song',mas[4])]))
count=count+1
offset=offset+count
print "total count: %d"%(len(songlist))
shuffle(songlist)
mkremove = "if [ -e '%(path)s' ]; then rm -r '%(path)s'; fi; mkdir '%(path)s'" % {"path":PATH_TO_SAVE}
os.system(mkremove)
for i in range(count_of_songs):
print "%s - %s" %(songlist[i]['author'],songlist[i]['song'])
os.system("wget -P '%s' %s"%(PATH_TO_SAVE,songlist[i]['link']))
print "complete"
sys.exit(0)
| gpl-2.0 | 6,050,023,060,319,584,000 | 26.819767 | 142 | 0.672727 | false | 2.533086 | false | false | false |
veveykocute/Spl | splc.py | 1 | 19239 | import sys
import math
"""A Shakespeare Compiler written in Python, splc.py
This is a compiler that implements the majority of the Shakespeare programming language
invented by Kalle Hasselstrom and Jon Aslund, I take no credit for inventing the language.
This software is free to edit or use, and though I doubt anyone would use this for many projects,
I guess I would appreciate some degree of acknowledgment if you do.
(c) V1.2 Sam Donow 2013-2014
[email protected]
[email protected]"""
#missing features
#full support for multi-word nouns/names
#Stacks, who needs them?
pos_adj = []
neg_adj = []
pos_comp = []
neg_comp = []
pos_nouns = []
neg_nouns = []
valid_names= []
zero_nouns = ['nothing', 'zero']
src = ""
N = 0
vartable = set([])
speaker = ""
target = ""
stage = set([])
actnum = 0
act_names = {}
scene_names= []
#report a compile-time error, then exit
def Assert(b, s):
global N
if not b:
sys.stderr.write(s + " at line " + str(N) + "\n")
sys.exit(1)
#Abstraction for writing to file, eased python 2/3 agnosticity,
#and will eventually allow file output instead of stdout if that
#ever is desired
def writeToFile(s):
sys.stdout.write(str(s) + "\n")
def isNoun(word):
return word in pos_nouns or word in neg_nouns or word in zero_nouns
def isAdjective(word):
return word in pos_adj or word in neg_adj
def isComparative(word):
return word in pos_comp or word in neg_comp
#returns 1 for "nice" and neutral nouns, -1 for nasty ones
def nounValue(word):
Assert(isNoun(word), "Tried to find the nounvalue of a non-noun")
return 1 if word in pos_nouns else -1 if word in neg_nouns else 0
#return s with all whitespace characters removed
def trimWhitespace(s):
trimmed = ""
for c in s:
if c not in ['\t', '\r', '\n', ' ']:
trimmed += c
return trimmed
#return s with all whitespace characters before the first non-whitespace character removed
def trimLeadingWhitespace(s):
trimIndex = 0
for c in s:
if c in ['\t', '\r', '\n', ' ']:
trimIndex +=1
else:
break
return s[trimIndex:]
#A whitespace-agnostic beginswith method
def beginsWithNoWhitespace(s, pattern):
return beginsWith(trimWhitespace(s), pattern)
def beginsWith(s, pattern):
return s[:len(pattern)] == pattern
def loadFileIntoList(filename, list):
f = open(filename, 'r')
for word in f.readlines():
list.append(word.split(" ")[-1][:-1])
f.close()
#load initial noun and adjective lists
def loadWordLists():
loadFileIntoList("include/neutral_adjective.wordlist" , pos_adj)
loadFileIntoList("include/positive_adjective.wordlist", pos_adj)
loadFileIntoList("include/negative_adjective.wordlist", neg_adj)
loadFileIntoList("include/positive_noun.wordlist", pos_nouns)
loadFileIntoList("include/neutral_noun.wordlist" , pos_nouns)
loadFileIntoList("include/negative_noun.wordlist", neg_nouns)
loadFileIntoList("include/positive_comparative.wordlist", pos_comp)
    loadFileIntoList("include/negative_comparative.wordlist", neg_comp)
loadFileIntoList("include/character.wordlist", valid_names)
roman_values = { 'M': 1000, 'D': 500, 'C': 100, 'L': 50, 'X': 10, 'V': 5, 'I': 1 }
def parseRomanNumeral(roman_string):
roman_string = roman_string.upper()
strindex = 0
roman_sum = 0
while strindex < len(roman_string) - 1:
if(roman_values[roman_string[strindex]] < roman_values[roman_string[strindex+1]]):
roman_sum -= roman_values[roman_string[strindex]]
else:
roman_sum += roman_values[roman_string[strindex]]
strindex += 1
return roman_sum + roman_values[roman_string[strindex]]
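# Worked example: parseRomanNumeral("XIV") returns 14 -- the I precedes the larger V,
# so it is subtracted: 10 - 1 + 5.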
def isNumber(s):
words = s.split(" ")
for word in words:
if isNoun(word):
return True
return False
#parse a string that is supposed to evaluate to a number
#if failOk is set to true, will return 0 for phrases that do not evaluate to a number
def parseNum(s, failOk = False):
words = s.split(" ")
nounIndex = len(words)
for i in range(0,len(words)):
if isNoun(words[i]):
nounIndex = i
break
ok = nounIndex < len(words)
if not ok and failOk:
return 0
Assert (ok, str(words) + "\nExpected a number, but found no noun")
value = nounValue(words[nounIndex])
for word in words[:nounIndex]:
if isAdjective(word):
value *= 2
return value
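# Worked example (assuming "pretty", "little" and "cat" appear in the adjective/noun
# wordlists): parseNum("a pretty little cat") locates the noun "cat" (value 1) and
# doubles it once per preceding adjective, giving 2 * 2 * 1 = 4. A negative noun such
# as "pig" would start from -1 instead.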
def parseEnterOrExit():
global stage
endBracket = src[N].find(']')
Assert(endBracket >= 0, "[ without matching ]")
enterOrExit = src[N][src[N].find('[')+1:src[N].find(']')]
if beginsWithNoWhitespace(enterOrExit, "Enter"):
names = enterOrExit[enterOrExit.find(" ") + 1:].split(" and ")
for namestr in names:
name = namestr.split(" ")[-1]
Assert(name in vartable, "Undeclared actor entering a scene")
stage.add(name)
Assert(len(stage) < 3, "Too many actors on stage")
elif beginsWithNoWhitespace(enterOrExit, "Exit"):
names = enterOrExit[enterOrExit.find(" ") + 1:].split(" and ")
for namestr in names:
name = namestr.split(" ")[-1]
Assert(name in stage, "Trying to make an actor who is not in the scene exit")
stage.remove(name)
elif beginsWithNoWhitespace(enterOrExit, "Exeunt"):
stage = set([])
else:
Assert(False, "Bracketed clause without Enter, Exit, or Exeunt")
#returns the index of the leftmost punctuation mark in s
def findPunctuation(s):
valids = []
for val in [s.find('.'), s.find('!'), s.find('?')]:
if val >= 0:
valids.append(val)
return -1 if len(valids) == 0 else min(valids)
#returns an array of the punctuation-delimited statements at the current location in the parsing
def getStatements():
global N
statements = []
line = trimLeadingWhitespace(src[N])
unfinished = False
while line.find(':') < 0 and line.find('[') < 0:
punctuation = findPunctuation(line)
if punctuation < 0:
if unfinished == False:
statements.append(line[:-1])
else:
statements[-1] += line[:-1]
N += 1
line = src[N]
unfinished = True
        else:
if not unfinished:
statements.append("")
statements[-1] += line[:punctuation]
line = line[punctuation + 1:]
unfinished = False
retval = []
for stat in statements:
if len(trimWhitespace(stat)) > 0:
retval.append(stat)
return retval
class Tree:
def __init__(self, v, l, r):
self.value = v
self.left = l
self.right = r
def wordToOperator(op):
if op == "sum":
return "+"
elif op == "difference":
return "-"
elif op == "quotient":
return "/"
elif op == "product":
return "*"
else:
Assert(False, "Illegal Operator")
binop = ["sum", "difference", "quotient", "product"]
unop = ["square", "cube", "twice"]
def buildExpressionTree(expr):
Assert (len(expr) > 0, "Ill-formed Expression in " + str(expr))
if expr[0] == "square":
if expr[1] == "root":
op = "(int)sqrt"
expr = expr[2:]
num, expr = buildExpressionTree(expr)
return Tree(op, num, ""), expr
elif expr[0] == "remainder":
if expr[1] == "of" and expr[2] == "the" and expr[3] == "quotient":
expr = expr[4:]
op = "%"
left, expr = buildExpressionTree(expr)
right, expr = buildExpressionTree(expr)
return Tree(op, left, right), expr
if expr[0] in binop:
op = wordToOperator(expr[0])
expr = expr[1:]
left, expr = buildExpressionTree(expr)
right, expr = buildExpressionTree(expr)
return Tree(op, left, right), expr
elif expr[0] in unop:
op = expr[0]
expr = expr[1:]
num, expr = buildExpressionTree(expr)
return Tree(op, num, ""), expr
if True:
i = 1 if expr[0] == "and" else 0
numstr = ""
while expr[i] not in binop and expr[i] not in unop and expr[i] not in ["and", "remainder"]:
if expr[i] in ["you", "thee", "yourself", "thyself", "thou"]:
expr = expr[i + 1:]
return Tree(target, "", ""), expr
elif expr[i] in ["me", "myself", "i"]:
expr = expr[i + 1:]
return Tree(speaker, "", ""), expr
elif expr[i].capitalize() in vartable:
name = expr[i]
expr = expr[i + 1:]
return Tree(name.capitalize(), "", ""), expr
elif i == len(expr) - 1:
numstr += expr[i]
i = len(expr)
break
else:
numstr += expr[i] + " "
i += 1
if i == len(expr):
expr = []
else:
expr = expr[i:]
if not isNumber(numstr):
return buildExpressionTree(expr)
else:
return Tree(str(parseNum(numstr)), "", ""), expr
def TreeToString(tree):
if tree.left == "":
#just a value
return str(tree.value)
elif tree.right == "":
#unary operator
return str(tree.value) + "(" + TreeToString(tree.left) + ")"
else:
#binary operator
return "(" + TreeToString(tree.left) + " " + str(tree.value) + " " + TreeToString(tree.right) + ")"
def parseExpr(expr):
tree = buildExpressionTree(expr.split(" "))[0]
return TreeToString(tree)
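# Worked example (hypothetical character name; "cat" assumed to be in the noun
# wordlist): with Romeo as the addressee, parseExpr("the sum of thyself and a cat")
# returns the C expression "(Romeo + 1)".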
def concatWords(wordArray):
c = ""
for word in wordArray:
c += word
return c
def firstWord(statement):
words = statement.split(" ")
for word in words:
if len(word) > 0:
return word
def parseStatement(stat):
statement = trimLeadingWhitespace(stat).lower()
first = statement.split(" ")[0]
trimmed = trimWhitespace(statement)
if first in ["you", "thou"]:
#this is an assignment of the form Prounoun [as adj as] expression
expr = ""
if statement.rfind("as") >= 0:
expr = statement[statement.rfind("as") + 3:]
else:
expr = statement[len(first) + 1:]
return target + " = " + parseExpr(expr) + " ;\n"
elif trimmed == "openyourheart" or trimmed == "openthyheart":
#numerical output
return 'fprintf(stdout, "%d", ' + target + ');\n'
elif trimmed == "speakyourmind" or trimmed == "speakthymind":
#character output
return 'fprintf(stdout, "%c", (char)' + target + ');\n'
elif trimmed == "listentoyourheart" or trimmed == "listentothyheart":
#numerical input
return 'fgets(inputbuffer, BUFSIZ, stdin);\nsscanf(inputbuffer, "%d", &' + target + ');\n' #" = getchar() - '0';\n"
    elif trimmed == "openyourmind" or trimmed == "openthymind":
#character input
return target + " = getchar();\n"
elif first in ["am", "are", "art", "be", "is"]:
#questions - do not yet support "not"
left = ""
kind = ""
right = ""
if statement.find("as") >= 0:
left, kind, right = statement.split(" as ")
Assert(isAdjective(kind), "Ill-formed conditional in " + statement)
kind = "equal"
elif statement.find("more") >= 0:
words = statement.split(" ")
moreloc = 0
for i in range(0, len(words)):
if words[i] == "more":
moreloc = i
break
Assert(isAdjective(words[moreloc + 1]), "Ill-formed conditional in " + statement)
kind = "greater" if words[moreloc + 1] in pos_adj else "lesser"
left, right = statement.split(" more " + words[moreloc + 1] + " ")
else:
comp = ""
for word in statement.split(" "):
if isComparative(word):
comp = word
break
Assert(len(comp) > 0, "Ill-formed conditional in " + statement)
kind = "greater" if comp in pos_comp else "lesser"
left, right = statement.split(comp)
return "condition = (" + parseExpr(left) + ") " + (">" if kind == "greater" else "<" if kind == "lesser" else "==") + " (" + parseExpr(right) + ");\n"
elif beginsWith(statement, "if so,"):
#positive condition
location = statement.find("if so,")
return "if (condition) {\n " + parseStatement(statement[location + 7:]) + " }\n"
elif beginsWith(statement, "if not,"):
#negative condition
location = statement.find("if not,")
return "if (!condition) {\n " + parseStatement(statement[location + 8:]) + " }\n"
elif beginsWith(statement, "let us") or beginsWith(statement, "we shall") or beginsWith(statement, "we must"):
words = statement.split(" ")
nextTwo = words[2] + " " + words[3]
Assert (nextTwo == "return to" or nextTwo == "proceed to", "Ill-formed goto")
# classic goto with scene or act
if words[4] == "scene" or words[4] == "act":
typeword = words[4] if words[4] == "act" else ("act_" + str(actnum) + "_scene")
return "goto " + typeword + str(parseRomanNumeral(words[5])) + ";\n"
else:
restOfPhrase = concatWords(words[4:])
type_ = "scene" if restOfPhrase in scene_names[actnum].keys() \
else "act" if restOfPhrase in act_names.keys() else "none"
Assert (type_ != "none", "Goto refers to nonexistant act or scene")
nameDict = act_names if type_ == "act" else scene_names[actnum]
            typeword = "act" if type_ == "act" else ("act_" + str(actnum) + "_scene")
return "goto " + typeword + str(nameDict[restOfPhrase]) + ";\n"
else:
return ""
def writeScenes(scenes, isLast):
writeToFile("act" + str(actnum) + ": {\ngoto act_" + str(actnum) + "_scene1;\n}")
for j in range(0, len(scenes)):
writeToFile("act_" + str(actnum) + "_scene" + str(j + 1) + ": {")
writeToFile(scenes[j])
if j < len(scenes) - 1:
writeToFile("goto act_" + str(actnum) + "_scene" + str(j + 2) + ";\n")
elif not isLast:
writeToFile("goto act" + str(actnum + 1) + ";\n")
writeToFile("}")
def handleDeclarations():
global N
global src
#variables, declaration syntax:
#Name, value
declarations = []
unfinished = False
while not beginsWithNoWhitespace(src[N], 'Act'):
Assert(N < len(src) - 1, "File contains no Acts")
if len(trimWhitespace(src[N])) > 0:
if not unfinished:
declarations.append(src[N])
else:
declarations[-1] += src[N]
unfinished = src[N].find('.') < 0
N += 1
for dec in declarations:
commaIndex = dec.find(',')
Assert(commaIndex > 0, "Improper declaration " + str(declarations))
wordsInName = trimLeadingWhitespace(dec[:commaIndex]).split(" ")
varname = wordsInName[-1]
value = parseNum(dec[commaIndex:-2], True)
writeToFile("int " + str(varname) + " = " + str(value) + ";")
Assert(varname in valid_names, "Non-Shakespearean variable name")
vartable.add(varname)
def getActOrSceneNumber(s, actOrScene):
num = s[s.find(actOrScene):].split(" ")[1]
if num.find(':') > 0:
num = num[:num.find(':')]
else:
Assert (False, "Bad " + actOrScene + " heading")
return parseRomanNumeral(num)
def getActOrSceneDescription(s):
desc = trimWhitespace(s[s.find(':')+1:]).lower()
p = findPunctuation(desc)
if p > 0:
desc = desc[:p]
return desc
# Gets all the names of scenes and acts, and adds them to the respective tables
# This must be done in a preprocessing step, in order to enable gotos to future acts/scenes
def parseAllActAndSceneDescriptions():
global scene_names
global act_names
current_act = 0
current_scene = 0
scene_names = [{}]
for line in src:
if beginsWithNoWhitespace(line, "Act"):
desc = getActOrSceneDescription(line)
current_act += 1
act_names[desc] = current_act
scene_names.append(dict())
current_scene = 0
elif beginsWithNoWhitespace(line, "Scene"):
desc = getActOrSceneDescription(line)
current_scene += 1
scene_names[current_act][desc] = current_scene
#-------------------------------Begin Main Program-------------------------#
Assert(len(sys.argv) > 1, "No input file")
filename = sys.argv[1]
f = open(filename, 'r')
src = f.readlines()
f.close()
loadWordLists()
#parse the title - all the text up until the first .
#title is unimportant and is thrown out
while src[N].find('.') < 0:
N += 1
N += 1
#title is thrown out
writeToFile("// " + filename + "\n" +
"// compiled with splc.py (c) Sam Donow 2013-2015\n" +
"#include <stdio.h>\n" +
"#include <math.h>\n" +
'#include "include/mathhelpers.h"\n' +
"int condition = 0;\n" +
"char inputbuffer[BUFSIZ];\n" +
"int main() {\n")
handleDeclarations()
parseAllActAndSceneDescriptions()
scenes = []
unfinished = False
while N < len(src):
if beginsWithNoWhitespace(src[N], 'Act'):
Assert (getActOrSceneNumber(src[N], 'Act') == actnum + 1, "Illegal Act numbering")
if actnum > 0:
writeScenes(scenes, False)
scenes = []
actnum += 1
#act_names[getActOrSceneDescription(src[N])] = actnum
N += 1
elif beginsWithNoWhitespace(src[N], 'Scene'):
Assert (getActOrSceneNumber(src[N], 'Scene') == len(scenes) + 1, "Illegal Scene numbering")
#scene_names[getActOrSceneDescription(src[N])] = len(scenes) + 1
N += 1
speaker = ""
target = ""
while (N < len(src)) and not (beginsWithNoWhitespace(src[N], 'Scene') or beginsWithNoWhitespace(src[N], 'Act')):
if beginsWithNoWhitespace(src[N], '['):
parseEnterOrExit()
if not unfinished:
scenes.append(";\n")
unfinished = True
N += 1
elif src[N].find(':') >= 0:
name = (src[N][:src[N].find(':')]).split(" ")[-1]
Assert (name in stage, "An actor who is not on stage is trying to speak")
for actor in stage:
if actor != name:
target = actor
speaker = name
N += 1
statements = getStatements()
scenecode = ""
for statement in statements:
scenecode += parseStatement(statement)
if not unfinished:
scenes.append(scenecode)
unfinished = True
else:
scenes[-1] += scenecode
else:
N += 1
unfinished = False
else:
N += 1
writeScenes(scenes, True)
writeToFile("}")
| unlicense | 5,124,951,847,631,947,000 | 34.561922 | 158 | 0.565466 | false | 3.567402 | false | false | false |
IndyMPO/IndyGeoTools | ConvertGeography/GetAreaConversionMatrix.py | 1 | 3774 | #This script copyright 2017 Indianapolis Metropolitan Planning Organization
from __future__ import division
import arcpy
import os
import pandas as pd
import numpy as np
from subprocess import Popen
import sys
def clear_temp():
    '''
    Clears the temporary directory that is created when running this tool
    '''
    temp_dir = r'C:\TEMP'
    for f in os.listdir(temp_dir): #Remove all files within the directory
        os.remove(os.path.join(temp_dir, f))
    os.rmdir(temp_dir) #Remove the directory itself
def main(*args):
    #Read in inputs
    from_shp_file = args[0]
    from_field = args[1]
    to_shp_file = args[2]
    to_field = args[3]
    outfile = args[4]
    show_matrix = args[5]
    remove_temp_if_successful = args[6]
    remove_temp_if_error = args[7]

    if from_field == to_field:
        to_field += '_1'

    #Check that the outfile is specified as a csv file. If it isn't, make it one.
    if outfile[-4:] != '.csv':
        outfile += '.csv'

    #Create temporary directory
    temp_dir = r'C:\TEMP'
    os.mkdir(temp_dir)
    temp_shp = os.path.join(temp_dir, 'TEMP.shp')
    from_shp = os.path.join(temp_dir, 'FROM.shp')
    to_shp = os.path.join(temp_dir, 'TO.shp')

    #Copy input shapefiles into temporary directory
    arcpy.CopyFeatures_management(from_shp_file, from_shp)
    arcpy.CopyFeatures_management(to_shp_file, to_shp)

    #Process the data. If an error occurs, the temporary directory will be deleted, and then the exception will be raised
    try:
        #Intersect the two shapefiles and calculate the area of each intersected polygon
        arcpy.Intersect_analysis([from_shp, to_shp], temp_shp)
        temp2_shp = temp_shp.replace('.shp', '2.shp')
        arcpy.CalculateAreas_stats(temp_shp, temp2_shp)

        #Create a list of all of the origin and destination polygons
        from_list = []
        to_list = []
        polygons = arcpy.da.SearchCursor(temp_shp, [from_field, to_field])
        for polygon in polygons:
            from_list += [polygon[0]]
            to_list += [polygon[1]]
        del polygons
        from_codes = pd.Series(from_list).value_counts().index
        to_codes = pd.Series(to_list).value_counts().index

        #Create matrix with total area of each intersected polygon, arranged by the from polygon and to polygon
        areas = pd.DataFrame(np.zeros((len(to_codes), len(from_codes))), index = to_codes, columns = from_codes)
        polygons = arcpy.da.SearchCursor(temp2_shp, [from_field, to_field, 'F_AREA'])
        for polygon in polygons:
            areas.loc[polygon[1], polygon[0]] = polygon[2]
        del polygons

        #Divide each column of the matrix by its sum
        total = areas.sum(0)
        out_data = areas.copy()
        for row in out_data.index:
            out_data.loc[row] /= total

        #Write to csv, and delete the temporary directory
        out_data.to_csv(outfile)
        if remove_temp_if_successful:
            clear_temp()

    except Exception as e:
        if remove_temp_if_error:
            clear_temp()
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print (exc_tb.tb_lineno)
        raise e

    #Open the file if instructed to do so
    if show_matrix:
        Popen(outfile, shell = True)
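
# Note on interpreting the output (explanatory comment, not part of the original tool):
# out_data has one column per from_field zone and one row per to_field zone. Each entry is
# the share of the from-zone's intersected area that falls within that to-zone, so every
# column sums to 1 (up to floating-point error). The intended use appears to be area
# weighting: multiplying a vector of from-zone values by this matrix apportions them to
# the to-zone geography.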
if __name__ == '__main__':
    from_shp_file = arcpy.GetParameterAsText(0)
    from_field = arcpy.GetParameterAsText(1)
    to_shp_file = arcpy.GetParameterAsText(2)
    to_field = arcpy.GetParameterAsText(3)
    outfile = arcpy.GetParameter(4)
    show_matrix = arcpy.GetParameter(5)
    remove_temp_if_successful = arcpy.GetParameter(6)
    remove_temp_if_error = arcpy.GetParameter(7)

    main(from_shp_file, from_field, to_shp_file, to_field, outfile, show_matrix, remove_temp_if_successful, remove_temp_if_error)
| apache-2.0 | -4,349,660,485,601,096,700 | 34.271028 | 129 | 0.642024 | false | 3.478341 | false | false | false |
mission-peace/interview | python/dynamic/weighted_job_scheduling_max_profit.py | 1 | 1192 | """
Problem Statement
=================
Given a set of jobs, each with a start time, an end time, and a profit, choose a subset of
non-overlapping jobs that maximizes the total profit.
Video
-----
* https://youtu.be/cr6Ip0J9izc
Complexity
----------
* Runtime Complexity: O(n^2)
* Space Complexity: O(n)
Reference Link
--------------
* http://www.cs.princeton.edu/courses/archive/spr05/cos423/lectures/06dynamic-programming.pdf
"""
def can_sequence(job1, job2):
    _, job1_finish_time = job1
    job2_start_time, _ = job2
    return job1_finish_time <= job2_start_time
def find_max_profit(jobs):
    sequenced_jobs = sorted(jobs.keys(), key=lambda x: x[1])
    T = [jobs[job_key] for job_key in sequenced_jobs]
    num_jobs = len(sequenced_jobs)
    for j in range(1, num_jobs):
        for i in range(0, j):
            if can_sequence(sequenced_jobs[i], sequenced_jobs[j]):
                T[j] = max(T[j], T[i] + jobs[sequenced_jobs[j]])
    return max(T)
if __name__ == '__main__':
    jobs = {
        (1, 3): 5,  # (start_time, end_time): profit
        (2, 5): 6,
        (4, 6): 5,
        (6, 7): 4,
        (5, 8): 11,
        (7, 9): 2
    }
    assert 17 == find_max_profit(jobs)
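    # Worked check (arithmetic verified by hand, not part of the original file): the best
    # non-overlapping subset here is {(2, 5), (5, 8)} with profit 6 + 11 = 17, which is why
    # the assertion expects 17.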
| apache-2.0 | -7,740,483,645,734,945,000 | 21.490566 | 112 | 0.568792 | false | 2.886199 | false | false | false |