repo_name stringlengths 6-100 | path stringlengths 4-294 | copies stringlengths 1-5 | size stringlengths 4-6 | content stringlengths 606-896k | license stringclasses 15 values |
---|---|---|---|---|---|
AzCiS/autorest | src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/HeadExceptions/setup.py | 10 | 1146 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
from setuptools import setup, find_packages
NAME = "autorestheadexceptiontestservice"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["msrest>=0.4.0", "msrestazure>=0.4.0"]
setup(
name=NAME,
version=VERSION,
description="AutoRestHeadExceptionTestService",
author_email="",
url="",
keywords=["Swagger", "AutoRestHeadExceptionTestService"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
Test Infrastructure for AutoRest
"""
)
| mit |
sergiocorato/bank-payment | account_banking/res_partner.py | 14 | 2871 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013 Therp BV (<http://therp.nl>).
#
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
class ResPartner(orm.Model):
_inherit = 'res.partner'
def def_journal_account_bank(
self, cr, uid, ids, get_property_account, context=None):
"""
Returns the property journal account for the given partners ids.
:param get_property_account: method of this object that takes
a partner browse record and returns a field name of type many2one.
"""
if not ids:
return {}
res = dict([(res_id, False) for res_id in ids])
for partner in self.browse(cr, uid, ids, context=context):
property_account = get_property_account(partner)
if partner[property_account]:
res[partner.id] = partner[property_account].id
return res
def get_property_account_decrease(self, partner):
if partner.customer and not partner.supplier:
return 'property_account_receivable'
return 'property_account_payable'
def get_property_account_increase(self, partner):
if partner.supplier and not partner.customer:
return 'property_account_payable'
return 'property_account_receivable'
def def_journal_account_bank_decr(
self, cr, uid, ids, context=None):
"""
Return the default journal account to be used for this partner
in the case of bank transactions that decrease the balance.
"""
return self.def_journal_account_bank(
cr, uid, ids, self.get_property_account_decrease, context=context)
def def_journal_account_bank_incr(
self, cr, uid, ids, context=None):
"""
Return the default journal account to be used for this partner
in the case of bank transactions that increase the balance.
"""
return self.def_journal_account_bank(
cr, uid, ids, self.get_property_account_increase, context=context)
| agpl-3.0 |
mateusz880/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/win/win-tool/gyptest-win-tool-handles-readonly-files.py | 164 | 1699 | #!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure overwriting read-only files works as expected (via win-tool).
"""
import TestGyp
import filecmp
import os
import stat
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['ninja'])
# First, create the source files.
os.makedirs('subdir')
read_only_files = ['read-only-file', 'subdir/A', 'subdir/B', 'subdir/C']
for f in read_only_files:
test.write(f, 'source_contents')
test.chmod(f, stat.S_IREAD)
if os.access(f, os.W_OK):
test.fail_test()
# Second, create the read-only destination files. Note that we are creating
# them where the ninja and win-tool will try to copy them to, in order to test
# that copies overwrite the files.
os.makedirs(test.built_file_path('dest/subdir'))
for f in read_only_files:
f = os.path.join('dest', f)
test.write(test.built_file_path(f), 'SHOULD BE OVERWRITTEN')
test.chmod(test.built_file_path(f), stat.S_IREAD)
# Ensure not writable.
if os.access(test.built_file_path(f), os.W_OK):
test.fail_test()
test.run_gyp('copies_readonly_files.gyp')
test.build('copies_readonly_files.gyp')
# Check the destination files were overwritten by ninja.
for f in read_only_files:
f = os.path.join('dest', f)
test.must_contain(test.built_file_path(f), 'source_contents')
# This will fail if the files are not the same mode or contents.
for f in read_only_files:
if not filecmp.cmp(f, test.built_file_path(os.path.join('dest', f))):
test.fail_test()
test.pass_test()
| gpl-3.0 |
kylehogan/haas | tests/unit/model.py | 3 | 3270 | # Copyright 2013-2014 Massachusetts Open Cloud Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Functional tests for model.py"""
# Some Notes:
#
# * We don't really have any agreed-upon requirements about what __repr__
# should print, but I'm fairly certain I hit an argument mismatch at
# some point, which is definitely wrong. The test_repr methods are there just
# to make sure it isn't throwing an exception.
from haas.model import *
from haas import config
from haas.ext.obm.ipmi import Ipmi
from haas.test_common import fresh_database, config_testsuite, ModelTest, \
fail_on_log_warnings
import pytest
fail_on_log_warnings = pytest.fixture(autouse=True)(fail_on_log_warnings)
@pytest.fixture
def configure():
config_testsuite()
config.load_extensions()
fresh_database = pytest.fixture(fresh_database)
pytestmark = pytest.mark.usefixtures('configure', 'fresh_database')
class TestNic(ModelTest):
def sample_obj(self):
return Nic(Node(label='node-99',
obm=Ipmi(type=Ipmi.api_name,
host="ipmihost",
user="root",
password="tapeworm")),
'ipmi', '00:11:22:33:44:55')
class TestNode(ModelTest):
def sample_obj(self):
return Node(label='node-99',
obm=Ipmi(type=Ipmi.api_name,
host="ipmihost",
user="root",
password="tapeworm"))
class TestProject(ModelTest):
def sample_obj(self):
return Project('manhattan')
class TestHeadnode(ModelTest):
def sample_obj(self):
return Headnode(Project('anvil-nextgen'),
'hn-example', 'base-headnode')
class TestHnic(ModelTest):
def sample_obj(self):
return Hnic(Headnode(Project('anvil-nextgen'),
'hn-0', 'base-headnode'),
'storage')
class TestNetwork(ModelTest):
def sample_obj(self):
pj = Project('anvil-nextgen')
return Network(pj, [pj], True, '102', 'hammernet')
class TestNetworkingAction(ModelTest):
def sample_obj(self):
nic = Nic(Node(label='node-99',
obm=Ipmi(type=Ipmi.api_name,
host="ipmihost",
user="root",
password="tapeworm")),
'ipmi', '00:11:22:33:44:55')
project = Project('anvil-nextgen')
network = Network(project, [project], True, '102', 'hammernet')
return NetworkingAction(nic=nic,
new_network=network,
channel='null')
| apache-2.0 |
minhtuancn/odoo | addons/l10n_be_coda/__init__.py | 430 | 1105 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import l10n_be_coda
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
xen0l/ansible | test/units/modules/network/ios/test_ios_banner.py | 52 | 2546 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.ios import ios_banner
from units.modules.utils import set_module_args
from .ios_module import TestIosModule, load_fixture
class TestIosBannerModule(TestIosModule):
module = ios_banner
def setUp(self):
super(TestIosBannerModule, self).setUp()
self.mock_exec_command = patch('ansible.modules.network.ios.ios_banner.exec_command')
self.exec_command = self.mock_exec_command.start()
self.mock_load_config = patch('ansible.modules.network.ios.ios_banner.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestIosBannerModule, self).tearDown()
self.mock_exec_command.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None):
self.exec_command.return_value = (0, load_fixture('ios_banner_show_banner.txt').strip(), None)
self.load_config.return_value = dict(diff=None, session='session')
def test_ios_banner_create(self):
for banner_type in ('login', 'motd', 'exec', 'incoming', 'slip-ppp'):
set_module_args(dict(banner=banner_type, text='test\nbanner\nstring'))
commands = ['banner {0} @\ntest\nbanner\nstring\n@'.format(banner_type)]
self.execute_module(changed=True, commands=commands)
def test_ios_banner_remove(self):
set_module_args(dict(banner='login', state='absent'))
commands = ['no banner login']
self.execute_module(changed=True, commands=commands)
def test_ios_banner_nochange(self):
banner_text = load_fixture('ios_banner_show_banner.txt').strip()
set_module_args(dict(banner='login', text=banner_text))
self.execute_module()
| gpl-3.0 |
radiasoft/radtrack | experimental/tshaftan/2dipoles1.py | 2 | 10661 | # -*- coding: utf-8 -*-
"""Simulation of SR from 2 dipole edges
"""
from __future__ import absolute_import, division, print_function
from pykern.pkdebug import pkdc, pkdp
from pykern import pkarray
import srwlib
from array import array
import uti_plot
print('1. Defining Particle for Trajectory Calculations...')
# Particle
part = srwlib.SRWLParticle()
part.x = 0.0 #beam.partStatMom1.x
part.y = 0.0 #beam.partStatMom1.y
part.xp = 0.0 #beam.partStatMom1.xp
part.yp = 0.0 #beam.partStatMom1.yp
part.gamma = 0.064/0.51099890221e-03 #Relative Energy beam.partStatMom1.gamma #
part.z = -0.0 #zcID #- 0.5*magFldCnt.MagFld[0].rz
part.relE0 = 1 #Electron Rest Mass
part.nq = -1 #Electron Charge
print('2. Defining Beam for Synchrotron Radiation Calculations...')
# Electron Beam
elecBeam = srwlib.SRWLPartBeam()
elecBeam.Iavg = 0.1 #Average Current [A]
elecBeam.partStatMom1.x = part.x #Initial Transverse Coordinates (initial Longitudinal Coordinate will be defined later on) [m]
elecBeam.partStatMom1.y = part.y
elecBeam.partStatMom1.z = part.z #Initial Longitudinal Coordinate (set before the ID)
elecBeam.partStatMom1.xp = part.xp #Initial Relative Transverse Velocities
elecBeam.partStatMom1.yp = part.yp
elecBeam.partStatMom1.gamma = part.gamma #Relative Energy
sigEperE = 0.1 #relative RMS energy spread
sigX = (1.5e-06/(64/0.511)*0.1)**(1/2) #horizontal RMS size of e-beam [m]
sigXp = (1.5e-06/(64/0.511)/0.1) **(1/2) #horizontal RMS angular divergence [rad]
sigY = sigX #vertical RMS size of e-beam [m]
sigYp = sigXp #vertical RMS angular divergence [rad]
elecBeam.arStatMom2[0] = sigX*sigX #<(x-<x>)^2>
elecBeam.arStatMom2[1] = 0 #<(x-<x>)(x'-<x'>)>
elecBeam.arStatMom2[2] = sigXp*sigXp #<(x'-<x'>)^2>
elecBeam.arStatMom2[3] = sigY*sigY #<(y-<y>)^2>
elecBeam.arStatMom2[4] = 0 #<(y-<y>)(y'-<y'>)>
elecBeam.arStatMom2[5] = sigYp*sigYp #<(y'-<y'>)^2>
elecBeam.arStatMom2[10] = sigEperE*sigEperE #<(E-<E>)^2>/<E>^2
print('3. Defining Magnetic Elements...')
# Elements
L_bend=0.05
L_drift=0.02
L_total=0.2 #2*L_bend+L_drift
bend1=srwlib.SRWLMagFldM(_G=-0.85, _m=1, _n_or_s='n', _Leff=L_bend, _Ledge=0.01)
bend2=srwlib.SRWLMagFldM(_G=0.85, _m=1, _n_or_s='n', _Leff=L_bend, _Ledge=0.01)
drift1 = srwlib.SRWLMagFldM(_G=0.0,_m=1, _n_or_s='n', _Leff=L_drift) #Drift
print('4. Collecting Elements into Container...')
# Container
arZero = array('d', [0]*3)
arZc = array('d', [-L_bend/2-L_drift/2, 0, L_bend/2+L_drift/2])
magFldCnt = srwlib.SRWLMagFldC() #Container
magFldCnt.allocate(3) #Magnetic Field consists of 3 parts
magFldCnt = srwlib.SRWLMagFldC([bend1, drift1, bend2], arZero, arZero, arZc)
# Container for a single dipole
#arZero = array('d', [0]*1)
#arZc = array('d', [-L_bend])
#magFldCnt = srwlib.SRWLMagFldC([bend1], arZero, arZero, arZc)
print('5. Making Allocation for Trajectory Waveform ...')
#Definitions and allocation for the Trajectory waveform
arPrecPar = [1]
npTraj = 10001 # number of trajectory points along longitudinal axis
partTraj = srwlib.SRWLPrtTrj()
partTraj.partInitCond = part
partTraj.allocate(npTraj, True)
partTraj.ctStart = -L_total/2
partTraj.ctEnd = L_total/2
print('6. Calculating Trajectory ...')
# Calculating Trajectory
partTraj = srwlib.srwl.CalcPartTraj(partTraj, magFldCnt, arPrecPar)
ctMesh = [partTraj.ctStart, partTraj.ctEnd, partTraj.np]
print('7. Plotting Trajectory ...')
uti_plot.uti_plot1d(partTraj.arX, ctMesh, ['ct [m]', 'Horizontal Position [m]'])
uti_plot.uti_plot1d(partTraj.arY, ctMesh, ['ct [m]', 'Vertical Position [m]'])
uti_plot.uti_plot1d(partTraj.arXp, ctMesh, ['ct [m]', 'Horizontal angle [rad]'])
uti_plot.uti_plot_show()
print('8. Switching to Synchrotron Radiation Calculations ...')
el1=0 # This FLAG defines type of calculation:
# Either filament beam calculation or for heat load calc
if el1==0:
wfr2 = srwlib.SRWLWfr() #For intensity distribution at fixed photon energy
else:
wfr2 = srwlib.SRWLStokes()
print('9. Defining SR Wavefront ...')
# Defining SR Wavefront
wfr2.mesh.ne= 1
wfr2.mesh.nx=401
wfr2.mesh.ny=401
wfr2.allocate(wfr2.mesh.ne, wfr2.mesh.nx, wfr2.mesh.ny) #Numbers of points vs Photon Energy, Horizontal and Vertical Positions
wfr2.mesh.zStart = 0.3 #Longitudinal Position [m] at which SR has to be calculated
wfr2.mesh.eStart = 2.1 #Initial Photon Energy [eV]
wfr2.mesh.eFin = 2.1 #Final Photon Energy [eV]
wfr2.mesh.xStart = -0.01 #Initial Horizontal Position [m]
wfr2.mesh.xFin = 0.01 #Final Horizontal Position [m]
wfr2.mesh.yStart = -0.01 #Initial Vertical Position [m]
wfr2.mesh.yFin = 0.01 #Final Vertical Position [m]
wfr2.partBeam = elecBeam
#This defines mesh for "thick" beam calculation
meshRes = srwlib.SRWLRadMesh(wfr2.mesh.eStart, wfr2.mesh.eFin, wfr2.mesh.ne, wfr2.mesh.xStart,
wfr2.mesh.xFin, wfr2.mesh.nx, wfr2.mesh.yStart, wfr2.mesh.yFin, wfr2.mesh.ny, wfr2.mesh.zStart) #to ensure correct final mesh if _opt_bl==None
print('10. Defining Precision of SR Calculations ...')
# Defining Precision of SR Calculations ...
meth = 2 #SR calculation method: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"
relPrec = 0.01 #relative precision
zStartInteg = partTraj.ctStart #0 #longitudinal position to start integration (effective if < zEndInteg)
zEndInteg = partTraj.ctEnd #0 #longitudinal position to finish integration (effective if > zStartInteg)
npTraj = 2000 #Number of points for trajectory calculation
useTermin = 0 #Use "terminating terms" (i.e. asymptotic expansions at zStartInteg and zEndInteg) or not (1 or 0 respectively)
sampFactNxNyForProp = 0 #sampling factor for adjusting nx, ny (effective if > 0)
arPrecPar = [meth, relPrec, zStartInteg, zEndInteg, npTraj, useTermin, sampFactNxNyForProp]
if el1==0:
print('11. Calculating SR Wavefront ...')
srwlib.srwl.CalcElecFieldSR(wfr2, elecBeam, magFldCnt, arPrecPar)
else:
# This computes heat load
# print('11. Calculating SR Heat Load ...')
# srwlib.srwl.CalcPowDenSR(wfr2, elecBeam, 0, magFldCnt, arPrecPar)
#This computes "thick" electron beam
print('11. Calculating SR Wavefront via multi-electron propagation...')
srwlib.srwl_wfr_emit_prop_multi_e(elecBeam,magFldCnt, meshRes, meth,
relPrec, 1, _n_part_avg_proc=1, _n_save_per=100,
_file_path=None, _sr_samp_fact=-1, _opt_bl=None, _pres_ang=0, _char=0,
_x0=0, _y0=0, _e_ph_integ=0, _rand_meth=1)
print('12. Extracting Intensity from calculated Electric Field ...')
if el1==0:
print('13. Plotting results ...')
# 2-D distribution
arI2 = array('f', [0]*wfr2.mesh.nx*wfr2.mesh.ny) #"flat" array to take 2D intensity data
srwlib.srwl.CalcIntFromElecField(arI2, wfr2, 6, 1, 3, wfr2.mesh.eStart, 0, 0)
uti_plot.uti_plot2d(arI2, [1000*wfr2.mesh.xStart, 1000*wfr2.mesh.xFin, wfr2.mesh.nx],
[1000*wfr2.mesh.yStart, 1000*wfr2.mesh.yFin, wfr2.mesh.ny],
['Horizontal Position [mm]', 'Vertical Position [mm]',
'Intensity at ' + str(wfr2.mesh.eStart) + ' eV'])
# 1-D distribution
arI1 = array('f', [0]*wfr2.mesh.nx)
srwlib.srwl.CalcIntFromElecField(arI1, wfr2, 6, 1, 1, wfr2.mesh.eStart, 0, 0)
uti_plot.uti_plot1d(arI1, [wfr2.mesh.xStart, wfr2.mesh.xFin*0, wfr2.mesh.nx],
['Horizontal coordinate [mm]', 'Intensity [ph/s/.1%bw/mm^2]', 'Distribution'])
else:
print('13. Plotting results ...')
# 1-D distribution
# plotMeshX = [1000*wfr2.mesh.xStart, 1000*wfr2.mesh.xFin*0, wfr2.mesh.nx]
# powDenVsX = array('f', [0]*wfr2.mesh.nx)
# for i in range(wfr2.mesh.nx): powDenVsX[i] = wfr2.arS[wfr2.mesh.nx*int(wfr2.mesh.ny*0.5) + i]
# uti_plot.uti_plot1d(powDenVsX, plotMeshX, ['Horizontal Position [mm]', 'Power Density [W/mm^2]', 'Power Density\n(horizontal cut at y = 0)'])
arI1 = array('f', [0]*wfr2.mesh.nx)
srwlib.srwl.CalcIntFromElecField(arI1, wfr2, 6, 0, 3, wfr2.mesh.eStart, 0, 0)
uti_plot.uti_plot1d(arI1, [wfr2.mesh.xStart, wfr2.mesh.xFin*0, wfr2.mesh.nx],
['Photon Energy [eV]', 'Intensity [ph/s/.1%bw/mm^2]', 'Distribution'])
# 2-D distribution
# arI2 = array('f', [0]*wfr2.mesh.nx*wfr2.mesh.ny) #"flat" array to take 2D intensity data
# srwlib.srwl.CalcIntFromElecField(arI2, meshRes, 6, 0, 3, wfr2.mesh.eStart, 0, 0)
# uti_plot.uti_plot2d(arI2, [1000*wfr2.mesh.xStart, 1000*wfr2.mesh.xFin, wfr2.mesh.nx],
# [1000*wfr2.mesh.yStart, 1000*wfr2.mesh.yFin, wfr2.mesh.ny],
# ['Horizontal Position [mm]', 'Vertical Position [mm]',
# 'Intensity at ' + str(wfr2.mesh.eStart) + ' eV'])
print('14. Saving results ...')
f = open('Trajectory.txt', 'w')
ctStep = 0
if partTraj.np > 0:
ctStep = (partTraj.ctEnd - partTraj.ctStart)/(partTraj.np - 1)
ct = partTraj.ctStart
for i in range(partTraj.np):
resStr = str(ct) + '\t' + repr(partTraj.arX[i]) + '\t' + repr(partTraj.arXp[i]) + '\t' + repr(partTraj.arY[i]) + '\t' + repr(partTraj.arYp[i]) + '\t' + repr(partTraj.arZ[i]) + '\t' + repr(partTraj.arZp[i])
if(hasattr(partTraj, 'arBx')):
resStr += '\t' + repr(partTraj.arBx[i])
if(hasattr(partTraj, 'arBy')):
resStr += '\t' + repr(partTraj.arBy[i])
if(hasattr(partTraj, 'arBz')):
resStr += '\t' + repr(partTraj.arBz[i])
f.write(resStr + '\n')
ct += ctStep
f.close()
f = open('1DprofileSR.txt', 'w')
xStep = 0
if wfr2.mesh.nx > 0:
xStep = (wfr2.mesh.xFin - wfr2.mesh.xStart)/(wfr2.mesh.nx - 1)
x = wfr2.mesh.xStart
for i in range(wfr2.mesh.nx ):
resStr = str(x) + '\t' + repr(arI1[i]) + '\t'
f.write(resStr + '\n')
x += xStep
f.close()
uti_plot.uti_plot_show()
print('15. Calculation is Complete.')
def EmittanceOptimizer(sigX,sigY,sigXp,sigYp):
elecBeam.arStatMom2[0] = sigX*sigX
elecBeam.arStatMom2[2] = sigXp*sigXp
elecBeam.arStatMom2[3] = sigY*sigY
elecBeam.arStatMom2[5] = sigYp*sigYp
srwlib.srwl.CalcElecFieldSR(wfr2, elecBeam, magFldCnt, arPrecPar)
arI1 = array('f', [0]*wfr2.mesh.nx)
srwlib.srwl.CalcIntFromElecField(arI1, wfr2, 6, 1, 1, wfr2.mesh.eStart, 0, 0)
return (arI1)
def read_data(SFileName="1DprofileSR.txt", TFileName="Trajectory.txt"):
# Reading SPECTRUM
# SFileName="Spectrum.txt"
f=open(SFileName,"r",1000)
e_p=[]
I_rad=[]
for line in f.readlines():
words = line.split()
e_p.append(words[0])
I_rad.append(words[1])
f.close()
# Reading TRAJECTORY
# TFileName="Trajectory.txt"
f=open(TFileName,"r",10000)
z_dist=[]
x_trajectory=[]
for line in f.readlines():
words = line.split()
z_dist.append(words[0])
x_trajectory.append(words[1])
f.close()
uti_plot.uti_plot1d(x_trajectory, [1, 10000, 10000],
['ct [um]', 'Horizontal Position [m]'])
uti_plot.uti_plot_show()
read_data() | apache-2.0 |
colinligertwood/odoo | addons/account_analytic_plans/__init__.py | 445 | 1104 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_analytic_plans
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
RossBrunton/django | tests/utils_tests/test_crypto.py | 447 | 4581 | from __future__ import unicode_literals
import binascii
import hashlib
import unittest
from django.utils.crypto import constant_time_compare, pbkdf2
class TestUtilsCryptoMisc(unittest.TestCase):
def test_constant_time_compare(self):
# It's hard to test for constant time, just test the result.
self.assertTrue(constant_time_compare(b'spam', b'spam'))
self.assertFalse(constant_time_compare(b'spam', b'eggs'))
self.assertTrue(constant_time_compare('spam', 'spam'))
self.assertFalse(constant_time_compare('spam', 'eggs'))
class TestUtilsCryptoPBKDF2(unittest.TestCase):
# http://tools.ietf.org/html/draft-josefsson-pbkdf2-test-vectors-06
rfc_vectors = [
{
"args": {
"password": "password",
"salt": "salt",
"iterations": 1,
"dklen": 20,
"digest": hashlib.sha1,
},
"result": "0c60c80f961f0e71f3a9b524af6012062fe037a6",
},
{
"args": {
"password": "password",
"salt": "salt",
"iterations": 2,
"dklen": 20,
"digest": hashlib.sha1,
},
"result": "ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957",
},
{
"args": {
"password": "password",
"salt": "salt",
"iterations": 4096,
"dklen": 20,
"digest": hashlib.sha1,
},
"result": "4b007901b765489abead49d926f721d065a429c1",
},
# # this takes way too long :(
# {
# "args": {
# "password": "password",
# "salt": "salt",
# "iterations": 16777216,
# "dklen": 20,
# "digest": hashlib.sha1,
# },
# "result": "eefe3d61cd4da4e4e9945b3d6ba2158c2634e984",
# },
{
"args": {
"password": "passwordPASSWORDpassword",
"salt": "saltSALTsaltSALTsaltSALTsaltSALTsalt",
"iterations": 4096,
"dklen": 25,
"digest": hashlib.sha1,
},
"result": "3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038",
},
{
"args": {
"password": "pass\0word",
"salt": "sa\0lt",
"iterations": 4096,
"dklen": 16,
"digest": hashlib.sha1,
},
"result": "56fa6aa75548099dcc37d7f03425e0c3",
},
]
regression_vectors = [
{
"args": {
"password": "password",
"salt": "salt",
"iterations": 1,
"dklen": 20,
"digest": hashlib.sha256,
},
"result": "120fb6cffcf8b32c43e7225256c4f837a86548c9",
},
{
"args": {
"password": "password",
"salt": "salt",
"iterations": 1,
"dklen": 20,
"digest": hashlib.sha512,
},
"result": "867f70cf1ade02cff3752599a3a53dc4af34c7a6",
},
{
"args": {
"password": "password",
"salt": "salt",
"iterations": 1000,
"dklen": 0,
"digest": hashlib.sha512,
},
"result": ("afe6c5530785b6cc6b1c6453384731bd5ee432ee"
"549fd42fb6695779ad8a1c5bf59de69c48f774ef"
"c4007d5298f9033c0241d5ab69305e7b64eceeb8d"
"834cfec"),
},
# Check leading zeros are not stripped (#17481)
{
"args": {
"password": b'\xba',
"salt": "salt",
"iterations": 1,
"dklen": 20,
"digest": hashlib.sha1,
},
"result": '0053d3b91a7f1e54effebd6d68771e8a6e0b2c5b',
},
]
def test_public_vectors(self):
for vector in self.rfc_vectors:
result = pbkdf2(**vector['args'])
self.assertEqual(binascii.hexlify(result).decode('ascii'),
vector['result'])
def test_regression_vectors(self):
for vector in self.regression_vectors:
result = pbkdf2(**vector['args'])
self.assertEqual(binascii.hexlify(result).decode('ascii'),
vector['result'])
| bsd-3-clause |
ucbrise/clipper | containers/python/rpc.py | 1 | 27820 | from __future__ import print_function
import zmq
import threading
import numpy as np
import struct
import time
from datetime import datetime
import socket
import sys
import os
import yaml
import logging
from collections import deque
if sys.version_info < (3, 0):
from subprocess32 import Popen, PIPE
else:
from subprocess import Popen, PIPE
from prometheus_client import start_http_server
from prometheus_client.core import Counter, Gauge, Histogram, Summary
import clipper_admin.metrics as metrics
RPC_VERSION = 3
INPUT_TYPE_BYTES = 0
INPUT_TYPE_INTS = 1
INPUT_TYPE_FLOATS = 2
INPUT_TYPE_DOUBLES = 3
INPUT_TYPE_STRINGS = 4
REQUEST_TYPE_PREDICT = 0
REQUEST_TYPE_FEEDBACK = 1
MESSAGE_TYPE_NEW_CONTAINER = 0
MESSAGE_TYPE_CONTAINER_CONTENT = 1
MESSAGE_TYPE_HEARTBEAT = 2
HEARTBEAT_TYPE_KEEPALIVE = 0
HEARTBEAT_TYPE_REQUEST_CONTAINER_METADATA = 1
SOCKET_POLLING_TIMEOUT_MILLIS = 5000
SOCKET_ACTIVITY_TIMEOUT_MILLIS = 30000
EVENT_HISTORY_BUFFER_SIZE = 30
EVENT_HISTORY_SENT_HEARTBEAT = 1
EVENT_HISTORY_RECEIVED_HEARTBEAT = 2
EVENT_HISTORY_SENT_CONTAINER_METADATA = 3
EVENT_HISTORY_RECEIVED_CONTAINER_METADATA = 4
EVENT_HISTORY_SENT_CONTAINER_CONTENT = 5
EVENT_HISTORY_RECEIVED_CONTAINER_CONTENT = 6
MAXIMUM_UTF_8_CHAR_LENGTH_BYTES = 4
BYTES_PER_LONG = 8
# Initial size of the buffer used for receiving
# request input content
INITIAL_INPUT_CONTENT_BUFFER_SIZE = 1024
# Initial size of the buffers used for sending response
# header data and receiving request header data
INITIAL_HEADER_BUFFER_SIZE = 1024
INPUT_HEADER_DTYPE = np.dtype(np.uint64)
logger = logging.getLogger(__name__)
def string_to_input_type(input_str):
input_str = input_str.strip().lower()
byte_strs = ["b", "bytes", "byte"]
int_strs = ["i", "ints", "int", "integer", "integers"]
float_strs = ["f", "floats", "float"]
double_strs = ["d", "doubles", "double"]
string_strs = ["s", "strings", "string", "strs", "str"]
if any(input_str == s for s in byte_strs):
return INPUT_TYPE_BYTES
elif any(input_str == s for s in int_strs):
return INPUT_TYPE_INTS
elif any(input_str == s for s in float_strs):
return INPUT_TYPE_FLOATS
elif any(input_str == s for s in double_strs):
return INPUT_TYPE_DOUBLES
elif any(input_str == s for s in string_strs):
return INPUT_TYPE_STRINGS
else:
return -1
def input_type_to_dtype(input_type):
if input_type == INPUT_TYPE_BYTES:
return np.dtype(np.int8)
elif input_type == INPUT_TYPE_INTS:
return np.dtype(np.int32)
elif input_type == INPUT_TYPE_FLOATS:
return np.dtype(np.float32)
elif input_type == INPUT_TYPE_DOUBLES:
return np.dtype(np.float64)
elif input_type == INPUT_TYPE_STRINGS:
return str
def input_type_to_string(input_type):
if input_type == INPUT_TYPE_BYTES:
return "bytes"
elif input_type == INPUT_TYPE_INTS:
return "ints"
elif input_type == INPUT_TYPE_FLOATS:
return "floats"
elif input_type == INPUT_TYPE_DOUBLES:
return "doubles"
elif input_type == INPUT_TYPE_STRINGS:
return "string"
class EventHistory:
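# Fixed-size ring buffer of (timestamp_millis, event_type) pairs; records the
# most recent RPC events (heartbeats, container metadata, container content)
# so they can be inspected for debugging.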
def __init__(self, size):
self.history_buffer = deque(maxlen=size)
def insert(self, msg_type):
curr_time_millis = time.time() * 1000
self.history_buffer.append((curr_time_millis, msg_type))
def get_events(self):
return self.history_buffer
class PredictionError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Server(threading.Thread):
def __init__(self, context, clipper_ip, clipper_port):
threading.Thread.__init__(self)
self.context = context
self.clipper_ip = clipper_ip
self.clipper_port = clipper_port
self.event_history = EventHistory(EVENT_HISTORY_BUFFER_SIZE)
def validate_rpc_version(self, received_version):
if received_version != RPC_VERSION:
print(
"ERROR: Received an RPC message with version: {clv} that does not match container version: {mcv}"
.format(clv=received_version, mcv=RPC_VERSION))
def handle_prediction_request(self, prediction_request):
"""
Returns
-------
PredictionResponse
A prediction response containing an output
for each input included in the specified
prediction request
"""
predict_fn = self.get_prediction_function()
total_length = 0
outputs = predict_fn(prediction_request.inputs)
# Type check the outputs:
if not type(outputs) == list:
raise PredictionError("Model did not return a list")
if len(outputs) != len(prediction_request.inputs):
raise PredictionError(
"Expected model to return %d outputs, found %d outputs" %
(len(prediction_request.inputs), len(outputs)))
if not type(outputs[0]) == str:
raise PredictionError("Model must return a list of strs. Found %s"
% type(outputs[0]))
for o in outputs:
total_length += len(o)
response = PredictionResponse(prediction_request.msg_id)
for output in outputs:
response.add_output(output)
return response
def handle_feedback_request(self, feedback_request):
"""
Returns
-------
FeedbackResponse
A feedback response corresponding
to the specified feedback request
"""
response = FeedbackResponse(feedback_request.msg_id, "ACK")
return response
def get_prediction_function(self):
if self.model_input_type == INPUT_TYPE_INTS:
return self.model.predict_ints
elif self.model_input_type == INPUT_TYPE_FLOATS:
return self.model.predict_floats
elif self.model_input_type == INPUT_TYPE_DOUBLES:
return self.model.predict_doubles
elif self.model_input_type == INPUT_TYPE_BYTES:
return self.model.predict_bytes
elif self.model_input_type == INPUT_TYPE_STRINGS:
return self.model.predict_strings
else:
print(
"Attempted to get predict function for invalid model input type!"
)
raise
def get_event_history(self):
return self.event_history.get_events()
def run(self, collect_metrics=True):
print("Serving predictions for {0} input type.".format(
input_type_to_string(self.model_input_type)))
connected = False
clipper_address = "tcp://{0}:{1}".format(self.clipper_ip,
self.clipper_port)
poller = zmq.Poller()
sys.stdout.flush()
sys.stderr.flush()
self.input_header_buffer = bytearray(INITIAL_HEADER_BUFFER_SIZE)
self.input_content_buffer = bytearray(
INITIAL_INPUT_CONTENT_BUFFER_SIZE)
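# The outer loop below (re)creates a ZeroMQ DEALER socket and sends an initial
# heartbeat; the inner loop polls that socket and dispatches heartbeat,
# new-container and container-content messages until activity times out, at
# which point the socket is closed and the connection is re-established.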
while True:
socket = self.context.socket(zmq.DEALER)
poller.register(socket, zmq.POLLIN)
socket.connect(clipper_address)
self.send_heartbeat(socket)
while True:
receivable_sockets = dict(
poller.poll(SOCKET_POLLING_TIMEOUT_MILLIS))
if socket not in receivable_sockets or receivable_sockets[socket] != zmq.POLLIN:
# Failed to receive a message before the specified polling timeout
if connected:
curr_time = datetime.now()
time_delta = curr_time - last_activity_time_millis
time_delta_millis = (time_delta.seconds * 1000) + (
time_delta.microseconds / 1000)
if time_delta_millis >= SOCKET_ACTIVITY_TIMEOUT_MILLIS:
# Terminate the session
print("Connection timed out, reconnecting...")
connected = False
poller.unregister(socket)
socket.close()
break
else:
self.send_heartbeat(socket)
sys.stdout.flush()
sys.stderr.flush()
continue
# Received a message before the polling timeout
if not connected:
connected = True
last_activity_time_millis = datetime.now()
t1 = datetime.now()
# Receive delimiter between routing identity and content
socket.recv()
rpc_version_bytes = socket.recv()
rpc_version = struct.unpack("<I", rpc_version_bytes)[0]
self.validate_rpc_version(rpc_version)
msg_type_bytes = socket.recv()
msg_type = struct.unpack("<I", msg_type_bytes)[0]
if msg_type == MESSAGE_TYPE_HEARTBEAT:
self.event_history.insert(EVENT_HISTORY_RECEIVED_HEARTBEAT)
print("Received heartbeat!")
sys.stdout.flush()
sys.stderr.flush()
heartbeat_type_bytes = socket.recv()
heartbeat_type = struct.unpack("<I",
heartbeat_type_bytes)[0]
if heartbeat_type == HEARTBEAT_TYPE_REQUEST_CONTAINER_METADATA:
self.send_container_metadata(socket)
continue
elif msg_type == MESSAGE_TYPE_NEW_CONTAINER:
self.event_history.insert(
EVENT_HISTORY_RECEIVED_CONTAINER_METADATA)
print(
"Received erroneous new container message from Clipper!"
)
continue
elif msg_type == MESSAGE_TYPE_CONTAINER_CONTENT:
self.event_history.insert(
EVENT_HISTORY_RECEIVED_CONTAINER_CONTENT)
msg_id_bytes = socket.recv()
msg_id = int(struct.unpack("<I", msg_id_bytes)[0])
print("Got start of message %d " % msg_id)
# list of byte arrays
request_header = socket.recv()
request_type = struct.unpack("<I", request_header)[0]
if request_type == REQUEST_TYPE_PREDICT:
input_header_size_raw = socket.recv()
input_header_size_bytes = struct.unpack(
"<Q", input_header_size_raw)[0]
typed_input_header_size = int(
input_header_size_bytes /
INPUT_HEADER_DTYPE.itemsize)
if len(self.input_header_buffer
) < input_header_size_bytes:
self.input_header_buffer = bytearray(
input_header_size_bytes * 2)
# While this procedure still incurs a copy, it saves a potentially
# costly memory allocation by ZMQ. This savings only occurs
# if the input header did not have to be resized
input_header_view = memoryview(
self.input_header_buffer)[:input_header_size_bytes]
input_header_content = socket.recv(copy=False).buffer
input_header_view[:
input_header_size_bytes] = input_header_content
parsed_input_header = np.frombuffer(
self.input_header_buffer,
dtype=INPUT_HEADER_DTYPE)[:typed_input_header_size]
input_type, num_inputs, input_sizes = parsed_input_header[
0], parsed_input_header[1], parsed_input_header[2:]
input_dtype = input_type_to_dtype(input_type)
input_sizes = [
int(inp_size) for inp_size in input_sizes
]
if input_type == INPUT_TYPE_STRINGS:
inputs = self.recv_string_content(
socket, num_inputs, input_sizes)
else:
inputs = self.recv_primitive_content(
socket, num_inputs, input_sizes, input_dtype)
t2 = datetime.now()
if int(input_type) != int(self.model_input_type):
print((
"Received incorrect input. Expected {expected}, "
"received {received}").format(
expected=input_type_to_string(
int(self.model_input_type)),
received=input_type_to_string(
int(input_type))))
raise
t3 = datetime.now()
prediction_request = PredictionRequest(
msg_id_bytes, inputs)
response = self.handle_prediction_request(
prediction_request)
t4 = datetime.now()
response.send(socket, self.event_history)
recv_time = (t2 - t1).total_seconds()
parse_time = (t3 - t2).total_seconds()
handle_time = (t4 - t3).total_seconds()
if collect_metrics:
metrics.report_metric('clipper_mc_pred_total', 1)
metrics.report_metric('clipper_mc_recv_time_ms',
recv_time * 1000.0)
metrics.report_metric('clipper_mc_parse_time_ms',
parse_time * 1000.0)
metrics.report_metric('clipper_mc_handle_time_ms',
handle_time * 1000.0)
metrics.report_metric(
'clipper_mc_end_to_end_latency_ms',
(recv_time + parse_time + handle_time) *
1000.0)
print("recv: %f s, parse: %f s, handle: %f s" %
(recv_time, parse_time, handle_time))
sys.stdout.flush()
sys.stderr.flush()
else:
feedback_request = FeedbackRequest(msg_id_bytes, [])
response = self.handle_feedback_request(feedback_request)
response.send(socket, self.event_history)
print("recv: %f s" % ((t2 - t1).total_seconds()))
sys.stdout.flush()
sys.stderr.flush()
def recv_string_content(self, socket, num_inputs, input_sizes):
# Create an empty numpy array that will contain
# input string references
inputs = np.empty(num_inputs, dtype=object)
for i in range(num_inputs):
# Obtain a memoryview of the received message's
# ZMQ frame buffer
input_item_buffer = socket.recv(copy=False).buffer
# Copy the memoryview content into a string object
input_str = input_item_buffer.tobytes()
inputs[i] = input_str
return inputs
def recv_primitive_content(self, socket, num_inputs, input_sizes,
input_dtype):
def recv_different_lengths():
# Create an empty numpy array that will contain
# input array references
inputs = np.empty(num_inputs, dtype=object)
for i in range(num_inputs):
# Receive input data and copy it into a byte
# buffer that can be parsed into a writeable
# array
input_item_buffer = socket.recv(copy=True)
input_item = np.frombuffer(
input_item_buffer, dtype=input_dtype)
inputs[i] = input_item
return inputs
def recv_same_lengths():
input_type_size_bytes = input_dtype.itemsize
input_content_size_bytes = sum(input_sizes)
typed_input_content_size = int(
input_content_size_bytes / input_type_size_bytes)
if len(self.input_content_buffer) < input_content_size_bytes:
self.input_content_buffer = bytearray(
input_content_size_bytes * 2)
input_content_view = memoryview(
self.input_content_buffer)[:input_content_size_bytes]
item_start_idx = 0
for i in range(num_inputs):
input_size = input_sizes[i]
# Obtain a memoryview of the received message's
# ZMQ frame buffer
input_item_buffer = socket.recv(copy=False).buffer
# Copy the memoryview content into a pre-allocated content buffer
input_content_view[item_start_idx:item_start_idx +
input_size] = input_item_buffer
item_start_idx += input_size
# Reinterpret the content buffer as a typed numpy array
inputs = np.frombuffer(
self.input_content_buffer,
dtype=input_dtype)[:typed_input_content_size]
# All inputs are of the same size, so we can use
# np.reshape to construct an input matrix
inputs = np.reshape(inputs, (len(input_sizes), -1))
return inputs
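# Dispatch on the input sizes: when every input has the same byte length the
# inputs can share one contiguous buffer and be reshaped into a 2-D matrix;
# otherwise each input is received into its own separate array.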
if len(set(input_sizes)) == 1:
return recv_same_lengths()
else:
return recv_different_lengths()
def send_container_metadata(self, socket):
if sys.version_info < (3, 0):
socket.send("", zmq.SNDMORE)
else:
socket.send("".encode('utf-8'), zmq.SNDMORE)
socket.send(struct.pack("<I", MESSAGE_TYPE_NEW_CONTAINER), zmq.SNDMORE)
socket.send_string(self.model_name, zmq.SNDMORE)
socket.send_string(str(self.model_version), zmq.SNDMORE)
socket.send_string(str(self.model_input_type), zmq.SNDMORE)
socket.send(struct.pack("<I", RPC_VERSION))
self.event_history.insert(EVENT_HISTORY_SENT_CONTAINER_METADATA)
print("Sent container metadata!")
sys.stdout.flush()
sys.stderr.flush()
def send_heartbeat(self, socket):
if sys.version_info < (3, 0):
socket.send("", zmq.SNDMORE)
else:
socket.send_string("", zmq.SNDMORE)
socket.send(struct.pack("<I", MESSAGE_TYPE_HEARTBEAT))
self.event_history.insert(EVENT_HISTORY_SENT_HEARTBEAT)
print("Sent heartbeat!")
class PredictionRequest:
"""
Parameters
----------
msg_id : bytes
The raw message id associated with the RPC
prediction request message
inputs :
One of [[byte]], [[int]], [[float]], [[double]], [string]
"""
def __init__(self, msg_id, inputs):
self.msg_id = msg_id
self.inputs = inputs
def __str__(self):
return self.inputs
class PredictionResponse:
header_buffer = bytearray(INITIAL_HEADER_BUFFER_SIZE)
def __init__(self, msg_id):
"""
Parameters
----------
msg_id : bytes
The message id associated with the PredictRequest
for which this is a response
"""
self.msg_id = msg_id
self.outputs = []
self.num_outputs = 0
def add_output(self, output):
"""
Parameters
----------
output : string
"""
if not isinstance(output, str):
output = unicode(output, "utf-8").encode("utf-8")
else:
output = output.encode('utf-8')
self.outputs.append(output)
self.num_outputs += 1
def send(self, socket, event_history):
"""
Sends the encapsulated response data via
the specified socket
Parameters
----------
socket : zmq.Socket
event_history : EventHistory
The RPC event history that should be
updated as a result of this operation
"""
assert self.num_outputs > 0
output_header, header_length_bytes = self._create_output_header()
if sys.version_info < (3, 0):
socket.send("", flags=zmq.SNDMORE)
else:
socket.send_string("", flags=zmq.SNDMORE)
socket.send(
struct.pack("<I", MESSAGE_TYPE_CONTAINER_CONTENT),
flags=zmq.SNDMORE)
socket.send(self.msg_id, flags=zmq.SNDMORE)
socket.send(struct.pack("<Q", header_length_bytes), flags=zmq.SNDMORE)
socket.send(output_header, flags=zmq.SNDMORE)
for idx in range(self.num_outputs):
if idx == self.num_outputs - 1:
# Don't use the `SNDMORE` flag if
# this is the last output being sent
socket.send(self.outputs[idx])
else:
socket.send(self.outputs[idx], flags=zmq.SNDMORE)
event_history.insert(EVENT_HISTORY_SENT_CONTAINER_CONTENT)
def _expand_buffer_if_necessary(self, size):
"""
If necessary, expands the reusable output
header buffer to accommodate content of the
specified size
size : int
The size, in bytes, that the buffer must be
able to store
"""
if len(PredictionResponse.header_buffer) < size:
PredictionResponse.header_buffer = bytearray(size * 2)
def _create_output_header(self):
"""
Returns
----------
(bytearray, int)
A tuple with the output header as the first
element and the header length as the second
element
"""
header_length = BYTES_PER_LONG * (len(self.outputs) + 1)
self._expand_buffer_if_necessary(header_length)
header_idx = 0
struct.pack_into("<Q", PredictionResponse.header_buffer, header_idx,
self.num_outputs)
header_idx += BYTES_PER_LONG
for output in self.outputs:
struct.pack_into("<Q", PredictionResponse.header_buffer,
header_idx, len(output))
header_idx += BYTES_PER_LONG
return PredictionResponse.header_buffer[:header_length], header_length
class FeedbackRequest():
def __init__(self, msg_id, content):
self.msg_id = msg_id
self.content = content
def __str__(self):
return self.content
class FeedbackResponse():
def __init__(self, msg_id, content):
self.msg_id = msg_id
self.content = content
def send(self, socket):
socket.send("", flags=zmq.SNDMORE)
socket.send(
struct.pack("<I", MESSAGE_TYPE_CONTAINER_CONTENT),
flags=zmq.SNDMORE)
socket.send(self.msg_id, flags=zmq.SNDMORE)
socket.send(self.content)
class ModelContainerBase(object):
def predict_ints(self, inputs):
pass
def predict_floats(self, inputs):
pass
def predict_doubles(self, inputs):
pass
def predict_bytes(self, inputs):
pass
def predict_strings(self, inputs):
pass
class RPCService:
def __init__(self, collect_metrics=True, read_config=True):
self.collect_metrics = collect_metrics
if read_config:
self._read_config_from_environment()
def _read_config_from_environment(self):
try:
self.model_name = os.environ["CLIPPER_MODEL_NAME"]
except KeyError:
print(
"ERROR: CLIPPER_MODEL_NAME environment variable must be set",
file=sys.stdout)
sys.exit(1)
try:
self.model_version = os.environ["CLIPPER_MODEL_VERSION"]
except KeyError:
print(
"ERROR: CLIPPER_MODEL_VERSION environment variable must be set",
file=sys.stdout)
sys.exit(1)
self.host = "127.0.0.1"
if "CLIPPER_IP" in os.environ:
self.host = os.environ["CLIPPER_IP"]
else:
print("Connecting to Clipper on localhost")
self.port = 7000
if "CLIPPER_PORT" in os.environ:
self.port = int(os.environ["CLIPPER_PORT"])
else:
print("Connecting to Clipper with default port: {port}".format(
port=self.port))
self.input_type = "doubles"
if "CLIPPER_INPUT_TYPE" in os.environ:
self.input_type = os.environ["CLIPPER_INPUT_TYPE"]
else:
print("Using default input type: doubles")
self.model_path = os.environ["CLIPPER_MODEL_PATH"]
def get_model_path(self):
return self.model_path
def get_input_type(self):
return self.input_type
def get_event_history(self):
if self.server:
return self.server.get_event_history()
else:
print("Cannot retrieve message history for inactive RPC service!")
raise
def start(self, model):
"""
Args:
model (object): The loaded model object ready to make predictions.
"""
try:
ip = socket.gethostbyname(self.host)
except socket.error as e:
print("Error resolving %s: %s" % (self.host, e))
sys.exit(1)
context = zmq.Context()
self.server = Server(context, ip, self.port)
self.server.model_name = self.model_name
self.server.model_version = self.model_version
self.server.model_input_type = string_to_input_type(self.input_type)
self.server.model = model
# Create a file named model_is_ready.check to show that model and container
# are ready
with open("/model_is_ready.check", "w") as f:
f.write("READY")
if self.collect_metrics:
start_metric_server()
add_metrics()
self.server.run(collect_metrics=self.collect_metrics)
def add_metrics():
config_file_path = 'metrics_config.yaml'
config_file_path = os.path.join(
os.path.split(os.path.realpath(__file__))[0], config_file_path)
with open(config_file_path, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
config = config['Model Container']
prefix = 'clipper_{}_'.format(config.pop('prefix'))
for name, spec in config.items():
metric_type = spec.get('type')
metric_description = spec.get('description')
name = prefix + name
if metric_type == 'Histogram' and 'bucket' in spec.keys():
buckets = spec['bucket'] + [float("inf")]
metrics.add_metric(name, metric_type, metric_description, buckets)
else: # This case includes default histogram buckets + all other
metrics.add_metric(name, metric_type, metric_description)
def start_metric_server():
DEBUG = False
cmd = ['python', '-m', 'clipper_admin.metrics.server']
if DEBUG:
cmd.append('DEBUG')
Popen(cmd)
# sleep is necessary because Popen returns immediately
time.sleep(5)
| apache-2.0 |
tarsqi/ttk | utilities/convert.py | 1 | 46925 | """convert.py
Some format conversion utilities. Run all commands below from the parent
directory using the -m option.
1. Convert LDC TimeBank into a modern TimeBank in the TTK format.
$ python -m utilities.convert --timebank2ttk TIMEBANK_DIR TTK_DIR
Converts TimeBank 1.2 as released by LDC into a version without makeinstance
tags using the TTK format. This should be run on the data/extra files in the
LDC distribution because those have the metadata that allow the TimeBank
metadata parser to find the DCT.
2. Convert Thyme format into TTK.
$ python -m utilities.convert --thyme2ttk THYME_TEXT_DIR THYME_ANNO_DIR TTK_DIR
Note that in the Thyme corpus we have annotation directories like
AnnotationData/coloncancer/Dev, whereas in the text directories we find
TextData/dev. The latter will have more files than the former. Files in
THYME_TEXT_DIR but not in THYME_ANNO_DIR will be ignored.
3. Convert the TTK format into HTML.
$ python -m utilities.convert --ttk2html TTK_DIR HTML_DIR
$ python -m utilities.convert --ttk2html --show-links TTK_DIR HTML_DIR
Converts TTK files in TTK_DIR into HTML files in HTML_DIR, if --show-links is
used links are shown in addition to the timexes and events.
4. Convert Knowtator format into TTK.
$ python -m utilities.convert --knowtator2ttk KNOWTATOR_DIR TTK_DIR
$ python -m utilities.convert --knowtator2ttk --tarsqi KNOWTATOR_DIR TTK_DIR
This is not a general conversion from any Knowtator output, it assumes that
the input annotations are all events, timexes and tlinks. If the --tarsqi
option is used then the event tags are put in the tarsqi_tags repository; by
default they are put in the source_tags repository (that is, they are not
considered Tarsqi results). Using the --tarsqi option can be useful for
evaluation.
$ python -m utilities.convert --knowtator2ttk --tarsqi TEXT_FILE TTK_FILE
Version for processing a single file. You only supply the text file, the code
assumes that there is a file TEXT_FILE.knowtator.xml with the annotations.
5. Convert TTK into Knowtator format.
$ python -m utilities.convert --ttk2knowtator TTK_FILE TEXT_FILE ANNO_FILE
IN PROGRESS.
6. Convert from ECB into TTK
$ python -m utilities.convert --ecb2ttk ECB_DIR OUT_DIR
THE ECB_DIR directory should be the top-level directory of the ECB
distribution (which has a README file and a data directory which includes
directories for each topic). Converted files are written to OUT_DIR, the
structure of which mirrors the structure of the ECB directory. Each TTK file
written to the output has the topic id as a metadata property as well as a
couple of MENTION tags in the source_tags section. These mentions tend to be
Evita events, but are impoverished in the sense that they only have three
attributes: begin, end and chain. The idea is that the information in
mentions will be merged with information in events.
Will at some point also include the ECB+ data.
"""
from __future__ import absolute_import, print_function, unicode_literals
import os, sys, getopt, codecs, time, glob
from xml.dom import minidom, Node
import tarsqi
from docmodel.main import create_source_parser
from docmodel.main import create_metadata_parser
from docmodel.main import create_docstructure_parser
from docmodel.document import TarsqiDocument, Tag, ProcessingStep
from library.main import TarsqiLibrary
from io import open
DEBUG = True
DEBUG = False
LIBRARY = TarsqiLibrary()
TIMEX = LIBRARY.timeml.TIMEX
EVENT = LIBRARY.timeml.EVENT
SIGNAL = LIBRARY.timeml.SIGNAL
ALINK = LIBRARY.timeml.ALINK
SLINK = LIBRARY.timeml.SLINK
TLINK = LIBRARY.timeml.TLINK
LID = LIBRARY.timeml.LID
TID = LIBRARY.timeml.TID
EID = LIBRARY.timeml.EID
EIID = LIBRARY.timeml.EIID
EVENTID = LIBRARY.timeml.EVENTID
RELTYPE = LIBRARY.timeml.RELTYPE
TIME_ID = LIBRARY.timeml.TIME_ID
EVENT_INSTANCE_ID = LIBRARY.timeml.EVENT_INSTANCE_ID
RELATED_TO_TIME = LIBRARY.timeml.RELATED_TO_TIME
RELATED_TO_EVENT_INSTANCE = LIBRARY.timeml.RELATED_TO_EVENT_INSTANCE
SUBORDINATED_EVENT_INSTANCE = LIBRARY.timeml.SUBORDINATED_EVENT_INSTANCE
MAKEINSTANCE = 'MAKEINSTANCE'
TIMEML_TAGS = (TIMEX, EVENT, MAKEINSTANCE, SIGNAL, ALINK, SLINK, TLINK)
### CONVERTING TIMEBANK INTO TTK
def convert_timebank(timebank_dir, out_dir):
"""Take the LDC TimeBank files in timebank_dir and create timebank files in
out_dir that are in the TTK format and do not have MAKEINSTANCE tags."""
# make the paths absolute so we do not get bitten by Tarsqi's habit of
# changing the current directory
timebank_dir = os.path.abspath(timebank_dir)
out_dir = os.path.abspath(out_dir)
_makedir(out_dir)
for fname in os.listdir(timebank_dir):
if fname.endswith('.tml'):
print(fname)
_convert_timebank_file(os.path.join(timebank_dir, fname),
os.path.join(out_dir, fname))
break
def _convert_timebank_file(infile, outfile):
tarsqidoc = _get_tarsqidoc(infile, "timebank")
for tagname in TIMEML_TAGS:
tarsqidoc.tags.import_tags(tarsqidoc.sourcedoc.tags, tagname)
tarsqidoc.sourcedoc.tags.remove_tags(tagname)
events = tarsqidoc.tags.find_tags(EVENT)
instances = tarsqidoc.tags.find_tags(MAKEINSTANCE)
instances = { i.attrs.get(EVENTID): i for i in instances }
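# Fold each MAKEINSTANCE tag's attributes into the EVENT tag it instantiates
# (matched on eventID/eid), then drop the now-redundant MAKEINSTANCE tags.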
for event in events:
instance = instances[event.attrs[EID]]
del instance.attrs[EVENTID]
event.attrs.update(instance.attrs)
tarsqidoc.tags.remove_tags(MAKEINSTANCE)
tarsqidoc.print_all(outfile)
### CONVERTING THYME INTO TTK
def convert_thyme(thyme_text_dir, thyme_anno_dir, out_dir, limit=sys.maxsize):
thyme_text_dir = os.path.abspath(thyme_text_dir)
thyme_anno_dir = os.path.abspath(thyme_anno_dir)
out_dir = os.path.abspath(out_dir)
_makedir(out_dir)
count = 0
for fname in os.listdir(thyme_anno_dir):
count += 1
if count > limit:
break
thyme_text_file = os.path.join(thyme_text_dir, fname)
out_file = os.path.join(out_dir, fname)
# in the annotations the file is actually a directory of annotations
anno_files = os.listdir(os.path.join(thyme_anno_dir, fname))
timeml_files = [f for f in anno_files if f.find('Temporal') > -1]
if timeml_files:
#if not fname == "ID090_clinic_265": continue
#if not fname == "ID090_path_266a": continue
print(fname)
thyme_anno_file = os.path.join(thyme_anno_dir, fname, timeml_files[0])
try:
_convert_thyme_file(thyme_text_file, thyme_anno_file, out_file)
except:
print("WARNING: error on %s" % fname)
def _convert_thyme_file(thyme_text_file, thyme_anno_file, out_file):
LinkID.reset()
tarsqidoc = _get_tarsqidoc(thyme_text_file, "text")
dom = minidom.parse(thyme_anno_file)
entities = [Entity(e) for e in dom.getElementsByTagName('entity')]
relations = [Relation(r) for r in dom.getElementsByTagName('relation')]
doctimes = [e for e in entities if e.type == 'DOCTIME']
sectiontimes = [e for e in entities if e.type == 'SECTIONTIME']
events = [e for e in entities if e.type == 'EVENT']
timexes = [e for e in entities if e.type == 'TIMEX3']
alinks = [r for r in relations if r.type == 'ALINK']
tlinks = [r for r in relations if r.type == 'TLINK']
event_idx = {}
timex_idx = {}
metadata = {'dct': None}
timexes = doctimes + sectiontimes + timexes
_add_timexes_to_tarsqidoc(timexes, timex_idx, metadata, tarsqidoc)
_add_events_to_tarsqidoc(events, event_idx, metadata['dct'], tarsqidoc)
_add_links_to_tarsqidoc(alinks + tlinks, timex_idx, event_idx, tarsqidoc)
tarsqidoc.print_all(out_file)
def _add_timexes_to_tarsqidoc(timexes, timex_idx, metadata, tarsqidoc):
for timex in timexes:
try:
begin, end = timex.span.split(',')
if timex.id in timex_idx:
print("WARNING: timex %s already exists" % timex.id)
timex_idx[timex.id] = begin
attrs = { TID: timex.id }
if timex.type == 'DOCTIME':
metadata['dct'] = timex
attrs['functionInDocument'] = 'DOCTIME'
doctime = tarsqidoc.text(int(begin), int(end))
month, day, year = doctime.split('/')
dct_value = "%04d%02d%02d" % (int(year), int(month), int(day))
tarsqidoc.metadata['dct'] = dct_value
elif timex.type == 'SECTIONTIME':
attrs['functionInDocument'] = 'SECTIONTIME'
tarsqidoc.sourcedoc.tags.add_tag('TIMEX3', begin, end, attrs)
except ValueError:
print("Skipping discontinuous timex")
def _add_events_to_tarsqidoc(events, event_idx, dct, tarsqidoc):
"""Add an event from the Thyme file. Also includes adding a TLINK to the DCT,
for this link we generate a new link identifier."""
dct_rel_id = 0
for event in events:
try:
begin, end = event.span.split(',')
if event.id in event_idx:
print("WARNING: event %s already exists" % event.id)
event_idx[event.id] = begin
# TODO: is it okay for these to be the same?
attrs = { EID: event.id, EIID: event.id}
tarsqidoc.sourcedoc.tags.add_tag('EVENT', begin, end, attrs)
dct_rel_id += 1
if dct is not None:
attrs = { LID: next(LinkID), #LID: next(LinkID),
RELTYPE: event.DocTimeRel,
EVENT_INSTANCE_ID: event.id,
RELATED_TO_TIME: dct.id }
tarsqidoc.sourcedoc.tags.add_tag('TLINK', None, None, attrs)
except ValueError:
print("Skipping discontinuous event")
def _add_links_to_tarsqidoc(links, timex_idx, event_idx, tarsqidoc):
"""Add a link from the Thyme file. Inherit the identifier on the Thyme
relation, even though it does not adhere to TimeML id formatting."""
for rel in links:
linkid = "r%s" % rel.id.split('@')[0]
sourceid = "%s%s" % (rel.Source.split('@')[1], rel.Source.split('@')[0])
targetid = "%s%s" % (rel.Target.split('@')[1], rel.Target.split('@')[0])
attrs = {
LID: linkid,
_source_attr_name(rel.type, sourceid, timex_idx, event_idx): sourceid,
_target_attr_name(rel.type, targetid, timex_idx, event_idx): targetid,
RELTYPE: rel.RelType}
tarsqidoc.sourcedoc.tags.add_tag(rel.type, None, None, attrs)
def _source_attr_name(link_type, source_id, timex_idx, event_idx):
if link_type == ALINK:
return EVENT_INSTANCE_ID
elif source_id in timex_idx:
return TIME_ID
elif source_id in event_idx:
return EVENT_INSTANCE_ID
else:
print("WARNING: cannot find attribute name for %s" % source_id)
def _target_attr_name(link_type, target_id, timex_idx, event_idx):
if link_type == ALINK:
return RELATED_TO_EVENT_INSTANCE
elif target_id in timex_idx:
return RELATED_TO_TIME
elif target_id in event_idx:
return RELATED_TO_EVENT_INSTANCE
else:
print("WARNING: cannot find attribute name for %s" % target_id)
class Entity(object):
"""An entity from a Thyme annotation, either an event or a timex (note that
a timex can be a DOCTIME or SECTIONTIME type)."""
def __init__(self, dom_element):
self.id = get_simple_value(dom_element, 'id')
self.span = get_simple_value(dom_element, 'span')
self.type = get_simple_value(dom_element, 'type')
self.properties = get_value(dom_element, 'properties')
self.id = "%s%s" % (self.id.split('@')[1], self.id.split('@')[0])
if self.type == EVENT:
self.DocTimeRel = get_simple_value(self.properties, 'DocTimeRel')
self.Polarity = get_simple_value(self.properties, 'Polarity')
elif self.type == TIMEX:
self.Class = get_simple_value(self.properties, 'Class')
def __str__(self):
if self.type == EVENT:
return "<%s id=%s span=%s DocTimeRel=%s Polarity=%s>" % \
(self.type, self.id, self.span, self.DocTimeRel, self.Polarity)
elif self.type == TIMEX:
return "<%s id=%s span=%s Class=%s>" % \
(self.type, self.id, self.span, self.Class)
else:
return "<%s id=%s span=%s>" % \
(self.type, self.id, self.span)
class Relation(object):
def __init__(self, dom_element):
self.id = get_simple_value(dom_element, 'id')
self.type = get_simple_value(dom_element, 'type')
self.properties = get_value(dom_element, 'properties')
self.Source = get_simple_value(self.properties, 'Source')
self.RelType = get_simple_value(self.properties, 'Type')
self.Target = get_simple_value(self.properties, 'Target')
def __str__(self):
return "<%s id=%s %s(%s,%s)>" % \
(self.type, self.id, self.RelType, self.Source, self.Target)
class LinkID(object):
"""Class to provide fresh identifiers for TLINK tags."""
# TODO: should probably combine this with TagID in the preprocessor wrapper
IDENTIFIER = 0
@classmethod
def next(cls):
cls.IDENTIFIER += 1
return "l%d" % cls.IDENTIFIER
@classmethod
def reset(cls):
cls.IDENTIFIER = 0
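# Illustrative sketch (not part of the original converter): LinkID hands out
# fresh TLINK identifiers and can be reset between documents. The helper below
# only demonstrates the counter; _add_events_to_tarsqidoc() relies on it when
# it links an event to the document creation time.
def _example_link_ids():
    """Show the identifiers produced by LinkID (illustration only)."""
    LinkID.reset()
    first = LinkID.next()    # "l1"
    second = LinkID.next()   # "l2"
    return first, second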
def get_value(entity, attr):
return entity.getElementsByTagName(attr)[0]
def get_simple_value(entity, attr):
return entity.getElementsByTagName(attr)[0].firstChild.data
### CONVERTING KNOWTATOR INTO TTK
class KnowtatorConverter(object):
"""Class responsible for converting two Knowtator files (a text file and an
annotation file) into a TTK file."""
def __init__(self, text_file=None, annotation_file=None, ttk_file=None):
"""Initialize input and output file names. The input is a text file and
        annotation file, the output is a ttk file. If no annotation file name is
        given, it will be created from the text file name using the default
        extension."""
self.text_file = os.path.abspath(text_file)
self.ttk_file = os.path.abspath(ttk_file)
if annotation_file is None:
self.anno_file = self.text_file + '.knowtator.xml'
else:
self.anno_file = os.path.abspath(annotation_file)
def convert(self, tarsqi_tags):
"""Reads the knowtator data and saves them as a TTK file."""
self.read()
self.export(tarsqi_tags)
def read(self):
"""Read all annotations and put all information (including attributes and
relations) in the annotations instance variable."""
self.dom = minidom.parse(self.anno_file)
self._read_annotations()
self._read_stringSlotMentions()
self._read_classMentions()
self._read_complexSlotMention()
self._enrich_annotations()
def _read_annotations(self):
"""Reads the annotation tags, which ontain the identifier, character offsets and
the text."""
self.annotations = {}
for ann_dom in self.dom.getElementsByTagName('annotation'):
annotation = KnowtatorAnnotation(ann_dom)
self.annotations[annotation.mention_id] = annotation
def _read_stringSlotMentions(self):
"""Reads the stringSlotMention tags, which contain the attributes."""
self.string_slot_mentions = {}
for ssm_dom in self.dom.getElementsByTagName('stringSlotMention'):
ssm = KnowtatorStringSlotMention(ssm_dom)
self.string_slot_mentions[ssm.mention_id] = ssm
def _read_classMentions(self):
"""Reads the classMention tags, which have the class (tagname) and links to
attributes and relations."""
self.class_mentions = {}
for cm_dom in self.dom.getElementsByTagName('classMention'):
cm = KnowtatorClassMention(cm_dom)
self.class_mentions[cm.mention_id] = cm
def _read_complexSlotMention(self):
"""Reads the complexSlotMention tags, which contain the relations."""
self.complex_slot_mentions = {}
for csm_dom in self.dom.getElementsByTagName('complexSlotMention'):
csm = KnowtatorComplexSlotMention(csm_dom)
self.complex_slot_mentions[csm.mention_id] = csm
def _enrich_annotations(self):
"""Adds information from other tags to the annotation tags."""
for cm in self.class_mentions.values():
anno = self.annotations[cm.mention_id]
anno.classname = cm.classname
for sm in cm.slot_mentions:
ssm = self.string_slot_mentions.get(sm)
if ssm is not None:
# add the attributes
anno.attributes[ssm.att] = ssm.val
else:
# add the relations
csm = self.complex_slot_mentions.get(sm)
anno.relations.append([csm.attribute, csm.csm_value])
def export(self, tarsqi_tags):
"""Saves all annotations in a TTK file."""
tarsqidoc = _get_tarsqidoc(self.text_file, "text")
for annotation in self.annotations.values():
tags = []
tag = annotation.as_ttk_tag()
tags.append(tag)
for rel in annotation.relations:
att1 = 'timeID' if annotation.classname == 'Timex3' else 'eventID'
val1 = tag.attrs.get('tid', tag.attrs.get('eiid'))
target = self.annotations[rel[1]]
target_tag = target.as_ttk_tag()
att2 = RELATED_TO_TIME
if target_tag.name == EVENT:
att2 = RELATED_TO_EVENT_INSTANCE
val2 = target_tag.attrs.get(TID, target_tag.attrs.get(EIID))
feats = { 'relType': rel[0], att1: val1, att2: val2 }
tags.append(Tag(TLINK, -1, -1, feats))
tagrepo = tarsqidoc.tags if tarsqi_tags else tarsqidoc.sourcedoc.tags
for t in tags:
tagrepo.append(t)
tarsqidoc.print_all(self.ttk_file)
def pretty_print(self):
for annotation in sorted(self.annotations.values()):
print()
annotation.pretty_print()
def pp_csms(self):
for key in sorted(self.complex_slot_mentions):
print(self.complex_slot_mentions[key])
def pp_ssms(self):
for key in sorted(self.string_slot_mentions):
print(self.string_slot_mentions[key])
def pp_cms(self):
for key in sorted(self.class_mentions):
print(self.class_mentions[key])
class KnowtatorAnnotation(object):
"""Implements the object for <annotation> tags, which contain just the text
span, but enriches them with information from the other tags. Instance
variables are:
mention_id - unique id, taken from the id attribute of the mention tag
start - from start attribute of span tag
end - from end attribute of span tag
text - cdata of spannedText tag
classname - taken from the classMention tag
attributes - taken from the classMention and stringSlotMention tags
relations - taken from the classMention and complexSlotMention tags
Here is an example of an annotation XML tag:
<annotation>
<mention id="EHOST_Instance_95" />
<annotator id="eHOST_2010">Ruth</annotator>
<span start="27" end="45" />
<spannedText>September 29, 2005</spannedText>
<creationDate>Fri Jul 07 14:17:59 CDT 2017</creationDate>
</annotation>
"""
@classmethod
def tag(cls, tag_identifier, tag, spanned_text):
"""This acts as a factory method that given some arguments creates an XML string
for a Knowtator annotation tag."""
return \
'<annotation>\n' + \
' <mention id="EHOST_Instance_%s" />\n' % tag_identifier + \
' <annotator id="%s">TTK</annotator>\n' % open('VERSION').read().strip() + \
' <span start="%s" end="%s" />\n' % (tag.begin, tag.end) + \
' <spannedText>%s</spannedText>\n' % spanned_text + \
' <creationDate>%s</creationDate>\n' % time.strftime("%Y%m%d", time.localtime())+ \
'</annotation>\n'
def __init__(self, annotation):
"""Reads the relevant information from the DOM object. Assumes there is
only one mention, span and spanned text."""
mention = annotation.getElementsByTagName('mention')[0]
span = annotation.getElementsByTagName('span')[0]
text = annotation.getElementsByTagName('spannedText')[0]
self.mention_id = mention.getAttribute('id')
self.start = int(span.getAttribute('start'))
self.end = int(span.getAttribute('end'))
self.text = text.firstChild.data
self.classname = None
self.attributes = {}
self.relations = []
def __eq__(self, other):
return self.start == other.start
def __ne__(self, other):
return self.start != other.start
def __lt__(self, other):
return self.start < other.start
def __le__(self, other):
return self.start <= other.start
def __gt__(self, other):
return self.start > other.start
def __ge__(self, other):
return self.start >= other.start
def __str__(self):
return "<annotation %s %s %s-%s '%s'>" \
% (self.mention_id, self.classname, self.start, self.end, self.text)
def as_ttk_tag(self):
tagname = self.classname.upper()
identifier = self.mention_id[15:]
if tagname == 'EVENT':
feats = { 'class': self.attributes['classType'],
'eid': 'e' + identifier,
'eiid': 'ei' + identifier }
elif tagname == 'TIMEX3':
# TODO: value is not the right format
feats = { 'type': self.attributes['typeInfo'],
'value': self.attributes['value'],
'tid': 't' + identifier }
else:
feats = {}
return Tag(tagname, self.start, self.end, feats)
def pretty_print(self):
print(self)
for att, val in self.attributes.items():
print(" %s=%s" % (att, val))
for relType, target in self.relations:
print(" %s %s" % (relType, target))
class KnowtatorClassMention(object):
"""Implements the objects for <classMention> tags, which contains the tag name
and links annotations to attributes and relations. Fields are:
    mention_id - value of the id attribute
classname - the id attribute of the mentionClass tag
slot_mentions - list from the id attribute of the hasSlotMention tags
A hasSlotMention tag points to either a stringSlotMention tag, which
contains an attribute-value pair, or to a complexSlotMention, which contains
a relation and points to an annotation. The classname is the tagname, for
example 'Event'.
XML example:
<classMention id="EHOST_Instance_95">
<hasSlotMention id="EHOST_Instance_110" />
<hasSlotMention id="EHOST_Instance_111" />
<mentionClass id="Timex3">September 29, 2005</mentionClass>
</classMention>
"""
@classmethod
def tag(cls, tag_identifier, tag, mention_class, spanned_text, slot_mentions):
"""Factory method for creating MentionClass XML strings."""
has_slot_mention_tags = []
for sm in slot_mentions:
has_slot_mention_tags.append(
'<hasSlotMention id="EHOST_Instance_%s" />\n' % sm)
return \
'<classMention id="EHOST_Instance_%s">\n' % tag_identifier + \
' ' + ' '.join(has_slot_mention_tags) + \
' <mentionClass id="%s">%s</mentionClass>\n' % (mention_class, spanned_text) + \
'</classMention>\n'
def __init__(self, cm):
self.mention_id = cm.getAttribute('id')
mention_class = cm.getElementsByTagName('mentionClass')[0]
slot_mentions = cm.getElementsByTagName('hasSlotMention')
self.classname = mention_class.getAttribute('id')
self.slot_mentions = [sm.getAttribute('id') for sm in slot_mentions]
def __str__(self):
return "<classMention %s %s %s>" \
% (self.mention_id, self.classname, ' '.join(self.slot_mentions))
class KnowtatorStringSlotMention(object):
"""Implements the object for the <stringSlotMentionTag> tags, which contain
attributes and their values. The fields are:
mention_id - the value of the id attribute
att - the id attribute of the mentionSlot tag
val - the value attribute of the stringSlotMentionValue tag
Example XML tag:
<stringSlotMention id="EHOST_Instance_111">
<mentionSlot id="value" />
<stringSlotMentionValue value="09/29/2005" />
</stringSlotMention>
"""
@classmethod
def tag(cls, identifier, attribute, value):
"""Factory method to generate an XML string for the stringSlotMention tag from
an identifier, attribute and value."""
return \
'<stringSlotMention id="EHOST_Instance_%s">\n' % identifier + \
' <mentionSlot id="%s" />\n' % attribute + \
' <stringSlotMentionValue value="%s" />\n' % value + \
'</stringSlotMention>\n'
def __init__(self, ssm):
"""Reads a DOM Element with tagName=stringSlotMention."""
mention_slot = ssm.getElementsByTagName('mentionSlot')[0]
ssm_value = ssm.getElementsByTagName('stringSlotMentionValue')[0]
self.mention_id = ssm.getAttribute('id')
self.att = mention_slot.getAttribute('id')
self.val = ssm_value.getAttribute('value')
def __str__(self):
return "<stringSlotMention %s %s=%s>" \
% (self.mention_id, self.att, self.val)
class KnowtatorComplexSlotMention(object):
"""Implements the object for <complexSlotMention> tags, which contain the
relations. Fields are:
mention_slot - the id attribute of the mentionSlot tag
attribute - the cdata of the attribute tag
csm_value - the value attribute of the complexSlotMentionValue tag
The id links back to the classMention which links this tag to an
annotation. The attribute has an id (always TLINK for tlinks) and uses cdata
for the value. The csm_value points to another annotation.
XML tag example:
<complexSlotMention id="EHOST_Instance_115">
<mentionSlot id="TLINK" />
<attribute id="relType">DURING</attribute>
<complexSlotMentionValue value="EHOST_Instance_98" />
</complexSlotMention>
"""
@classmethod
def tag(cls, identifier, reltype, target_identifier):
"""Factory method for complexSlotMention XML strings."""
return \
'<complexSlotMention id="EHOST_Instance_%s">\n' % identifier + \
' <mentionSlot id="TLINK" />\n' + \
' <attribute id="relType">%s</attribute>\n' % reltype + \
' <complexSlotMentionValue value="EHOST_Instance_%s" />\n' % target_identifier + \
'</complexSlotMention>\n'
def __init__(self, csm):
self.mention_id = csm.getAttribute('id')
mention_slot = csm.getElementsByTagName('mentionSlot')[0]
attribute = csm.getElementsByTagName('attribute')[0]
csm_value = csm.getElementsByTagName('complexSlotMentionValue')[0]
self.mention_slot = mention_slot.getAttribute('id')
self.attribute = attribute.firstChild.data
self.csm_value = csm_value.getAttribute('value')
def __str__(self):
return "<complexSlotMention %s %s %s %s>" \
% (self.mention_id, self.mention_slot, self.attribute, self.csm_value)
def convert_knowtator(knowtator_dir, ttk_dir, limit, tarsqi_tags=False):
"""Convert pairs of Knowtator files (source plus annotations) into single TTK
files. This just takes care of figuring out the individual files in the
    directories and then lets KnowtatorConverter do the work."""
knowtator_dir = os.path.abspath(knowtator_dir)
ttk_dir = os.path.abspath(ttk_dir)
_makedir(ttk_dir)
count = 0
# Read the list of file names. Note that with Knowtator we have a separate
# annotation file in addition to the source file: for each source file named
# 'file.txt' we also have an annotations file named 'file.txt.knowtator.xml'.
fnames = os.listdir(knowtator_dir)
fnames = [f for f in fnames if not f.endswith('knowtator.xml')]
for fname in fnames:
count += 1
if count > limit:
break
print(fname)
source_file = os.path.join(knowtator_dir, fname)
anno_file = os.path.join(knowtator_dir, fname + '.knowtator.xml')
# this assumes the .txt extension and replaces it with .ttk
ttk_fname = fname[:-3] + 'ttk'
ttk_file = os.path.join(ttk_dir, ttk_fname)
converter = KnowtatorConverter(text_file=source_file,
annotation_file=anno_file,
ttk_file=ttk_file)
converter.convert(tarsqi_tags)
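# Illustrative sketch (the file names below are hypothetical, not taken from
# the original code): a single Knowtator text/annotation pair can also be
# converted directly, without going through convert_knowtator().
def _example_convert_single_knowtator_file():
    converter = KnowtatorConverter(text_file="note.txt",
                                   annotation_file="note.txt.knowtator.xml",
                                   ttk_file="note.ttk")
    converter.convert(tarsqi_tags=False)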
### CONVERTING TTK INTO HTML
def convert_ttk_dir_into_html(ttk_dir, html_dir, showlinks, limit):
ttk_dir = os.path.abspath(ttk_dir)
html_dir = os.path.abspath(html_dir)
_makedir(html_dir)
print(ttk_dir)
print(html_dir)
index = open(os.path.join(html_dir, 'index.html'), 'w')
count = 0
for fname in os.listdir(ttk_dir):
count += 1
if count > limit:
break
ttk_file = os.path.join(ttk_dir, fname)
html_file = os.path.join(html_dir, fname + '.html')
index.write("<li><a href=%s.html>%s.html</a></li>\n" % (fname, fname))
convert_ttk_file_into_html(ttk_file, html_file, showlinks)
def convert_ttk_file_into_html(ttk_file, html_file, showlinks):
print("creating %s" % html_file)
ttk_file = os.path.abspath(ttk_file)
html_file = os.path.abspath(html_file)
tarsqidoc = _get_tarsqidoc(ttk_file, "ttk")
event_idx = _get_events(tarsqidoc)
timex_idx = _get_timexes(tarsqidoc)
entity_idx = _get_entities(event_idx, timex_idx)
link_idx = _get_links(tarsqidoc)
fh = _open_html_file(html_file)
count = 0
previous_was_space = False
current_sources = []
fh.write("<tr>\n<td>\n")
for char in tarsqidoc.sourcedoc.text:
if count in event_idx['close']:
_write_closing_tags(event_idx, count, 'event', fh, showlinks)
if count in timex_idx['close']:
_write_closing_tags(timex_idx, count, 'timex', fh, showlinks)
if count in event_idx['open']:
_write_opening_tags(event_idx, count, 'event', fh)
current_sources.append(event_idx['open'][count][0])
if count in timex_idx['open']:
_write_opening_tags(timex_idx, count, 'timex', fh)
current_sources.append(timex_idx['open'][count][0])
if char == "\n":
if previous_was_space and showlinks and current_sources:
fh.write("<tr><td width=40%>\n")
for entity in current_sources:
identifier = 'tid' if entity.name == 'TIMEX3' else 'eiid'
for link in link_idx.get(entity.attrs[identifier], []):
_write_link(link, entity_idx, fh)
fh.write("\n<tr valign=top>\n<td>\n")
previous_was_space = False
current_sources = []
else:
fh.write("<br/>\n")
previous_was_space = True
else:
fh.write(char)
count += 1
def _get_events(tarsqidoc):
"""Return an index of events indexed on the begin and end offset."""
events = tarsqidoc.tags.find_tags('EVENT')
event_idx = {'open': {}, 'close': {}}
for event in events:
event_idx['open'].setdefault(event.begin, []).append(event)
event_idx['close'].setdefault(event.end, []).append(event)
return event_idx
def _get_timexes(tarsqidoc):
"""Return an index of times indexed on the begin and end offset."""
timexes = tarsqidoc.tags.find_tags('TIMEX3')
timex_idx = {'open': {}, 'close': {}}
for timex in timexes:
timex_idx['open'].setdefault(timex.begin, []).append(timex)
timex_idx['close'].setdefault(timex.end, []).append(timex)
return timex_idx
def _get_entities(event_idx, timex_idx):
"""Return an index of all entities indexed on the event or timex id."""
entity_idx = {}
for elist in list(event_idx['open'].values()) + list(timex_idx['open'].values()):
entity = elist[0]
identifier = 'tid' if entity.name == 'TIMEX3' else 'eiid'
entity_idx[entity.attrs[identifier]] = entity
return entity_idx
def _get_links(tarsqidoc):
links = {}
for link in tarsqidoc.slinks() + tarsqidoc.tlinks():
source = link.attrs.get('timeID') \
or link.attrs.get('eventInstanceID')
target = link.attrs.get('relatedToTime') \
or link.attrs.get('relatedToEventInstance') \
or link.attrs.get('subordinatedEventInstance')
if source is None: print("WARNING, no source for %s" % link)
if target is None: print("WARNING, no target for %s" % link)
links.setdefault(source, []).append([link.attrs['lid'], source,
link.attrs['relType'], target])
return links
def _open_html_file(html_file):
fh = codecs.open(html_file, 'w', encoding="utf8")
fh.write("<html>\n<body>\n" +
"<style>\n" +
"body { font-size: 14pt; }\n" +
"sup { font-size: 10pt; font-weight: normal; }\n" +
"td { padding-top: 10pt; }\n" +
"event { xfont-weight: bold; color: darkred; }\n" +
"timex { xfont-weight: bold; color: darkblue; }\n" +
".link { color: darkgreen; }\n" +
"</style>\n" +
"<body>\n" +
"<table cellspacing=0 border=0>\n")
return fh
def _write_event_close(event, fh, showlinks):
if showlinks:
fh.write("<sup>%s:%s</sup></event>" % (event.eid, event.begin))
else:
fh.write("<sup>%s</sup></event>" % event.eid)
def _write_closing_tags(idx, count, tagname, fh, showlinks):
entities = idx['close'][count]
for entity in reversed(entities):
# for an identifier try the eid or tid
identifier = entity.attrs.get('eid') or entity.attrs.get('tid')
if showlinks:
fh.write("<sup>%s:%s</sup></%s>]" % (identifier, entity.begin, tagname))
else:
#fh.write("<sup>%s</sup></%s>]" % (identifier, tagname))
fh.write("<sup>%s</sup></%s>]" % (entity.begin, tagname))
def _write_opening_tags(idx, count, tagname, fh):
entities = idx['open'][count]
for entity in entities:
fh.write("[<%s>" % tagname)
def _write_link(link, entity_idx, fh):
link_id = link[0]
reltype = link[2]
source_id = link[1]
target_id = link[3]
    source_entity = entity_idx.get(source_id)
    source_begin = source_entity.begin
    target_entity = entity_idx.get(target_id)
    if target_entity is None:
        # warn and bail out before dereferencing a missing target entity
        print("WARNING: %s %s %s %s" % (link_id, source_id, reltype, target_id))
        return
    target_begin = target_entity.begin
    fh.write("<span class=link id=%s>[%s:%s %s %s:%s]</span>\n"
             % (link_id, source_id, source_begin,
                reltype.lower(), target_id, target_begin))
### CONVERTING TTK FILE INTO KNOWTATOR FORMAT
def convert_ttk_into_knowtator(ttk_file, text_file, annotation_file):
print("creating %s" % annotation_file)
ttk_file = os.path.abspath(ttk_file)
text_file = os.path.abspath(text_file)
annotation_file = os.path.abspath(annotation_file)
tarsqidoc = _get_tarsqidoc(ttk_file, "ttk")
full_text = tarsqidoc.sourcedoc.text
with codecs.open(text_file, 'w', encoding="utf-8") as text:
text.write(full_text)
with codecs.open(annotation_file, 'w', encoding="utf-8") as anno:
anno.write('<?xml version="1.0" encoding="UTF-8"?>\n')
anno.write('<annotations textSource="%s">\n' % os.path.basename(text_file))
tag_index = _create_tag_index(tarsqidoc.tags.tags)
for tag in tarsqidoc.tags.tags:
_knowtator_convert_tag(tag, tag_index, full_text, anno)
anno.write('</annotations>\n')
def _create_tag_index(tags):
"""Create an index for the event and timex tags. The keys in this index are the
eid or tid and the values are pairs of the tag itself and a list of tlinks for
which this tag is a source."""
tag_index = {}
tlink_index = {}
tlink_tags = []
for tag in tags:
tag_id = tag.get_identifier()
if tag.name.upper() in (EVENT, TIMEX):
tag_index[tag_id] = [tag]
elif tag.name.upper() in (TLINK,):
tlink_tags.append(tag)
for tag in tlink_tags:
source_identifier = tag.attrs.get(TIME_ID,
tag.attrs.get(EVENT_INSTANCE_ID))
tlink_index.setdefault(source_identifier, []).append(tag)
for tag_identifier in tag_index:
tlinks = tlink_index.get(tag_identifier, [])
tag_index[tag_identifier].append(tlinks)
#_print_tag_index(tag_index)
return tag_index
def _print_tag_index(tag_index):
for identifier in tag_index:
print("%s\n %s" % (identifier, tag_index[identifier][0]))
for tlink in tag_index[identifier][1]:
print (" %s" % tlink)
print()
def _knowtator_convert_tag(tag, tag_index, text, fh):
"""Take the Tag instance and generate Knowtator XML tags for it."""
tag_id = tag.get_identifier()
# only looping over events and timexes, link tags are derived from them
if tag.name.upper() in {TIMEX, EVENT}:
classname = tag.name
string_slot_mentions = [(KnowtatorID.new_identifier(), attr, val)
for attr, val in tag.attrs.items()]
spanned_text = text[tag.begin:tag.end]
annotation = KnowtatorAnnotation.tag(tag_id, tag, spanned_text)
ssm_tags = _knowtator_stringSlotMention_tags(string_slot_mentions)
complex_slot_mentions = []
# pull the links out of the index and create complex slot mentions for them
for link in tag_index[tag_id][1]:
target_id = link.attrs.get(RELATED_TO_EVENT_INSTANCE,
link.attrs.get(RELATED_TO_TIME))
complex_slot_mentions.append(
(KnowtatorID.new_identifier(), link.attrs.get('relType'), target_id))
csm_tags = _knowtator_complexSlotMention_tags(complex_slot_mentions)
slot_mentions = [sm[0] for sm in string_slot_mentions + complex_slot_mentions]
class_mention = KnowtatorClassMention.tag(
tag_id, tag, classname, spanned_text, slot_mentions)
fh.write(annotation + ''.join(ssm_tags) + ''.join(csm_tags) + class_mention)
def _knowtator_stringSlotMention_tags(string_slot_mentions):
def ssm_tag(ssm):
identifier, attribute, value = ssm
return KnowtatorStringSlotMention.tag(identifier, attribute, value)
return [ssm_tag(ssm) for ssm in string_slot_mentions]
def _knowtator_complexSlotMention_tags(complex_slot_mentions):
def csm_tag(csm):
identifier, reltype, target_id = csm
return KnowtatorComplexSlotMention.tag(identifier, reltype, target_id)
return [csm_tag(csm) for csm in complex_slot_mentions]
class KnowtatorID(object):
"""Just a class to generate identifiers."""
identifier = 0
@classmethod
def new_identifier(cls):
cls.identifier += 1
return cls.identifier
### 6. CONVERT ECB INTO TTK
class ECBConverter(object):
def __init__(self, ecb_directory, ttk_directory):
"""Collect specifications for each ECB file, which includes the ecb directory,
the target directory (used to write converted files), the topic name and the
        filename (includes the topic name, for example "1/7.ecb")."""
self.ecb_directory = ecb_directory
self.ttk_directory = ttk_directory
self.ecb_specs = []
self.ecb_files = []
self.topics = {}
filepaths = glob.glob(os.path.join(self.ecb_directory, 'data', '*', '*.ecb'))
for fp in filepaths:
datadir, fname = os.path.split(fp)
datadir, topic = os.path.split(datadir)
fname = os.path.join(topic, fname)
self.ecb_specs.append((ecb_directory, ttk_directory, topic, fname))
def convert(self, topic=None):
"""Convert TTK files into ECB files. Use the topic argument to limit processing
to one topic, the value can be an integer from 1 to 45 or a string representation
of that integer."""
if topic is not None:
# turn the topic into a string if it isn't one yet
topic = "%s" % topic
specs = [spec for spec in self.ecb_specs if spec[2] == topic]
else:
specs = self.ecb_specs
print("Converting %d files..." % len(specs))
for (ecb_directory, ttk_directory, topic, fname) in specs:
ecb_file = ECBFile(ecb_directory, ttk_directory, topic, fname)
self.ecb_files.append(ecb_file)
print(" %s" % ecb_file)
self._populate_topics()
self._write_files()
def _write_files(self):
for ecb_file in self.ecb_files:
ecb_file.write()
def _populate_topics(self):
for ecb_file in self.ecb_files:
self.topics.setdefault(ecb_file.topic, []).append(ecb_file)
def print_topics(self):
for topic, ecb_files in self.topics.items():
print(topic)
for ecb_file in ecb_files:
print(" %s" % ecb_file)
class ECBFile(object):
"""An ECBFile is an intermediary object used by the ECBConverter. It is given
the ECB and TTK directories, the topic identifier and the filename and then
creates a TarsqiDocument for the ECB file. """
def __init__(self, ecb_directory, ttk_directory, topic, fname):
self.topic = topic
self.ecb_file = os.path.join(ecb_directory, 'data', fname)
self.ttk_file = os.path.join(ttk_directory, 'data', fname)
self.tarsqidoc = _get_tarsqidoc_from_ecb_file(self.ecb_file)
self.tarsqidoc.sourcedoc.filename = self.ecb_file
# here we fake a pipeline for the metadata
pipeline = [ProcessingStep(pipeline=[("ECB_CONVERTER", None)])]
self.tarsqidoc.metadata['processing_steps'] = pipeline
        # and store the topic id since we do not want to rely just on the
# directory structure
self.tarsqidoc.metadata['topic'] = topic
def __str__(self):
return "<ECBFile topic=%s %s>" % (self.topic, self.ecb_file)
def write(self):
path, fname = os.path.split(self.ttk_file)
if not os.path.exists(path):
os.makedirs(path)
self.tarsqidoc.print_all(self.ttk_file)
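# Illustrative sketch (directory names are hypothetical): ECBConverter builds
# one ECBFile per *.ecb source and writes the TTK version under the same
# relative path in the target directory.
def _example_convert_ecb_topic():
    converter = ECBConverter("ECB1.0", "ECB1.0-ttk")
    converter.convert(topic=1)   # restrict to topic 1; omit for all topics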
### UTILITIES
def _makedir(directory):
if os.path.exists(directory):
exit("ERROR: directory already exists")
else:
os.makedirs(directory)
def _get_tarsqidoc(infile, source, metadata=True):
"""Return an instance of TarsqiDocument for infile"""
opts = [("--source-format", source), ("--trap-errors", "False")]
t = tarsqi.Tarsqi(opts, infile, None)
t.source_parser.parse_file(t.input, t.tarsqidoc)
t.metadata_parser.parse(t.tarsqidoc)
return t.tarsqidoc
def _get_tarsqidoc_from_ecb_file(infile):
"""Return an instance of TarsqiDocument for infile. This is a special case of
_get_tarsqidoc() for ECB files since those do not allow us to use a source
    parser in the regular way because ECB files are neither of the text type nor
the xml type."""
opts = [("--source-format", "xml"), ("--trap-errors", "False")]
t = tarsqi.Tarsqi(opts, infile, None)
# create an XML string from the ECB file
with codecs.open(t.input, encoding="utf8") as fh:
content = "<text>%s</text>" % fh.read().replace('&', '&')
t.source_parser.parse_string(content, t.tarsqidoc)
t.metadata_parser.parse(t.tarsqidoc)
for mention in t.tarsqidoc.sourcedoc.tags.find_tags('MENTION'):
# this is somewhat dangerous because we are not checking whether there
# is a double quote in the string, but those do not happen to occur
text = t.tarsqidoc.text(mention.begin, mention.end)
mention.attrs["text"] = text
return t.tarsqidoc
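# Illustrative invocations (the script name "convert.py" is assumed here; the
# long options are the ones defined in long_options below):
#
#   python convert.py --knowtator2ttk source.txt output.ttk
#   python convert.py --knowtator2ttk --tarsqi knowtator_dir/ ttk_dir/
#   python convert.py --ttk2html --show-links input.ttk output.html
#   python convert.py --ecb2ttk ECB_DIR TTK_DIR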
if __name__ == '__main__':
long_options = ['timebank2ttk', 'thyme2ttk', 'ttk2html',
'knowtator2ttk', 'ttk2knowtator', 'ecb2ttk',
'tarsqi', 'show-links']
(opts, args) = getopt.getopt(sys.argv[1:], 'i:o:', long_options)
opts = { k: v for k, v in opts }
limit = 10 if DEBUG else sys.maxsize
if '--timebank2ttk' in opts:
convert_timebank(args[0], args[1])
elif '--thyme2ttk' in opts:
convert_thyme(args[0], args[1], args[2], limit)
elif '--knowtator2ttk' in opts:
tarsqi_tags = True if '--tarsqi' in opts else False
if os.path.isfile(args[0]):
if os.path.exists(args[1]):
exit("ERROR: output file already exists")
converter = KnowtatorConverter(text_file=args[0], ttk_file=args[1])
converter.convert(tarsqi_tags)
elif os.path.isdir(args[0]):
if os.path.exists(args[1]):
exit("ERROR: output directory already exists")
convert_knowtator(args[0], args[1], limit, tarsqi_tags)
else:
exit("ERROR: input is not a file or directory")
elif '--ttk2html' in opts:
limit = 10 if DEBUG else sys.maxsize
showlinks = True if '--show-links' in opts else False
if os.path.exists(args[1]):
exit("ERROR: output '%s' already exists" % args[1])
elif os.path.isdir(args[0]):
convert_ttk_dir_into_html(args[0], args[1], showlinks, limit)
elif os.path.isfile(args[0]):
convert_ttk_file_into_html(args[0], args[1], showlinks)
else:
exit("ERROR: incorrect input")
elif '--ttk2knowtator' in opts:
convert_ttk_into_knowtator(args[0], args[1], args[2])
elif '--ecb2ttk' in opts:
indir = os.path.abspath(args[0])
outdir = os.path.abspath(args[1])
ECBConverter(indir, outdir).convert()
| apache-2.0 |
zeapo/UberWriter_old | uberwriter_lib/Builder.py | 6 | 11432 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2012, Wolf Vollprecht <[email protected]>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
### DO NOT EDIT THIS FILE ###
'''Enhances builder connections, provides object to access glade objects'''
from gi.repository import GObject, Gtk # pylint: disable=E0611
import inspect
import functools
import logging
logger = logging.getLogger('uberwriter_lib')
from xml.etree.cElementTree import ElementTree
# this module is big so uses some conventional prefixes and postfixes
# *s list, except self.widgets is a dictionary
# *_dict dictionary
# *name string
# ele_* element in a ElementTree
# pylint: disable=R0904
# the many public methods are a feature of Gtk.Builder
class Builder(Gtk.Builder):
''' extra features
connects glade defined handler to default_handler if necessary
auto connects widget to handler with matching name or alias
auto connects several widgets to a handler via multiple aliases
allow handlers to lookup widget name
logs every connection made, and any on_* not made
'''
def __init__(self):
Gtk.Builder.__init__(self)
self.widgets = {}
self.glade_handler_dict = {}
self.connections = []
self._reverse_widget_dict = {}
# pylint: disable=R0201
# this is a method so that a subclass of Builder can redefine it
def default_handler(self,
handler_name, filename, *args, **kwargs):
'''helps the apprentice guru
glade defined handlers that do not exist come here instead.
        An apprentice guru might wonder which signal does what he wants;
        now he can define any likely candidates in glade and notice which
ones get triggered when he plays with the project.
this method does not appear in Gtk.Builder'''
logger.debug('''tried to call non-existent function:%s()
expected in %s
args:%s
kwargs:%s''', handler_name, filename, args, kwargs)
# pylint: enable=R0201
def get_name(self, widget):
''' allows a handler to get the name (id) of a widget
this method does not appear in Gtk.Builder'''
return self._reverse_widget_dict.get(widget)
def add_from_file(self, filename):
'''parses xml file and stores wanted details'''
Gtk.Builder.add_from_file(self, filename)
# extract data for the extra interfaces
tree = ElementTree()
tree.parse(filename)
ele_widgets = tree.getiterator("object")
for ele_widget in ele_widgets:
name = ele_widget.attrib['id']
widget = self.get_object(name)
# populate indexes - a dictionary of widgets
self.widgets[name] = widget
# populate a reversed dictionary
self._reverse_widget_dict[widget] = name
# populate connections list
ele_signals = ele_widget.findall("signal")
connections = [
(name,
ele_signal.attrib['name'],
ele_signal.attrib['handler']) for ele_signal in ele_signals]
if connections:
self.connections.extend(connections)
ele_signals = tree.getiterator("signal")
for ele_signal in ele_signals:
self.glade_handler_dict.update(
{ele_signal.attrib["handler"]: None})
def connect_signals(self, callback_obj):
'''connect the handlers defined in glade
reports successful and failed connections
and logs call to missing handlers'''
filename = inspect.getfile(callback_obj.__class__)
callback_handler_dict = dict_from_callback_obj(callback_obj)
connection_dict = {}
connection_dict.update(self.glade_handler_dict)
connection_dict.update(callback_handler_dict)
for item in connection_dict.items():
if item[1] is None:
# the handler is missing so reroute to default_handler
handler = functools.partial(
self.default_handler, item[0], filename)
connection_dict[item[0]] = handler
# replace the run time warning
logger.warn("expected handler '%s' in %s",
item[0], filename)
# connect glade define handlers
Gtk.Builder.connect_signals(self, connection_dict)
# let's tell the user how we applied the glade design
for connection in self.connections:
widget_name, signal_name, handler_name = connection
logger.debug("connect builder by design '%s', '%s', '%s'",
widget_name, signal_name, handler_name)
def get_ui(self, callback_obj=None, by_name=True):
'''Creates the ui object with widgets as attributes
connects signals by 2 methods
this method does not appear in Gtk.Builder'''
result = UiFactory(self.widgets)
# Hook up any signals the user defined in glade
if callback_obj is not None:
# connect glade define handlers
self.connect_signals(callback_obj)
if by_name:
auto_connect_by_name(callback_obj, self)
return result
# pylint: disable=R0903
# this class deliberately does not provide any public interfaces
# apart from the glade widgets
class UiFactory():
''' provides an object with attributes as glade widgets'''
def __init__(self, widget_dict):
self._widget_dict = widget_dict
for (widget_name, widget) in widget_dict.items():
setattr(self, widget_name, widget)
# Mangle any non-usable names (like with spaces or dashes)
# into pythonic ones
cannot_message = """cannot bind ui.%s, name already exists
consider using a pythonic name instead of design name '%s'"""
consider_message = """consider using a pythonic name instead of design name '%s'"""
for (widget_name, widget) in widget_dict.items():
pyname = make_pyname(widget_name)
if pyname != widget_name:
if hasattr(self, pyname):
logger.debug(cannot_message, pyname, widget_name)
else:
logger.debug(consider_message, widget_name)
setattr(self, pyname, widget)
def iterator():
'''Support 'for o in self' '''
return iter(widget_dict.values())
setattr(self, '__iter__', iterator)
def __getitem__(self, name):
'access as dictionary where name might be non-pythonic'
return self._widget_dict[name]
# pylint: enable=R0903
def make_pyname(name):
''' mangles non-pythonic names into pythonic ones'''
pyname = ''
for character in name:
if (character.isalpha() or character == '_' or
(pyname and character.isdigit())):
pyname += character
else:
pyname += '_'
return pyname
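# Illustrative examples (added for clarity, not part of the original module)
# of the mangling performed by make_pyname():
#
#   make_pyname("button-ok")  ->  "button_ok"
#   make_pyname("label 2")    ->  "label_2"
#   make_pyname("1st")        ->  "_st"    (a leading digit is replaced by '_')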
# Until bug https://bugzilla.gnome.org/show_bug.cgi?id=652127 is fixed, we
# need to reimplement inspect.getmembers. GObject introspection doesn't
# play nice with it.
def getmembers(obj, check):
members = []
for k in dir(obj):
try:
attr = getattr(obj, k)
except:
continue
if check(attr):
members.append((k, attr))
members.sort()
return members
def dict_from_callback_obj(callback_obj):
'''a dictionary interface to callback_obj'''
methods = getmembers(callback_obj, inspect.ismethod)
aliased_methods = [x[1] for x in methods if hasattr(x[1], 'aliases')]
# a method may have several aliases
#~ @alias('on_btn_foo_clicked')
#~ @alias('on_tool_foo_activate')
#~ on_menu_foo_activate():
#~ pass
alias_groups = [(x.aliases, x) for x in aliased_methods]
aliases = []
for item in alias_groups:
for alias in item[0]:
aliases.append((alias, item[1]))
dict_methods = dict(methods)
dict_aliases = dict(aliases)
results = {}
results.update(dict_methods)
results.update(dict_aliases)
return results
def auto_connect_by_name(callback_obj, builder):
'''finds handlers like on_<widget_name>_<signal> and connects them
i.e. find widget,signal pair in builder and call
widget.connect(signal, on_<widget_name>_<signal>)'''
callback_handler_dict = dict_from_callback_obj(callback_obj)
for item in builder.widgets.items():
(widget_name, widget) = item
signal_ids = []
try:
widget_type = type(widget)
while widget_type:
signal_ids.extend(GObject.signal_list_ids(widget_type))
widget_type = GObject.type_parent(widget_type)
except RuntimeError: # pylint wants a specific error
pass
signal_names = [GObject.signal_name(sid) for sid in signal_ids]
# Now, automatically find any the user didn't specify in glade
for sig in signal_names:
# using convention suggested by glade
sig = sig.replace("-", "_")
handler_names = ["on_%s_%s" % (widget_name, sig)]
# Using the convention that the top level window is not
# specified in the handler name. That is use
# on_destroy() instead of on_windowname_destroy()
if widget is callback_obj:
handler_names.append("on_%s" % sig)
do_connect(item, sig, handler_names,
callback_handler_dict, builder.connections)
log_unconnected_functions(callback_handler_dict, builder.connections)
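# Illustrative sketch (the widget and class names are hypothetical): with a
# Glade widget called "btn_ok", auto_connect_by_name() would hook up a handler
# defined on the callback object as
#
#   class Controller:
#       def on_btn_ok_clicked(self, widget):
#           ...
#
# and, for the top-level window itself, a plain on_destroy() handler.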
def do_connect(item, signal_name, handler_names,
callback_handler_dict, connections):
'''connect this signal to an unused handler'''
widget_name, widget = item
for handler_name in handler_names:
target = handler_name in callback_handler_dict.keys()
connection = (widget_name, signal_name, handler_name)
duplicate = connection in connections
if target and not duplicate:
widget.connect(signal_name, callback_handler_dict[handler_name])
connections.append(connection)
logger.debug("connect builder by name '%s','%s', '%s'",
widget_name, signal_name, handler_name)
def log_unconnected_functions(callback_handler_dict, connections):
'''log functions like on_* that we could not connect'''
connected_functions = [x[2] for x in connections]
handler_names = callback_handler_dict.keys()
unconnected = [x for x in handler_names if x.startswith('on_')]
for handler_name in connected_functions:
try:
unconnected.remove(handler_name)
except ValueError:
pass
for handler_name in unconnected:
logger.debug("Not connected to builder '%s'", handler_name)
| gpl-3.0 |
manipopopo/tensorflow | tensorflow/python/grappler/cluster_test.py | 33 | 7748 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the swig wrapper of clusters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import device_properties_pb2
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.grappler import cluster
from tensorflow.python.grappler import item
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class ClusterTest(test.TestCase):
def testBasic(self):
with ops.Graph().as_default() as g:
a = random_ops.random_uniform(shape=())
b = random_ops.random_uniform(shape=())
c = a + b
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(c)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
grappler_cluster = cluster.Cluster(
disable_detailed_stats=False, disable_timeline=False)
op_perfs, run_time, step_stats = grappler_cluster.MeasureCosts(
grappler_item)
self.assertTrue(run_time > 0)
self.assertEqual(len(op_perfs), 4)
self.assertTrue(step_stats.dev_stats)
def testNoDetailedStats(self):
with ops.Graph().as_default() as g:
a = random_ops.random_uniform(shape=())
b = random_ops.random_uniform(shape=())
c = a + b
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(c)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
grappler_cluster = cluster.Cluster(disable_detailed_stats=True)
op_perfs, run_time, step_stats = grappler_cluster.MeasureCosts(
grappler_item)
self.assertTrue(run_time > 0)
self.assertEqual(len(op_perfs), 0)
self.assertEqual(len(step_stats.dev_stats), 0)
def testMemoryEstimates(self):
with ops.Graph().as_default() as g:
with ops.device('/job:localhost/replica:0/task:0/device:CPU:0'):
a = random_ops.random_uniform(shape=())
b = random_ops.random_uniform(shape=())
c = a + b
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(c)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
grappler_cluster = cluster.Cluster(
disable_detailed_stats=True, disable_timeline=True)
peak_mem = grappler_cluster.DeterminePeakMemoryUsage(grappler_item)
self.assertLessEqual(1, len(peak_mem))
snapshot = peak_mem['/job:localhost/replica:0/task:0/device:CPU:0']
peak_usage = snapshot[0]
self.assertEqual(52, peak_usage)
live_tensors = snapshot[1]
self.assertEqual(15, len(live_tensors))
def testVirtualCluster(self):
with ops.Graph().as_default() as g:
with ops.device('/device:GPU:0'):
a = random_ops.random_uniform(shape=[1024, 1024])
b = random_ops.random_uniform(shape=[1024, 1024])
c = a + b
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(c)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
device_properties = device_properties_pb2.DeviceProperties(
type='GPU',
frequency=1000,
num_cores=60,
environment={
'architecture': '7'
})
named_device = device_properties_pb2.NamedDevice(
properties=device_properties, name='/device:GPU:0')
grappler_cluster = cluster.Cluster(
disable_detailed_stats=False,
disable_timeline=False,
devices=[named_device])
op_perfs, run_time, _ = grappler_cluster.MeasureCosts(grappler_item)
self.assertEqual(run_time, 0.000545)
self.assertEqual(len(op_perfs), 15)
estimated_perf = grappler_cluster.EstimatePerformance(named_device)
self.assertEqual(7680.0, estimated_perf)
def testContext(self):
with ops.Graph().as_default() as g:
a = random_ops.random_uniform(shape=())
b = random_ops.random_uniform(shape=())
c = a + b
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(c)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
with cluster.Provision(
disable_detailed_stats=False, disable_timeline=False) as gcluster:
op_perfs, run_time, step_stats = gcluster.MeasureCosts(grappler_item)
self.assertTrue(run_time > 0)
self.assertEqual(len(op_perfs), 4)
self.assertTrue(step_stats.dev_stats)
def testAvailableOps(self):
with cluster.Provision() as gcluster:
op_names = gcluster.ListAvailableOps()
self.assertTrue('Add' in op_names)
self.assertTrue('MatMul' in op_names)
self.assertEqual(op_names, sorted(op_names))
def testSupportDevices(self):
with ops.Graph().as_default() as g:
a = random_ops.random_uniform(shape=(2, 3))
b = random_ops.random_uniform(shape=(2, 3))
c = a + b
dims = math_ops.range(0, array_ops.rank(c), 1)
d = math_ops.reduce_sum(a, axis=dims)
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(d)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
device_properties = device_properties_pb2.DeviceProperties(
type='GPU', frequency=1000, num_cores=60)
named_gpu = device_properties_pb2.NamedDevice(
properties=device_properties, name='/GPU:0')
device_properties = device_properties_pb2.DeviceProperties(
type='CPU', frequency=3000, num_cores=6)
named_cpu = device_properties_pb2.NamedDevice(
properties=device_properties, name='/CPU:0')
virtual_cluster = cluster.Cluster(devices=[named_cpu, named_gpu])
supported_dev = virtual_cluster.GetSupportedDevices(grappler_item)
self.assertEqual(supported_dev['add'], ['/CPU:0', '/GPU:0'])
self.assertEqual(supported_dev['Sum'], ['/CPU:0', '/GPU:0'])
self.assertEqual(supported_dev['range'], ['/CPU:0', '/GPU:0'])
real_cluster = cluster.Cluster()
supported_dev = real_cluster.GetSupportedDevices(grappler_item)
if test.is_gpu_available():
self.assertEqual(supported_dev['add'], [
'/job:localhost/replica:0/task:0/device:CPU:0',
'/job:localhost/replica:0/task:0/device:GPU:0'
])
self.assertEqual(supported_dev['Sum'], [
'/job:localhost/replica:0/task:0/device:CPU:0',
'/job:localhost/replica:0/task:0/device:GPU:0'
])
# The axis tensor must reside on the host
self.assertEqual(supported_dev['range'],
['/job:localhost/replica:0/task:0/device:CPU:0'])
else:
self.assertEqual(supported_dev['add'],
['/job:localhost/replica:0/task:0/device:CPU:0'])
if __name__ == '__main__':
test.main()
| apache-2.0 |
seem-sky/kbengine | kbe/res/scripts/common/Lib/test/test_sys_settrace.py | 99 | 24959 | # Testing the line trace facility.
from test import support
import unittest
import sys
import difflib
import gc
# A very basic example. If this fails, we're in deep trouble.
def basic():
return 1
basic.events = [(0, 'call'),
(1, 'line'),
(1, 'return')]
# Many of the tests below are tricky because they involve pass statements.
# If there is implicit control flow around a pass statement (in an except
# clause or else clause) under what conditions do you set a line number
# following that clause?
# The entire "while 0:" statement is optimized away. No code
# exists for it, so the line numbers skip directly from "del x"
# to "x = 1".
def arigo_example():
x = 1
del x
while 0:
pass
x = 1
arigo_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(5, 'line'),
(5, 'return')]
# check that lines consisting of just one instruction get traced:
def one_instr_line():
x = 1
del x
x = 1
one_instr_line.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(3, 'return')]
def no_pop_tops(): # 0
x = 1 # 1
for a in range(2): # 2
if a: # 3
x = 1 # 4
else: # 5
x = 1 # 6
no_pop_tops.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(6, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(2, 'line'),
(2, 'return')]
def no_pop_blocks():
y = 1
while not y:
bla
x = 1
no_pop_blocks.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(4, 'line'),
(4, 'return')]
def called(): # line -3
x = 1
def call(): # line 0
called()
call.events = [(0, 'call'),
(1, 'line'),
(-3, 'call'),
(-2, 'line'),
(-2, 'return'),
(1, 'return')]
def raises():
raise Exception
def test_raise():
try:
raises()
except Exception as exc:
x = 1
test_raise.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(-3, 'call'),
(-2, 'line'),
(-2, 'exception'),
(-2, 'return'),
(2, 'exception'),
(3, 'line'),
(4, 'line'),
(4, 'return')]
def _settrace_and_return(tracefunc):
sys.settrace(tracefunc)
sys._getframe().f_back.f_trace = tracefunc
def settrace_and_return(tracefunc):
_settrace_and_return(tracefunc)
settrace_and_return.events = [(1, 'return')]
def _settrace_and_raise(tracefunc):
sys.settrace(tracefunc)
sys._getframe().f_back.f_trace = tracefunc
raise RuntimeError
def settrace_and_raise(tracefunc):
try:
_settrace_and_raise(tracefunc)
except RuntimeError as exc:
pass
settrace_and_raise.events = [(2, 'exception'),
(3, 'line'),
(4, 'line'),
(4, 'return')]
# implicit return example
# This test is interesting because of the else: pass
# part of the code. The code generated for the true
# part of the if contains a jump past the else branch.
# The compiler then generates an implicit "return None"
# Internally, the compiler visits the pass statement
# and stores its line number for use on the next instruction.
# The next instruction is the implicit return None.
def ireturn_example():
a = 5
b = 5
if a == b:
b = a+1
else:
pass
ireturn_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(6, 'line'),
(6, 'return')]
# Tight loop with while(1) example (SF #765624)
def tightloop_example():
items = range(0, 3)
try:
i = 0
while 1:
b = items[i]; i+=1
except IndexError:
pass
tightloop_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'exception'),
(6, 'line'),
(7, 'line'),
(7, 'return')]
def tighterloop_example():
items = range(1, 4)
try:
i = 0
while 1: i = items[i]
except IndexError:
pass
tighterloop_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'exception'),
(5, 'line'),
(6, 'line'),
(6, 'return')]
def generator_function():
try:
yield True
"continued"
finally:
"finally"
def generator_example():
# any() will leave the generator before its end
x = any(generator_function())
# the following lines were not traced
for x in range(10):
y = x
generator_example.events = ([(0, 'call'),
(2, 'line'),
(-6, 'call'),
(-5, 'line'),
(-4, 'line'),
(-4, 'return'),
(-4, 'call'),
(-4, 'exception'),
(-1, 'line'),
(-1, 'return')] +
[(5, 'line'), (6, 'line')] * 10 +
[(5, 'line'), (5, 'return')])
class Tracer:
def __init__(self):
self.events = []
def trace(self, frame, event, arg):
self.events.append((frame.f_lineno, event))
return self.trace
def traceWithGenexp(self, frame, event, arg):
(o for o in [1])
self.events.append((frame.f_lineno, event))
return self.trace
class TraceTestCase(unittest.TestCase):
# Disable gc collection when tracing, otherwise the
# deallocators may be traced as well.
def setUp(self):
self.using_gc = gc.isenabled()
gc.disable()
self.addCleanup(sys.settrace, sys.gettrace())
def tearDown(self):
if self.using_gc:
gc.enable()
def compare_events(self, line_offset, events, expected_events):
events = [(l - line_offset, e) for (l, e) in events]
if events != expected_events:
self.fail(
"events did not match expectation:\n" +
"\n".join(difflib.ndiff([str(x) for x in expected_events],
[str(x) for x in events])))
def run_and_compare(self, func, events):
tracer = Tracer()
sys.settrace(tracer.trace)
func()
sys.settrace(None)
self.compare_events(func.__code__.co_firstlineno,
tracer.events, events)
def run_test(self, func):
self.run_and_compare(func, func.events)
def run_test2(self, func):
tracer = Tracer()
func(tracer.trace)
sys.settrace(None)
self.compare_events(func.__code__.co_firstlineno,
tracer.events, func.events)
def test_set_and_retrieve_none(self):
sys.settrace(None)
assert sys.gettrace() is None
def test_set_and_retrieve_func(self):
def fn(*args):
pass
sys.settrace(fn)
try:
assert sys.gettrace() is fn
finally:
sys.settrace(None)
def test_01_basic(self):
self.run_test(basic)
def test_02_arigo(self):
self.run_test(arigo_example)
def test_03_one_instr(self):
self.run_test(one_instr_line)
def test_04_no_pop_blocks(self):
self.run_test(no_pop_blocks)
def test_05_no_pop_tops(self):
self.run_test(no_pop_tops)
def test_06_call(self):
self.run_test(call)
def test_07_raise(self):
self.run_test(test_raise)
def test_08_settrace_and_return(self):
self.run_test2(settrace_and_return)
def test_09_settrace_and_raise(self):
self.run_test2(settrace_and_raise)
def test_10_ireturn(self):
self.run_test(ireturn_example)
def test_11_tightloop(self):
self.run_test(tightloop_example)
def test_12_tighterloop(self):
self.run_test(tighterloop_example)
def test_13_genexp(self):
self.run_test(generator_example)
# issue1265: if the trace function contains a generator,
# and if the traced function contains another generator
# that is not completely exhausted, the trace stopped.
# Worse: the 'finally' clause was not invoked.
tracer = Tracer()
sys.settrace(tracer.traceWithGenexp)
generator_example()
sys.settrace(None)
self.compare_events(generator_example.__code__.co_firstlineno,
tracer.events, generator_example.events)
def test_14_onliner_if(self):
def onliners():
if True: False
else: True
return 0
self.run_and_compare(
onliners,
[(0, 'call'),
(1, 'line'),
(3, 'line'),
(3, 'return')])
def test_15_loops(self):
# issue1750076: "while" expression is skipped by debugger
def for_example():
for x in range(2):
pass
self.run_and_compare(
for_example,
[(0, 'call'),
(1, 'line'),
(2, 'line'),
(1, 'line'),
(2, 'line'),
(1, 'line'),
(1, 'return')])
def while_example():
# While expression should be traced on every loop
x = 2
while x > 0:
x -= 1
self.run_and_compare(
while_example,
[(0, 'call'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(3, 'line'),
(4, 'line'),
(3, 'line'),
(3, 'return')])
def test_16_blank_lines(self):
namespace = {}
exec("def f():\n" + "\n" * 256 + " pass", namespace)
self.run_and_compare(
namespace["f"],
[(0, 'call'),
(257, 'line'),
(257, 'return')])
class RaisingTraceFuncTestCase(unittest.TestCase):
def setUp(self):
self.addCleanup(sys.settrace, sys.gettrace())
def trace(self, frame, event, arg):
"""A trace function that raises an exception in response to a
specific trace event."""
if event == self.raiseOnEvent:
raise ValueError # just something that isn't RuntimeError
else:
return self.trace
def f(self):
"""The function to trace; raises an exception if that's the case
we're testing, so that the 'exception' trace event fires."""
if self.raiseOnEvent == 'exception':
x = 0
y = 1/x
else:
return 1
def run_test_for_event(self, event):
"""Tests that an exception raised in response to the given event is
handled OK."""
self.raiseOnEvent = event
try:
for i in range(sys.getrecursionlimit() + 1):
sys.settrace(self.trace)
try:
self.f()
except ValueError:
pass
else:
self.fail("exception not raised!")
except RuntimeError:
self.fail("recursion counter not reset")
# Test the handling of exceptions raised by each kind of trace event.
def test_call(self):
self.run_test_for_event('call')
def test_line(self):
self.run_test_for_event('line')
def test_return(self):
self.run_test_for_event('return')
def test_exception(self):
self.run_test_for_event('exception')
def test_trash_stack(self):
def f():
for i in range(5):
print(i) # line tracing will raise an exception at this line
def g(frame, why, extra):
if (why == 'line' and
frame.f_lineno == f.__code__.co_firstlineno + 2):
raise RuntimeError("i am crashing")
return g
sys.settrace(g)
try:
f()
except RuntimeError:
# the test is really that this doesn't segfault:
import gc
gc.collect()
else:
self.fail("exception not propagated")
def test_exception_arguments(self):
def f():
x = 0
# this should raise an error
x.no_such_attr
def g(frame, event, arg):
if (event == 'exception'):
type, exception, trace = arg
self.assertIsInstance(exception, Exception)
return g
existing = sys.gettrace()
try:
sys.settrace(g)
try:
f()
except AttributeError:
# this is expected
pass
finally:
sys.settrace(existing)
# 'Jump' tests: assigning to frame.f_lineno within a trace function
# moves the execution position - it's how debuggers implement a Jump
# command (aka. "Set next statement").
class JumpTracer:
"""Defines a trace function that jumps from one place to another,
with the source and destination lines of the jump being defined by
the 'jump' property of the function under test."""
def __init__(self, function):
self.function = function
self.jumpFrom = function.jump[0]
self.jumpTo = function.jump[1]
self.done = False
def trace(self, frame, event, arg):
if not self.done and frame.f_code == self.function.__code__:
firstLine = frame.f_code.co_firstlineno
if event == 'line' and frame.f_lineno == firstLine + self.jumpFrom:
# Cope with non-integer self.jumpTo (because of
# no_jump_to_non_integers below).
try:
frame.f_lineno = firstLine + self.jumpTo
except TypeError:
frame.f_lineno = self.jumpTo
self.done = True
return self.trace
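# Illustrative sketch (not part of the original test file): a jump test wires
# the tracer up roughly like this, with 'jump' and 'output' attributes on the
# function under test and the collected output compared afterwards:
#
#   tracer = JumpTracer(jump_simple_forwards)
#   sys.settrace(tracer.trace)
#   output = []
#   jump_simple_forwards(output)
#   sys.settrace(None)
#   assert output == jump_simple_forwards.output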
# The first set of 'jump' tests are for things that are allowed:
def jump_simple_forwards(output):
output.append(1)
output.append(2)
output.append(3)
jump_simple_forwards.jump = (1, 3)
jump_simple_forwards.output = [3]
def jump_simple_backwards(output):
output.append(1)
output.append(2)
jump_simple_backwards.jump = (2, 1)
jump_simple_backwards.output = [1, 1, 2]
def jump_out_of_block_forwards(output):
for i in 1, 2:
output.append(2)
for j in [3]: # Also tests jumping over a block
output.append(4)
output.append(5)
jump_out_of_block_forwards.jump = (3, 5)
jump_out_of_block_forwards.output = [2, 5]
def jump_out_of_block_backwards(output):
output.append(1)
for i in [1]:
output.append(3)
for j in [2]: # Also tests jumping over a block
output.append(5)
output.append(6)
output.append(7)
jump_out_of_block_backwards.jump = (6, 1)
jump_out_of_block_backwards.output = [1, 3, 5, 1, 3, 5, 6, 7]
def jump_to_codeless_line(output):
output.append(1)
# Jumping to this line should skip to the next one.
output.append(3)
jump_to_codeless_line.jump = (1, 2)
jump_to_codeless_line.output = [3]
def jump_to_same_line(output):
output.append(1)
output.append(2)
output.append(3)
jump_to_same_line.jump = (2, 2)
jump_to_same_line.output = [1, 2, 3]
# Tests jumping within a finally block, and over one.
def jump_in_nested_finally(output):
try:
output.append(2)
finally:
output.append(4)
try:
output.append(6)
finally:
output.append(8)
output.append(9)
jump_in_nested_finally.jump = (4, 9)
jump_in_nested_finally.output = [2, 9]
# The second set of 'jump' tests are for things that are not allowed:
def no_jump_too_far_forwards(output):
try:
output.append(2)
output.append(3)
except ValueError as e:
output.append('after' in str(e))
no_jump_too_far_forwards.jump = (3, 6)
no_jump_too_far_forwards.output = [2, True]
def no_jump_too_far_backwards(output):
try:
output.append(2)
output.append(3)
except ValueError as e:
output.append('before' in str(e))
no_jump_too_far_backwards.jump = (3, -1)
no_jump_too_far_backwards.output = [2, True]
# Test each kind of 'except' line.
def no_jump_to_except_1(output):
try:
output.append(2)
except:
e = sys.exc_info()[1]
output.append('except' in str(e))
no_jump_to_except_1.jump = (2, 3)
no_jump_to_except_1.output = [True]
def no_jump_to_except_2(output):
try:
output.append(2)
except ValueError:
e = sys.exc_info()[1]
output.append('except' in str(e))
no_jump_to_except_2.jump = (2, 3)
no_jump_to_except_2.output = [True]
def no_jump_to_except_3(output):
try:
output.append(2)
except ValueError as e:
output.append('except' in str(e))
no_jump_to_except_3.jump = (2, 3)
no_jump_to_except_3.output = [True]
def no_jump_to_except_4(output):
try:
output.append(2)
except (ValueError, RuntimeError) as e:
output.append('except' in str(e))
no_jump_to_except_4.jump = (2, 3)
no_jump_to_except_4.output = [True]
def no_jump_forwards_into_block(output):
try:
output.append(2)
for i in 1, 2:
output.append(4)
except ValueError as e:
output.append('into' in str(e))
no_jump_forwards_into_block.jump = (2, 4)
no_jump_forwards_into_block.output = [True]
def no_jump_backwards_into_block(output):
try:
for i in 1, 2:
output.append(3)
output.append(4)
except ValueError as e:
output.append('into' in str(e))
no_jump_backwards_into_block.jump = (4, 3)
no_jump_backwards_into_block.output = [3, 3, True]
def no_jump_into_finally_block(output):
try:
try:
output.append(3)
x = 1
finally:
output.append(6)
except ValueError as e:
output.append('finally' in str(e))
no_jump_into_finally_block.jump = (4, 6)
no_jump_into_finally_block.output = [3, 6, True] # The 'finally' still runs
def no_jump_out_of_finally_block(output):
try:
try:
output.append(3)
finally:
output.append(5)
output.append(6)
except ValueError as e:
output.append('finally' in str(e))
no_jump_out_of_finally_block.jump = (5, 1)
no_jump_out_of_finally_block.output = [3, True]
# This verifies the line-numbers-must-be-integers rule.
def no_jump_to_non_integers(output):
try:
output.append(2)
except ValueError as e:
output.append('integer' in str(e))
no_jump_to_non_integers.jump = (2, "Spam")
no_jump_to_non_integers.output = [True]
def jump_across_with(output):
with open(support.TESTFN, "wb") as fp:
pass
with open(support.TESTFN, "wb") as fp:
pass
jump_across_with.jump = (1, 3)
jump_across_with.output = []
# This verifies that you can't set f_lineno via _getframe or similar
# trickery.
def no_jump_without_trace_function():
try:
previous_frame = sys._getframe().f_back
previous_frame.f_lineno = previous_frame.f_lineno
except ValueError as e:
# This is the exception we wanted; make sure the error message
# talks about trace functions.
if 'trace' not in str(e):
raise
else:
# Something's wrong - the expected exception wasn't raised.
raise RuntimeError("Trace-function-less jump failed to fail")
class JumpTestCase(unittest.TestCase):
def setUp(self):
self.addCleanup(sys.settrace, sys.gettrace())
sys.settrace(None)
def compare_jump_output(self, expected, received):
if received != expected:
self.fail( "Outputs don't match:\n" +
"Expected: " + repr(expected) + "\n" +
"Received: " + repr(received))
def run_test(self, func):
tracer = JumpTracer(func)
sys.settrace(tracer.trace)
output = []
func(output)
sys.settrace(None)
self.compare_jump_output(func.output, output)
def test_01_jump_simple_forwards(self):
self.run_test(jump_simple_forwards)
def test_02_jump_simple_backwards(self):
self.run_test(jump_simple_backwards)
def test_03_jump_out_of_block_forwards(self):
self.run_test(jump_out_of_block_forwards)
def test_04_jump_out_of_block_backwards(self):
self.run_test(jump_out_of_block_backwards)
def test_05_jump_to_codeless_line(self):
self.run_test(jump_to_codeless_line)
def test_06_jump_to_same_line(self):
self.run_test(jump_to_same_line)
def test_07_jump_in_nested_finally(self):
self.run_test(jump_in_nested_finally)
def test_08_no_jump_too_far_forwards(self):
self.run_test(no_jump_too_far_forwards)
def test_09_no_jump_too_far_backwards(self):
self.run_test(no_jump_too_far_backwards)
def test_10_no_jump_to_except_1(self):
self.run_test(no_jump_to_except_1)
def test_11_no_jump_to_except_2(self):
self.run_test(no_jump_to_except_2)
def test_12_no_jump_to_except_3(self):
self.run_test(no_jump_to_except_3)
def test_13_no_jump_to_except_4(self):
self.run_test(no_jump_to_except_4)
def test_14_no_jump_forwards_into_block(self):
self.run_test(no_jump_forwards_into_block)
def test_15_no_jump_backwards_into_block(self):
self.run_test(no_jump_backwards_into_block)
def test_16_no_jump_into_finally_block(self):
self.run_test(no_jump_into_finally_block)
def test_17_no_jump_out_of_finally_block(self):
self.run_test(no_jump_out_of_finally_block)
def test_18_no_jump_to_non_integers(self):
self.run_test(no_jump_to_non_integers)
def test_19_no_jump_without_trace_function(self):
# Must set sys.settrace(None) in setUp(), else condition is not
# triggered.
no_jump_without_trace_function()
def test_jump_across_with(self):
self.addCleanup(support.unlink, support.TESTFN)
self.run_test(jump_across_with)
def test_20_large_function(self):
d = {}
exec("""def f(output): # line 0
x = 0 # line 1
y = 1 # line 2
''' # line 3
%s # lines 4-1004
''' # line 1005
x += 1 # line 1006
output.append(x) # line 1007
return""" % ('\n' * 1000,), d)
f = d['f']
f.jump = (2, 1007)
f.output = [0]
self.run_test(f)
def test_jump_to_firstlineno(self):
# This tests that PDB can jump back to the first line in a
# file. See issue #1689458. It can only be triggered in a
# function call if the function is defined on a single line.
code = compile("""
# Comments don't count.
output.append(2) # firstlineno is here.
output.append(3)
output.append(4)
""", "<fake module>", "exec")
class fake_function:
__code__ = code
jump = (2, 0)
tracer = JumpTracer(fake_function)
sys.settrace(tracer.trace)
namespace = {"output": []}
exec(code, namespace)
sys.settrace(None)
self.compare_jump_output([2, 3, 2, 3, 4], namespace["output"])
def test_main():
support.run_unittest(
TraceTestCase,
RaisingTraceFuncTestCase,
JumpTestCase
)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
melqkiades/yelp | source/python/topicmodeling/external/topicensemble/unsupervised/nmf.py | 2 | 1622 | import numpy as np
from sklearn import decomposition
import logging as log
# --------------------------------------------------------------
class SklNMF:
"""
Wrapper class backed by the scikit-learn package NMF implementation.
"""
def __init__( self, max_iters = 100, init_strategy = "random" ):
		self.max_iters = max_iters
self.init_strategy = init_strategy
self.W = None
self.H = None
def apply( self, X, k = 2, init_W = None, init_H = None ):
"""
Apply NMF to the specified document-term matrix X.
"""
self.W = None
self.H = None
random_seed = np.random.randint( 1, 100000 )
if not (init_W is None or init_H is None):
model = decomposition.NMF( init="custom", n_components=k, max_iter=self.max_iters, random_state = random_seed )
self.W = model.fit_transform( X, W=init_W, H=init_H )
else:
model = decomposition.NMF( init=self.init_strategy, n_components=k, max_iter=self.max_iters, random_state = random_seed )
self.W = model.fit_transform( X )
self.H = model.components_
def rank_terms( self, topic_index, top = -1 ):
"""
Return the top ranked terms for the specified topic, generated during the last NMF run.
"""
if self.H is None:
raise ValueError("No results for previous run available")
# NB: reverse
top_indices = np.argsort( self.H[topic_index,:] )[::-1]
# truncate if necessary
if top < 1 or top > len(top_indices):
return top_indices
return top_indices[0:top]
def generate_partition( self ):
if self.W is None:
raise ValueError("No results for previous run available")
return np.argmax( self.W, axis = 1 ).flatten().tolist()
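# --------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module). The tiny document-term matrix below is invented purely for
# demonstration, and k=2 is an arbitrary choice of topic count.
if __name__ == "__main__":
	X = np.array([
		[2.0, 1.0, 0.0, 0.0],
		[3.0, 0.0, 0.0, 1.0],
		[0.0, 0.0, 4.0, 2.0],
		[0.0, 1.0, 3.0, 2.0],
	])
	nmf = SklNMF(max_iters=50)
	nmf.apply(X, k=2)
	# Top-ranked term indices for each topic, then the document partition
	for topic_index in range(2):
		print("Topic %d: %s" % (topic_index, nmf.rank_terms(topic_index, top=3)))
	print("Partition:", nmf.generate_partition())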
| lgpl-2.1 |
40223145c2g18/c2g18 | wsgi/static/Brython2.1.0-20140419-113919/Lib/unittest/case.py | 743 | 48873 | """Test case implementation"""
import sys
import functools
import difflib
import pprint
import re
import warnings
import collections
from . import result
from .util import (strclass, safe_repr, _count_diff_all_purpose,
_count_diff_hashable)
__unittest = True
DIFF_OMITTED = ('\nDiff is %s characters long. '
'Set self.maxDiff to None to see it.')
class SkipTest(Exception):
"""
Raise this exception in a test to skip it.
Usually you can use TestCase.skipTest() or one of the skipping decorators
instead of raising this directly.
"""
class _ExpectedFailure(Exception):
"""
Raise this when a test is expected to fail.
This is an implementation detail.
"""
def __init__(self, exc_info):
super(_ExpectedFailure, self).__init__()
self.exc_info = exc_info
class _UnexpectedSuccess(Exception):
"""
The test was supposed to fail, but it didn't!
"""
class _Outcome(object):
def __init__(self):
self.success = True
self.skipped = None
self.unexpectedSuccess = None
self.expectedFailure = None
self.errors = []
self.failures = []
def _id(obj):
return obj
def skip(reason):
"""
Unconditionally skip a test.
"""
def decorator(test_item):
if not isinstance(test_item, type):
@functools.wraps(test_item)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
test_item = skip_wrapper
test_item.__unittest_skip__ = True
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIf(condition, reason):
"""
Skip a test if the condition is true.
"""
if condition:
return skip(reason)
return _id
def skipUnless(condition, reason):
"""
Skip a test unless the condition is true.
"""
if not condition:
return skip(reason)
return _id
def expectedFailure(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception:
raise _ExpectedFailure(sys.exc_info())
raise _UnexpectedSuccess
return wrapper
class _AssertRaisesBaseContext(object):
def __init__(self, expected, test_case, callable_obj=None,
expected_regex=None):
self.expected = expected
self.test_case = test_case
if callable_obj is not None:
try:
self.obj_name = callable_obj.__name__
except AttributeError:
self.obj_name = str(callable_obj)
else:
self.obj_name = None
if isinstance(expected_regex, (bytes, str)):
expected_regex = re.compile(expected_regex)
self.expected_regex = expected_regex
self.msg = None
def _raiseFailure(self, standardMsg):
msg = self.test_case._formatMessage(self.msg, standardMsg)
raise self.test_case.failureException(msg)
def handle(self, name, callable_obj, args, kwargs):
"""
If callable_obj is None, assertRaises/Warns is being used as a
context manager, so check for a 'msg' kwarg and return self.
If callable_obj is not None, call it passing args and kwargs.
"""
if callable_obj is None:
self.msg = kwargs.pop('msg', None)
return self
with self:
callable_obj(*args, **kwargs)
class _AssertRaisesContext(_AssertRaisesBaseContext):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
if self.obj_name:
self._raiseFailure("{} not raised by {}".format(exc_name,
self.obj_name))
else:
self._raiseFailure("{} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
# store exception, without traceback, for later retrieval
self.exception = exc_value.with_traceback(None)
if self.expected_regex is None:
return True
expected_regex = self.expected_regex
if not expected_regex.search(str(exc_value)):
self._raiseFailure('"{}" does not match "{}"'.format(
expected_regex.pattern, str(exc_value)))
return True
class _AssertWarnsContext(_AssertRaisesBaseContext):
"""A context manager used to implement TestCase.assertWarns* methods."""
def __enter__(self):
# The __warningregistry__'s need to be in a pristine state for tests
# to work properly.
for v in sys.modules.values():
if getattr(v, '__warningregistry__', None):
v.__warningregistry__ = {}
self.warnings_manager = warnings.catch_warnings(record=True)
self.warnings = self.warnings_manager.__enter__()
warnings.simplefilter("always", self.expected)
return self
def __exit__(self, exc_type, exc_value, tb):
self.warnings_manager.__exit__(exc_type, exc_value, tb)
if exc_type is not None:
# let unexpected exceptions pass through
return
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
first_matching = None
for m in self.warnings:
w = m.message
if not isinstance(w, self.expected):
continue
if first_matching is None:
first_matching = w
if (self.expected_regex is not None and
not self.expected_regex.search(str(w))):
continue
# store warning for later retrieval
self.warning = w
self.filename = m.filename
self.lineno = m.lineno
return
# Now we simply try to choose a helpful failure message
if first_matching is not None:
self._raiseFailure('"{}" does not match "{}"'.format(
self.expected_regex.pattern, str(first_matching)))
if self.obj_name:
self._raiseFailure("{} not triggered by {}".format(exc_name,
self.obj_name))
else:
self._raiseFailure("{} not triggered".format(exc_name))
class TestCase(object):
"""A class whose instances are single test cases.
By default, the test code itself should be placed in a method named
'runTest'.
If the fixture may be used for many test cases, create as
many test methods as are needed. When instantiating such a TestCase
subclass, specify in the constructor arguments the name of the test method
that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction
and deconstruction of the test's environment ('fixture') can be
implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class
__init__ method must always be called. It is important that subclasses
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
When subclassing TestCase, you can set these attributes:
* failureException: determines which exception will be raised when
the instance's assertion methods fail; test methods raising this
exception will be deemed to have 'failed' rather than 'errored'.
* longMessage: determines whether long messages (including repr of
objects used in assert methods) will be printed on failure in *addition*
to any explicit message passed.
* maxDiff: sets the maximum length of a diff in failure messages
by assert methods using difflib. It is looked up as an instance
attribute so can be configured by individual tests if required.
"""
failureException = AssertionError
longMessage = True
maxDiff = 80*8
# If a string is longer than _diffThreshold, use normal comparison instead
# of difflib. See #11763.
_diffThreshold = 2**16
# Attribute used by TestSuite for classSetUp
_classSetupFailed = False
def __init__(self, methodName='runTest'):
"""Create an instance of the class that will use the named test
method when executed. Raises a ValueError if the instance does
not have a method with the specified name.
"""
self._testMethodName = methodName
self._outcomeForDoCleanups = None
self._testMethodDoc = 'No test'
try:
testMethod = getattr(self, methodName)
except AttributeError:
if methodName != 'runTest':
# we allow instantiation with no explicit method name
# but not an *incorrect* or missing method name
raise ValueError("no such test method in %s: %s" %
(self.__class__, methodName))
else:
self._testMethodDoc = testMethod.__doc__
self._cleanups = []
# Map types to custom assertEqual functions that will compare
# instances of said type in more detail to generate a more useful
# error message.
self._type_equality_funcs = {}
self.addTypeEqualityFunc(dict, 'assertDictEqual')
self.addTypeEqualityFunc(list, 'assertListEqual')
self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
self.addTypeEqualityFunc(set, 'assertSetEqual')
self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
self.addTypeEqualityFunc(str, 'assertMultiLineEqual')
def addTypeEqualityFunc(self, typeobj, function):
"""Add a type specific assertEqual style function to compare a type.
This method is for use by TestCase subclasses that need to register
their own type equality functions to provide nicer error messages.
Args:
typeobj: The data type to call this function on when both values
are of the same type in assertEqual().
function: The callable taking two arguments and an optional
msg= argument that raises self.failureException with a
useful error message when the two arguments are not equal.
"""
self._type_equality_funcs[typeobj] = function
def addCleanup(self, function, *args, **kwargs):
"""Add a function, with arguments, to be called when the test is
completed. Functions added are called on a LIFO basis and are
called after tearDown on test failure or success.
Cleanup items are called even if setUp fails (unlike tearDown)."""
self._cleanups.append((function, args, kwargs))
def setUp(self):
"Hook method for setting up the test fixture before exercising it."
pass
def tearDown(self):
"Hook method for deconstructing the test fixture after testing it."
pass
@classmethod
def setUpClass(cls):
"Hook method for setting up class fixture before running tests in the class."
@classmethod
def tearDownClass(cls):
"Hook method for deconstructing the class fixture after running all tests in the class."
def countTestCases(self):
return 1
def defaultTestResult(self):
return result.TestResult()
def shortDescription(self):
"""Returns a one-line description of the test, or None if no
description has been provided.
The default implementation of this method returns the first line of
the specified test method's docstring.
"""
doc = self._testMethodDoc
return doc and doc.split("\n")[0].strip() or None
def id(self):
return "%s.%s" % (strclass(self.__class__), self._testMethodName)
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self._testMethodName == other._testMethodName
def __hash__(self):
return hash((type(self), self._testMethodName))
def __str__(self):
return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
def __repr__(self):
return "<%s testMethod=%s>" % \
(strclass(self.__class__), self._testMethodName)
def _addSkip(self, result, reason):
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None:
addSkip(self, reason)
else:
warnings.warn("TestResult has no addSkip method, skips not reported",
RuntimeWarning, 2)
result.addSuccess(self)
def _executeTestPart(self, function, outcome, isTest=False):
try:
function()
except KeyboardInterrupt:
raise
except SkipTest as e:
outcome.success = False
outcome.skipped = str(e)
except _UnexpectedSuccess:
exc_info = sys.exc_info()
outcome.success = False
if isTest:
outcome.unexpectedSuccess = exc_info
else:
outcome.errors.append(exc_info)
except _ExpectedFailure:
outcome.success = False
exc_info = sys.exc_info()
if isTest:
outcome.expectedFailure = exc_info
else:
outcome.errors.append(exc_info)
except self.failureException:
outcome.success = False
outcome.failures.append(sys.exc_info())
exc_info = sys.exc_info()
except:
outcome.success = False
outcome.errors.append(sys.exc_info())
def run(self, result=None):
orig_result = result
if result is None:
result = self.defaultTestResult()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
if (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)):
# If the class or method was skipped.
try:
skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
or getattr(testMethod, '__unittest_skip_why__', ''))
self._addSkip(result, skip_why)
finally:
result.stopTest(self)
return
try:
outcome = _Outcome()
self._outcomeForDoCleanups = outcome
self._executeTestPart(self.setUp, outcome)
if outcome.success:
self._executeTestPart(testMethod, outcome, isTest=True)
self._executeTestPart(self.tearDown, outcome)
self.doCleanups()
if outcome.success:
result.addSuccess(self)
else:
if outcome.skipped is not None:
self._addSkip(result, outcome.skipped)
for exc_info in outcome.errors:
result.addError(self, exc_info)
for exc_info in outcome.failures:
result.addFailure(self, exc_info)
if outcome.unexpectedSuccess is not None:
addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
if addUnexpectedSuccess is not None:
addUnexpectedSuccess(self)
else:
warnings.warn("TestResult has no addUnexpectedSuccess method, reporting as failures",
RuntimeWarning)
result.addFailure(self, outcome.unexpectedSuccess)
if outcome.expectedFailure is not None:
addExpectedFailure = getattr(result, 'addExpectedFailure', None)
if addExpectedFailure is not None:
addExpectedFailure(self, outcome.expectedFailure)
else:
warnings.warn("TestResult has no addExpectedFailure method, reporting as passes",
RuntimeWarning)
result.addSuccess(self)
return result
finally:
result.stopTest(self)
if orig_result is None:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
def doCleanups(self):
"""Execute all cleanup functions. Normally called for you after
tearDown."""
outcome = self._outcomeForDoCleanups or _Outcome()
while self._cleanups:
function, args, kwargs = self._cleanups.pop()
part = lambda: function(*args, **kwargs)
self._executeTestPart(part, outcome)
# return this for backwards compatibility
        # even though we no longer use it internally
return outcome.success
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the test without collecting errors in a TestResult"""
self.setUp()
getattr(self, self._testMethodName)()
self.tearDown()
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
function(*args, **kwargs)
def skipTest(self, reason):
"""Skip this test."""
raise SkipTest(reason)
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise self.failureException(msg)
def assertFalse(self, expr, msg=None):
"""Check that the expression is false."""
if expr:
msg = self._formatMessage(msg, "%s is not false" % safe_repr(expr))
raise self.failureException(msg)
def assertTrue(self, expr, msg=None):
"""Check that the expression is true."""
if not expr:
msg = self._formatMessage(msg, "%s is not true" % safe_repr(expr))
raise self.failureException(msg)
def _formatMessage(self, msg, standardMsg):
"""Honour the longMessage attribute when generating failure messages.
If longMessage is False this means:
* Use only an explicit message if it is provided
* Otherwise use the standard message for the assert
If longMessage is True:
* Use the standard message
* If an explicit message is provided, plus ' : ' and the explicit message
"""
if not self.longMessage:
return msg or standardMsg
if msg is None:
return standardMsg
try:
# don't switch to '{}' formatting in Python 2.X
# it changes the way unicode input is handled
return '%s : %s' % (standardMsg, msg)
except UnicodeDecodeError:
return '%s : %s' % (safe_repr(standardMsg), safe_repr(msg))
def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
"""Fail unless an exception of class excClass is raised
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
raised, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
If called with callableObj omitted or None, will return a
context object used like this::
with self.assertRaises(SomeException):
do_something()
An optional keyword argument 'msg' can be provided when assertRaises
is used as a context object.
The context manager keeps a reference to the exception as
the 'exception' attribute. This allows you to inspect the
exception after the assertion::
with self.assertRaises(SomeException) as cm:
do_something()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, 3)
"""
context = _AssertRaisesContext(excClass, self, callableObj)
return context.handle('assertRaises', callableObj, args, kwargs)
def assertWarns(self, expected_warning, callable_obj=None, *args, **kwargs):
"""Fail unless a warning of class warnClass is triggered
by callable_obj when invoked with arguments args and keyword
arguments kwargs. If a different type of warning is
triggered, it will not be handled: depending on the other
warning filtering rules in effect, it might be silenced, printed
out, or raised as an exception.
If called with callable_obj omitted or None, will return a
context object used like this::
with self.assertWarns(SomeWarning):
do_something()
An optional keyword argument 'msg' can be provided when assertWarns
is used as a context object.
The context manager keeps a reference to the first matching
warning as the 'warning' attribute; similarly, the 'filename'
and 'lineno' attributes give you information about the line
of Python code from which the warning was triggered.
This allows you to inspect the warning after the assertion::
with self.assertWarns(SomeWarning) as cm:
do_something()
the_warning = cm.warning
self.assertEqual(the_warning.some_attribute, 147)
"""
context = _AssertWarnsContext(expected_warning, self, callable_obj)
return context.handle('assertWarns', callable_obj, args, kwargs)
def _getAssertEqualityFunc(self, first, second):
"""Get a detailed comparison function for the types of the two args.
Returns: A callable accepting (first, second, msg=None) that will
raise a failure exception if first != second with a useful human
readable error message for those types.
"""
#
# NOTE(gregory.p.smith): I considered isinstance(first, type(second))
# and vice versa. I opted for the conservative approach in case
# subclasses are not intended to be compared in detail to their super
# class instances using a type equality func. This means testing
# subtypes won't automagically use the detailed comparison. Callers
# should use their type specific assertSpamEqual method to compare
# subclasses if the detailed comparison is desired and appropriate.
# See the discussion in http://bugs.python.org/issue2578.
#
if type(first) is type(second):
asserter = self._type_equality_funcs.get(type(first))
if asserter is not None:
if isinstance(asserter, str):
asserter = getattr(self, asserter)
return asserter
return self._baseAssertEqual
def _baseAssertEqual(self, first, second, msg=None):
"""The default assertEqual implementation, not type specific."""
if not first == second:
standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertEqual(self, first, second, msg=None):
"""Fail if the two objects are unequal as determined by the '=='
operator.
"""
assertion_func = self._getAssertEqualityFunc(first, second)
assertion_func(first, second, msg=msg)
def assertNotEqual(self, first, second, msg=None):
"""Fail if the two objects are equal as determined by the '!='
operator.
"""
if not first != second:
msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
safe_repr(second)))
raise self.failureException(msg)
def assertAlmostEqual(self, first, second, places=None, msg=None,
delta=None):
"""Fail if the two objects are unequal as determined by their
difference rounded to the given number of decimal places
        (default 7) and comparing to zero, or by comparing that the
        difference between the two objects is more than the given delta.
        Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most significant digit).
If the two objects compare equal then they will automatically
compare almost equal.
"""
if first == second:
# shortcut
return
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if abs(first - second) <= delta:
return
standardMsg = '%s != %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if round(abs(second-first), places) == 0:
return
standardMsg = '%s != %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertNotAlmostEqual(self, first, second, places=None, msg=None,
delta=None):
"""Fail if the two objects are equal as determined by their
difference rounded to the given number of decimal places
        (default 7) and comparing to zero, or by comparing that the
        difference between the two objects is less than the given delta.
        Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most significant digit).
Objects that are equal automatically fail.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if not (first == second) and abs(first - second) > delta:
return
standardMsg = '%s == %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if not (first == second) and round(abs(second-first), places) != 0:
return
standardMsg = '%s == %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None):
"""An equality assertion for ordered sequences (like lists and tuples).
For the purposes of this function, a valid ordered sequence type is one
which can be indexed, has a length, and has an equality operator.
Args:
seq1: The first sequence to compare.
seq2: The second sequence to compare.
seq_type: The expected datatype of the sequences, or None if no
datatype should be enforced.
msg: Optional message to use on failure instead of a list of
differences.
"""
if seq_type is not None:
seq_type_name = seq_type.__name__
if not isinstance(seq1, seq_type):
raise self.failureException('First sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq1)))
if not isinstance(seq2, seq_type):
raise self.failureException('Second sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq2)))
else:
seq_type_name = "sequence"
differing = None
try:
len1 = len(seq1)
except (TypeError, NotImplementedError):
differing = 'First %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
try:
len2 = len(seq2)
except (TypeError, NotImplementedError):
differing = 'Second %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
if seq1 == seq2:
return
seq1_repr = safe_repr(seq1)
seq2_repr = safe_repr(seq2)
if len(seq1_repr) > 30:
seq1_repr = seq1_repr[:30] + '...'
if len(seq2_repr) > 30:
seq2_repr = seq2_repr[:30] + '...'
elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
differing = '%ss differ: %s != %s\n' % elements
for i in range(min(len1, len2)):
try:
item1 = seq1[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of first %s\n' %
(i, seq_type_name))
break
try:
item2 = seq2[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of second %s\n' %
(i, seq_type_name))
break
if item1 != item2:
differing += ('\nFirst differing element %d:\n%s\n%s\n' %
(i, item1, item2))
break
else:
if (len1 == len2 and seq_type is None and
type(seq1) != type(seq2)):
# The sequences are the same, but have differing types.
return
if len1 > len2:
differing += ('\nFirst %s contains %d additional '
'elements.\n' % (seq_type_name, len1 - len2))
try:
differing += ('First extra element %d:\n%s\n' %
(len2, seq1[len2]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of first %s\n' % (len2, seq_type_name))
elif len1 < len2:
differing += ('\nSecond %s contains %d additional '
'elements.\n' % (seq_type_name, len2 - len1))
try:
differing += ('First extra element %d:\n%s\n' %
(len1, seq2[len1]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of second %s\n' % (len1, seq_type_name))
standardMsg = differing
diffMsg = '\n' + '\n'.join(
difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def _truncateMessage(self, message, diff):
max_diff = self.maxDiff
if max_diff is None or len(diff) <= max_diff:
return message + diff
return message + (DIFF_OMITTED % len(diff))
def assertListEqual(self, list1, list2, msg=None):
"""A list-specific equality assertion.
Args:
list1: The first list to compare.
list2: The second list to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(list1, list2, msg, seq_type=list)
def assertTupleEqual(self, tuple1, tuple2, msg=None):
"""A tuple-specific equality assertion.
Args:
tuple1: The first tuple to compare.
tuple2: The second tuple to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
def assertSetEqual(self, set1, set2, msg=None):
"""A set-specific equality assertion.
Args:
set1: The first set to compare.
set2: The second set to compare.
msg: Optional message to use on failure instead of a list of
differences.
assertSetEqual uses ducktyping to support different types of sets, and
is optimized for sets specifically (parameters must support a
difference method).
"""
try:
difference1 = set1.difference(set2)
except TypeError as e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError as e:
self.fail('first argument does not support set difference: %s' % e)
try:
difference2 = set2.difference(set1)
except TypeError as e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError as e:
self.fail('second argument does not support set difference: %s' % e)
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in difference1:
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in difference2:
lines.append(repr(item))
standardMsg = '\n'.join(lines)
self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertIs(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if expr1 is not expr2:
standardMsg = '%s is not %s' % (safe_repr(expr1),
safe_repr(expr2))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNot(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is not b), but with a nicer default message."""
if expr1 is expr2:
standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictEqual(self, d1, d2, msg=None):
self.assertIsInstance(d1, dict, 'First argument is not a dictionary')
self.assertIsInstance(d2, dict, 'Second argument is not a dictionary')
if d1 != d2:
standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(d1).splitlines(),
pprint.pformat(d2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictContainsSubset(self, subset, dictionary, msg=None):
"""Checks whether dictionary is a superset of subset."""
warnings.warn('assertDictContainsSubset is deprecated',
DeprecationWarning)
missing = []
mismatched = []
for key, value in subset.items():
if key not in dictionary:
missing.append(key)
elif value != dictionary[key]:
mismatched.append('%s, expected: %s, actual: %s' %
(safe_repr(key), safe_repr(value),
safe_repr(dictionary[key])))
if not (missing or mismatched):
return
standardMsg = ''
if missing:
standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
missing)
if mismatched:
if standardMsg:
standardMsg += '; '
standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
self.fail(self._formatMessage(msg, standardMsg))
def assertCountEqual(self, first, second, msg=None):
"""An unordered sequence comparison asserting that the same elements,
regardless of order. If the same element occurs more than once,
it verifies that the elements occur the same number of times.
self.assertEqual(Counter(list(first)),
Counter(list(second)))
Example:
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
"""
first_seq, second_seq = list(first), list(second)
try:
first = collections.Counter(first_seq)
second = collections.Counter(second_seq)
except TypeError:
# Handle case with unhashable elements
differences = _count_diff_all_purpose(first_seq, second_seq)
else:
if first == second:
return
differences = _count_diff_hashable(first_seq, second_seq)
if differences:
standardMsg = 'Element counts were not equal:\n'
lines = ['First has %d, Second has %d: %r' % diff for diff in differences]
diffMsg = '\n'.join(lines)
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def assertMultiLineEqual(self, first, second, msg=None):
"""Assert that two multi-line strings are equal."""
self.assertIsInstance(first, str, 'First argument is not a string')
self.assertIsInstance(second, str, 'Second argument is not a string')
if first != second:
# don't use difflib if the strings are too long
if (len(first) > self._diffThreshold or
len(second) > self._diffThreshold):
self._baseAssertEqual(first, second, msg)
firstlines = first.splitlines(keepends=True)
secondlines = second.splitlines(keepends=True)
if len(firstlines) == 1 and first.strip('\r\n') == first:
firstlines = [first + '\n']
secondlines = [second + '\n']
standardMsg = '%s != %s' % (safe_repr(first, True),
safe_repr(second, True))
diff = '\n' + ''.join(difflib.ndiff(firstlines, secondlines))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertLess(self, a, b, msg=None):
"""Just like self.assertTrue(a < b), but with a nicer default message."""
if not a < b:
standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertLessEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a <= b), but with a nicer default message."""
if not a <= b:
standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreater(self, a, b, msg=None):
"""Just like self.assertTrue(a > b), but with a nicer default message."""
if not a > b:
standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreaterEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a >= b), but with a nicer default message."""
if not a >= b:
standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if obj is not None:
standardMsg = '%s is not None' % (safe_repr(obj),)
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if obj is None:
standardMsg = 'unexpectedly None'
self.fail(self._formatMessage(msg, standardMsg))
def assertIsInstance(self, obj, cls, msg=None):
"""Same as self.assertTrue(isinstance(obj, cls)), with a nicer
default message."""
if not isinstance(obj, cls):
standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Included for symmetry with assertIsInstance."""
if isinstance(obj, cls):
standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertRaisesRegex(self, expected_exception, expected_regex,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a raised exception matches a regex.
Args:
expected_exception: Exception class expected to be raised.
expected_regex: Regex (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
msg: Optional message used in case of failure. Can only be used
when assertRaisesRegex is used as a context manager.
args: Extra args.
kwargs: Extra kwargs.
"""
context = _AssertRaisesContext(expected_exception, self, callable_obj,
expected_regex)
return context.handle('assertRaisesRegex', callable_obj, args, kwargs)
def assertWarnsRegex(self, expected_warning, expected_regex,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a triggered warning matches a regexp.
Basic functioning is similar to assertWarns() with the addition
that only warnings whose messages also match the regular expression
are considered successful matches.
Args:
expected_warning: Warning class expected to be triggered.
expected_regex: Regex (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
msg: Optional message used in case of failure. Can only be used
when assertWarnsRegex is used as a context manager.
args: Extra args.
kwargs: Extra kwargs.
"""
context = _AssertWarnsContext(expected_warning, self, callable_obj,
expected_regex)
return context.handle('assertWarnsRegex', callable_obj, args, kwargs)
def assertRegex(self, text, expected_regex, msg=None):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regex, (str, bytes)):
assert expected_regex, "expected_regex must not be empty."
expected_regex = re.compile(expected_regex)
if not expected_regex.search(text):
msg = msg or "Regex didn't match"
msg = '%s: %r not found in %r' % (msg, expected_regex.pattern, text)
raise self.failureException(msg)
def assertNotRegex(self, text, unexpected_regex, msg=None):
"""Fail the test if the text matches the regular expression."""
if isinstance(unexpected_regex, (str, bytes)):
unexpected_regex = re.compile(unexpected_regex)
match = unexpected_regex.search(text)
if match:
msg = msg or "Regex matched"
msg = '%s: %r matches %r in %r' % (msg,
text[match.start():match.end()],
unexpected_regex.pattern,
text)
raise self.failureException(msg)
def _deprecate(original_func):
def deprecated_func(*args, **kwargs):
warnings.warn(
'Please use {0} instead.'.format(original_func.__name__),
DeprecationWarning, 2)
return original_func(*args, **kwargs)
return deprecated_func
# see #9424
failUnlessEqual = assertEquals = _deprecate(assertEqual)
failIfEqual = assertNotEquals = _deprecate(assertNotEqual)
failUnlessAlmostEqual = assertAlmostEquals = _deprecate(assertAlmostEqual)
failIfAlmostEqual = assertNotAlmostEquals = _deprecate(assertNotAlmostEqual)
failUnless = assert_ = _deprecate(assertTrue)
failUnlessRaises = _deprecate(assertRaises)
failIf = _deprecate(assertFalse)
assertRaisesRegexp = _deprecate(assertRaisesRegex)
assertRegexpMatches = _deprecate(assertRegex)
class FunctionTestCase(TestCase):
"""A test case that wraps a test function.
This is useful for slipping pre-existing test functions into the
unittest framework. Optionally, set-up and tidy-up functions can be
supplied. As with TestCase, the tidy-up ('tearDown') function will
always be called if the set-up ('setUp') function ran successfully.
"""
def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
super(FunctionTestCase, self).__init__()
self._setUpFunc = setUp
self._tearDownFunc = tearDown
self._testFunc = testFunc
self._description = description
def setUp(self):
if self._setUpFunc is not None:
self._setUpFunc()
def tearDown(self):
if self._tearDownFunc is not None:
self._tearDownFunc()
def runTest(self):
self._testFunc()
def id(self):
return self._testFunc.__name__
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._setUpFunc == other._setUpFunc and \
self._tearDownFunc == other._tearDownFunc and \
self._testFunc == other._testFunc and \
self._description == other._description
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._setUpFunc, self._tearDownFunc,
self._testFunc, self._description))
def __str__(self):
return "%s (%s)" % (strclass(self.__class__),
self._testFunc.__name__)
def __repr__(self):
return "<%s tec=%s>" % (strclass(self.__class__),
self._testFunc)
def shortDescription(self):
if self._description is not None:
return self._description
doc = self._testFunc.__doc__
return doc and doc.split("\n")[0].strip() or None
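# -----------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original module).
# It shows how client code typically exercises TestCase and FunctionTestCase;
# _square and _SquareTest are hypothetical names, and real projects would put
# this in a separate test module that does `import unittest`. The guard keeps
# normal imports of this module unaffected.
if __name__ == "__main__":
    def _square(x):
        return x * x

    class _SquareTest(TestCase):
        def test_square(self):
            self.assertEqual(_square(3), 9)
            with self.assertRaises(TypeError):
                _square(None)

    # FunctionTestCase wraps a plain callable as a test case.
    plain_case = FunctionTestCase(lambda: None, description='does nothing')

    result_obj = _SquareTest('test_square').run()
    plain_case.run(result_obj)
    print(result_obj)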
| gpl-2.0 |
ibrikin/hide_my_python | regex.py | 3 | 2916 | #!/usr/bin/env python3
# -*- coding: utf8 -*-
#
# HideMyPython! - A parser for the free proxy list on HideMyAss!
#
# This file defines the different needed regular expressions to retrieve
# the proxy's parameters from the HideMyAss! proxy list.
#
# Copyright (C) 2013 Yannick Méheut <useless (at) utouch (dot) fr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
# This regex corresponds to the HTML code describing a proxy
PROXY_HTML = re.compile(r'<tr class=".*?</tr>', flags=re.DOTALL)
# This regex corresponds to the HTML code containing the IP:port of a proxy
IP_PORT_HTML = re.compile(r'<td><span><style>.*?</td>\s*<td>.*?</td>',
flags=re.DOTALL)
# This regex is used to find the class which won't be displayed in the IP:port
# HTML code
DISPLAY_NONE_CLASS = re.compile(r'([a-zA-Z0-9_-]+){display:none}')
# This regex is used to delete everything between <style> and </style> in
# the IP:port HTML code
STYLE = re.compile(r'<style>.*</style>', flags=re.DOTALL)
# This regex is used to delete everything with a style "display:none" in the
# IP:port HTML code
DISPLAY_NONE = re.compile(r'<[^>]*style="display:none">[^<]*<[^>]*>')
# This regex is used to delete everything between a < and a > in the IP:port
# HTML code
TAGS = re.compile(r'<[^>]*>')
# This regex is used to recover the HTML code containing the country in the
# proxy HTML code
COUNTRY_HTML = re.compile(r'<span class="country".*?>.*?</span>',
re.DOTALL)
# This regex is used to recover the country
COUNTRY = re.compile(r'([a-zA-Z, ]*)</span>')
# This regex is used to recover the HTML code containing the speed in the
# proxy HTML code
SPEED_HTML = re.compile(r'<div class="progress-indicator.*?levels="speed" rel.*?>(.*?)</div>',
flags=re.DOTALL)
# This regex is used to recover the speed
SPEED = re.compile(r'style="width: (\d+)%')
# This regex is used to recover the HTML code containing the connection time in
# the proxy HTML code
CONNECT_TIME_HTML = re.compile(r'<div class="progress-indicator.*?levels="speed">(.*?)</div>',
flags=re.DOTALL)
# This regex is used to recover the connection time
CONNECT_TIME = re.compile(r'style="width: (\d+)%')
# This regex is used to recover the type and anonymity level in the proxy
# HTML code
TYPE_ANONYMITY = re.compile(r'<td>(.*?)</td>\s*<td.*?>(.*)</td>')
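# Illustrative sketch (added for clarity; not part of the original file).
# The HTML snippet below is invented and far simpler than the real
# HideMyAss! markup; it only demonstrates how the compiled patterns above
# are meant to be chained together.
if __name__ == '__main__':
    fake_row = ('<tr class="row"><td><span><style></style>127.0.0.1</span></td>'
                '<td>8080</td>'
                '<td><span class="country" rel="fr">France</span></td></tr>')
    for proxy_html in PROXY_HTML.findall(fake_row):
        country_html = COUNTRY_HTML.search(proxy_html)
        if country_html:
            country = COUNTRY.search(country_html.group())
            print('country:', country.group(1) if country else None)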
| gpl-3.0 |
LumPenPacK/NetworkExtractionFromImages | osx_build/nefi2_osx_amd64_xcode_2015/site-packages/networkx/classes/ordered.py | 48 | 1041 | """
OrderedDict variants of the default base classes.
"""
from collections import OrderedDict
from .graph import Graph
from .multigraph import MultiGraph
from .digraph import DiGraph
from .multidigraph import MultiDiGraph
__all__ = []
__all__.extend([
'OrderedGraph',
'OrderedDiGraph',
'OrderedMultiGraph',
'OrderedMultiDiGraph',
])
class OrderedGraph(Graph):
node_dict_factory = OrderedDict
adjlist_dict_factory = OrderedDict
edge_attr_dict_factory = OrderedDict
class OrderedDiGraph(DiGraph):
node_dict_factory = OrderedDict
adjlist_dict_factory = OrderedDict
edge_attr_dict_factory = OrderedDict
class OrderedMultiGraph(MultiGraph):
node_dict_factory = OrderedDict
adjlist_dict_factory = OrderedDict
edge_key_dict_factory = OrderedDict
edge_attr_dict_factory = OrderedDict
class OrderedMultiDiGraph(MultiDiGraph):
node_dict_factory = OrderedDict
adjlist_dict_factory = OrderedDict
edge_key_dict_factory = OrderedDict
edge_attr_dict_factory = OrderedDict
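# -----------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original module).
# Because of the relative imports above, these classes are normally reached
# through the installed package (e.g. networkx.OrderedGraph); the guarded
# block only sketches the behaviour: nodes and edges come back in insertion
# order.
if __name__ == '__main__':
    G = OrderedGraph()
    G.add_nodes_from(['c', 'a', 'b'])
    G.add_edges_from([('c', 'a'), ('a', 'b')])
    print(list(G.nodes()))  # ['c', 'a', 'b'] -- insertion order preserved
    print(list(G.edges()))  # [('c', 'a'), ('a', 'b')]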
| bsd-2-clause |
chrisseto/osf.io | api/nodes/views.py | 1 | 154904 | import re
from django.apps import apps
from modularodm import Q as MQ
from rest_framework import generics, permissions as drf_permissions
from rest_framework.exceptions import PermissionDenied, ValidationError, NotFound, MethodNotAllowed, NotAuthenticated
from rest_framework.response import Response
from rest_framework.status import HTTP_204_NO_CONTENT
from api.addons.serializers import NodeAddonFolderSerializer
from api.addons.views import AddonSettingsMixin
from api.base import generic_bulk_views as bulk_views
from api.base import permissions as base_permissions
from api.base.exceptions import (
InvalidModelValueError,
JSONAPIException,
Gone,
InvalidFilterOperator,
InvalidFilterValue,
RelationshipPostMakesNoChanges,
EndpointNotImplementedError,
)
from api.base.filters import ODMFilterMixin, ListFilterMixin, PreprintFilterMixin
from api.base.pagination import CommentPagination, NodeContributorPagination, MaxSizePagination
from api.base.parsers import (
JSONAPIRelationshipParser,
JSONAPIRelationshipParserForRegularJSON,
JSONAPIMultipleRelationshipsParser,
JSONAPIMultipleRelationshipsParserForRegularJSON,
)
from api.base.settings import ADDONS_OAUTH, API_BASE
from api.base.throttling import (
UserRateThrottle,
NonCookieAuthThrottle,
AddContributorThrottle,
)
from api.base.utils import default_node_list_query, default_node_permission_query
from api.base.utils import get_object_or_error, is_bulk_request, get_user_auth, is_truthy
from api.base.views import JSONAPIBaseView
from api.base.views import (
BaseContributorDetail,
BaseContributorList,
BaseLinkedList,
BaseNodeLinksDetail,
BaseNodeLinksList,
LinkedNodesRelationship,
LinkedRegistrationsRelationship
)
from api.caching.tasks import ban_url
from api.citations.utils import render_citation
from api.comments.permissions import CanCommentOrPublic
from api.comments.serializers import (CommentCreateSerializer,
NodeCommentSerializer)
from api.files.serializers import FileSerializer, OsfStorageFileSerializer
from api.identifiers.serializers import NodeIdentifierSerializer
from api.identifiers.views import IdentifierList
from api.institutions.serializers import InstitutionSerializer
from api.logs.serializers import NodeLogSerializer
from api.nodes.filters import NodeODMFilterMixin, NodesFilterMixin
from api.nodes.permissions import (
IsAdmin,
IsPublic,
AdminOrPublic,
ContributorOrPublic,
RegistrationAndPermissionCheckForPointers,
ContributorDetailPermissions,
ReadOnlyIfRegistration,
IsAdminOrReviewer,
WriteOrPublicForRelationshipInstitutions,
ExcludeWithdrawals,
NodeLinksShowIfVersion,
)
from api.nodes.serializers import (
NodeSerializer,
ForwardNodeAddonSettingsSerializer,
NodeAddonSettingsSerializer,
NodeLinksSerializer,
NodeForksSerializer,
NodeDetailSerializer,
NodeProviderSerializer,
DraftRegistrationSerializer,
DraftRegistrationDetailSerializer,
NodeContributorsSerializer,
NodeContributorDetailSerializer,
NodeInstitutionsRelationshipSerializer,
NodeAlternativeCitationSerializer,
NodeContributorsCreateSerializer,
NodeViewOnlyLinkSerializer,
NodeViewOnlyLinkUpdateSerializer,
NodeCitationSerializer,
NodeCitationStyleSerializer
)
from api.nodes.utils import get_file_object
from api.preprints.serializers import PreprintSerializer
from api.registrations.serializers import RegistrationSerializer
from api.users.views import UserMixin
from api.wikis.serializers import NodeWikiSerializer
from framework.auth.oauth_scopes import CoreScopes
from framework.postcommit_tasks.handlers import enqueue_postcommit_task
from osf.models import AbstractNode
from osf.models import (Node, PrivateLink, Institution, Comment, DraftRegistration, PreprintService)
from osf.models import OSFUser
from osf.models import NodeRelation, AlternativeCitation, Guid
from osf.models import BaseFileNode
from osf.models.files import File, Folder
from addons.wiki.models import NodeWikiPage
from website.exceptions import NodeStateError
from website.util.permissions import ADMIN, PERMISSIONS
class NodeMixin(object):
"""Mixin with convenience methods for retrieving the current node based on the
current URL. By default, fetches the current node based on the node_id kwarg.
"""
serializer_class = NodeSerializer
node_lookup_url_kwarg = 'node_id'
def get_node(self, check_object_permissions=True):
node = None
if self.kwargs.get('is_embedded') is True:
# If this is an embedded request, the node might be cached somewhere
node = self.request.parents[Node].get(self.kwargs[self.node_lookup_url_kwarg])
if node is None:
node = get_object_or_error(
Node,
self.kwargs[self.node_lookup_url_kwarg],
display_name='node'
)
# Nodes that are folders/collections are treated as a separate resource, so if the client
# requests a collection through a node endpoint, we return a 404
if node.is_collection or node.is_registration:
raise NotFound
# May raise a permission denied
if check_object_permissions:
self.check_object_permissions(self.request, node)
return node
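# Illustrative sketch (added for clarity): concrete views mix NodeMixin into a
# DRF view class and call get_node() wherever the current node is needed. The
# class below is hypothetical, not one of this module's real views:
#
#     class ExampleNodeDetail(JSONAPIBaseView, generics.RetrieveAPIView, NodeMixin):
#         serializer_class = NodeSerializer
#
#         def get_object(self):
#             return self.get_node()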
class DraftMixin(object):
serializer_class = DraftRegistrationSerializer
def get_draft(self, draft_id=None):
node_id = self.kwargs['node_id']
if draft_id is None:
draft_id = self.kwargs['draft_id']
draft = get_object_or_error(DraftRegistration, draft_id)
if not draft.branched_from._id == node_id:
raise ValidationError('This draft registration is not created from the given node.')
if self.request.method not in drf_permissions.SAFE_METHODS:
registered_and_deleted = draft.registered_node and draft.registered_node.is_deleted
if draft.registered_node and not draft.registered_node.is_deleted:
raise PermissionDenied('This draft has already been registered and cannot be modified.')
if draft.is_pending_review:
raise PermissionDenied('This draft is pending review and cannot be modified.')
if draft.requires_approval and draft.is_approved and (not registered_and_deleted):
raise PermissionDenied('This draft has already been approved and cannot be modified.')
self.check_object_permissions(self.request, draft.branched_from)
return draft
class WaterButlerMixin(object):
path_lookup_url_kwarg = 'path'
provider_lookup_url_kwarg = 'provider'
def get_file_item(self, item):
attrs = item['attributes']
file_node = BaseFileNode.resolve_class(
attrs['provider'],
BaseFileNode.FOLDER if attrs['kind'] == 'folder'
else BaseFileNode.FILE
).get_or_create(self.get_node(check_object_permissions=False), attrs['path'])
file_node.update(None, attrs, user=self.request.user)
self.check_object_permissions(self.request, file_node)
return file_node
def fetch_from_waterbutler(self):
node = self.get_node(check_object_permissions=False)
path = self.kwargs[self.path_lookup_url_kwarg]
provider = self.kwargs[self.provider_lookup_url_kwarg]
return self.get_file_object(node, path, provider)
def get_file_object(self, node, path, provider, check_object_permissions=True):
obj = get_file_object(node=node, path=path, provider=provider, request=self.request)
if provider == 'osfstorage':
if check_object_permissions:
self.check_object_permissions(self.request, obj)
return obj
class NodeList(JSONAPIBaseView, bulk_views.BulkUpdateJSONAPIView, bulk_views.BulkDestroyJSONAPIView, bulk_views.ListBulkCreateJSONAPIView, NodesFilterMixin, WaterButlerMixin):
"""Nodes that represent projects and components. *Writeable*.
Paginated list of nodes ordered by their `date_modified`. Each resource contains the full representation of the
node, meaning additional requests to an individual node's detail view are not necessary. Registrations and withdrawn
registrations cannot be accessed through this endpoint (see registration endpoints instead).
<!--- Copied Spiel from NodeDetail -->
On the front end, nodes are considered 'projects' or 'components'. The difference between a project and a component
is that a project is the top-level node, and components are children of the project. There is also a [category
field](/v2/#osf-node-categories) that includes 'project' as an option. The categorization essentially determines
which icon is displayed by the node in the front-end UI and helps with search organization. Top-level nodes may have
a category other than project, and child nodes may have a category of project.
##Node Attributes
<!--- Copied Attributes from NodeDetail -->
OSF Node entities have the "nodes" `type`.
name type description
=================================================================================
title string title of project or component
description string description of the node
category string node category, must be one of the allowed values
date_created iso8601 timestamp timestamp that the node was created
date_modified iso8601 timestamp timestamp when the node was last updated
tags array of strings list of tags that describe the node
current_user_can_comment boolean Whether the current user is allowed to post comments
current_user_permissions array of strings list of strings representing the permissions for the current user on this node
registration boolean is this a registration? (always false - may be deprecated in future versions)
fork boolean is this node a fork of another node?
public boolean has this node been made publicly-visible?
preprint boolean is this a preprint?
collection boolean is this a collection? (always false - may be deprecated in future versions)
node_license object details of the license applied to the node
year string date range of the license
copyright_holders array of strings holders of the applied license
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Actions
###Creating New Nodes
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "nodes", # required
"attributes": {
"title": {title}, # required
"category": {category}, # required
"description": {description}, # optional
"tags": [{tag1}, {tag2}], # optional
"public": true|false # optional
"template_from": {node_id} # optional
}
}
}
Success: 201 CREATED + node representation
New nodes are created by issuing a POST request to this endpoint. The `title` and `category` fields are
mandatory. `category` must be one of the [permitted node categories](/v2/#osf-node-categories). `public` defaults
to false. All other fields not listed above will be ignored. If the node creation is successful the API will
return a 201 response with the representation of the new node in the body. For the new node's canonical URL, see
the `/links/self` field of the response.
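As an illustration, a minimal client-side sketch using the `requests` library (the base URL, token, and
attribute values below are placeholders, not part of this module):
    import requests
    resp = requests.post(
        'https://api.osf.io/v2/nodes/',                          # placeholder base URL
        json={'data': {'type': 'nodes',
                       'attributes': {'title': 'My project', 'category': 'project'}}},
        headers={'Authorization': 'Bearer <access token>',
                 'Content-Type': 'application/vnd.api+json'},
    )
    assert resp.status_code == 201
    new_node_url = resp.json()['data']['links']['self']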
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
+ `view_only=<Str>` -- Allow users with limited access keys to access this node. Note that some keys are anonymous,
so using the view_only key will cause user-related information to no longer serialize. This includes blank ids for
users and contributors and missing serializer fields and relationships.
Nodes may be filtered by their `id`, `title`, `category`, `description`, `public`, `tags`, `date_created`, `date_modified`,
`root`, `parent`, `preprint`, and `contributors`. Most are string fields and will be filtered using simple substring matching. `public`
and `preprint` are boolean values, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note that quoting `true`
or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_BASE_READ]
required_write_scopes = [CoreScopes.NODE_BASE_WRITE]
model_class = apps.get_model('osf.AbstractNode')
serializer_class = NodeSerializer
view_category = 'nodes'
view_name = 'node-list'
ordering = ('-date_modified', ) # default ordering
# overrides NodesFilterMixin
def get_default_queryset(self):
user = self.request.user
base_query = default_node_list_query()
permissions_query = default_node_permission_query(user)
return Node.find(base_query & permissions_query)
# overrides ListBulkCreateJSONAPIView, BulkUpdateJSONAPIView
def get_queryset(self):
# For bulk requests, queryset is formed from request body.
if is_bulk_request(self.request):
query = MQ('_id', 'in', [node['id'] for node in self.request.data])
auth = get_user_auth(self.request)
nodes = AbstractNode.find(query)
# If skip_uneditable=True in query_params, skip nodes for which the user
# does not have EDIT permissions.
if is_truthy(self.request.query_params.get('skip_uneditable', False)):
has_permission = []
for node in nodes:
if node.can_edit(auth):
has_permission.append(node)
query = MQ('_id', 'in', [node._id for node in has_permission])
return AbstractNode.find(query)
for node in nodes:
if not node.can_edit(auth):
raise PermissionDenied
return nodes
else:
return self.get_queryset_from_request().distinct()
# overrides ListBulkCreateJSONAPIView, BulkUpdateJSONAPIView, BulkDestroyJSONAPIView
def get_serializer_class(self):
"""
Use NodeDetailSerializer which requires 'id'
"""
if self.request.method in ('PUT', 'PATCH', 'DELETE'):
return NodeDetailSerializer
else:
return NodeSerializer
# overrides ListBulkCreateJSONAPIView
def perform_create(self, serializer):
"""Create a node.
:param serializer:
"""
# On creation, make sure that current user is the creator
user = self.request.user
serializer.save(creator=user)
# overrides BulkDestroyJSONAPIView
def allow_bulk_destroy_resources(self, user, resource_list):
"""User must have admin permissions to delete nodes."""
if is_truthy(self.request.query_params.get('skip_uneditable', False)):
return any([node.has_permission(user, ADMIN) for node in resource_list])
return all([node.has_permission(user, ADMIN) for node in resource_list])
def bulk_destroy_skip_uneditable(self, resource_object_list, user, object_type):
"""
If skip_uneditable=True in query_params, skip the resources for which the user does not have
admin permissions and delete the remaining resources
"""
allowed = []
skipped = []
if not is_truthy(self.request.query_params.get('skip_uneditable', False)):
return None
for resource in resource_object_list:
if resource.has_permission(user, ADMIN):
allowed.append(resource)
else:
skipped.append({'id': resource._id, 'type': object_type})
return {'skipped': skipped, 'allowed': allowed}
# Overrides BulkDestroyJSONAPIView
def perform_destroy(self, instance):
auth = get_user_auth(self.request)
try:
instance.remove_node(auth=auth)
except NodeStateError as err:
raise ValidationError(err.message)
instance.save()
class NodeDetail(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, NodeMixin, WaterButlerMixin):
"""Details about a given node (project or component). *Writeable*.
A registration or withdrawn registration cannot be accessed through this endpoint. See Registration Detail endpoint.
On the front end, nodes are considered 'projects' or 'components'. The difference between a project and a component
is that a project is the top-level node, and components are children of the project. There is also a [category
field](/v2/#osf-node-categories) that includes 'project' as an option. The categorization essentially determines
which icon is displayed by the node in the front-end UI and helps with search organization. Top-level nodes may have
a category other than project, and child nodes may have a category of project.
###Permissions
Nodes that are made public will give read-only access to everyone. Private nodes require explicit read
permission. Write and admin access are the same for public and private nodes. Administrators on a parent node have
implicit read permissions for all child nodes.
##Attributes
OSF Node entities have the "nodes" `type`.
name type description
=================================================================================
title string title of project or component
description string description of the node
category string node category, must be one of the allowed values
date_created iso8601 timestamp timestamp that the node was created
date_modified iso8601 timestamp timestamp when the node was last updated
tags array of strings list of tags that describe the node
current_user_can_comment boolean Whether the current user is allowed to post comments
current_user_permissions array of strings list of strings representing the permissions for the current user on this node
registration boolean is this a registration? (always false - may be deprecated in future versions)
fork boolean is this node a fork of another node?
public boolean has this node been made publicly-visible?
collection boolean is this a collection? (always false - may be deprecated in future versions)
node_license object details of the license applied to the node
year string date range of the license
copyright_holders array of strings holders of the applied license
##Relationships
###Children
List of nodes that are children of this node. New child nodes may be added through this endpoint.
###Comments
List of comments on this node. New comments can be left on the node through this endpoint.
###Contributors
List of users who are contributors to this node. Contributors may have "read", "write", or "admin" permissions.
A node must always have at least one "admin" contributor. Contributors may be added via this endpoint.
###Draft Registrations
List of draft registrations of the current node.
###Files
List of top-level folders (actually cloud-storage providers) associated with this node. This is the starting point
for accessing the actual files stored within this node.
###Forked From
If this node was forked from another node, the canonical endpoint of the node that was forked from will be
available in the `/forked_from/links/related/href` key. Otherwise, it will be null.
###Logs
List of read-only log actions pertaining to the node.
###Node Links
List of links (pointers) to other nodes on the OSF. Node links can be added through this endpoint.
###Parent
If this node is a child node of another node, the parent's canonical endpoint will be available in the
`/parent/links/related/href` key. Otherwise, it will be null.
###Registrations
List of registrations of the current node.
###Root
Returns the top-level node associated with the current node. If the current node is the top-level node, the root is
the current node.
### Linked Nodes
List of nodes linked to the current node.
### Linked Registrations
List of registrations linked to the current node.
##Links
self: the canonical api endpoint of this node
html: this node's page on the OSF website
##Actions
###Update
Method: PUT / PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "nodes", # required
"id": {node_id}, # required
"attributes": {
"title": {title}, # mandatory
"category": {category}, # mandatory
"description": {description}, # optional
"tags": [{tag1}, {tag2}], # optional
"public": true|false # optional
}
}
}
Success: 200 OK + node representation
To update a node, issue either a PUT or a PATCH request against the `/links/self` URL. The `title` and `category`
fields are mandatory if you PUT and optional if you PATCH. The `tags` parameter must be an array of strings.
Non-string values will be accepted and stringified, but we make no promises about the stringification output. So
don't do that.
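A hedged sketch of such an update with the `requests` library (URL, token, and values are placeholders):
    import requests
    resp = requests.patch(
        'https://api.osf.io/v2/nodes/{node_id}/',                # placeholder URL
        json={'data': {'type': 'nodes',
                       'id': '{node_id}',
                       'attributes': {'description': 'An updated description'}}},
        headers={'Authorization': 'Bearer <access token>',
                 'Content-Type': 'application/vnd.api+json'},
    )
    assert resp.status_code == 200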
###Delete
Method: DELETE
URL: /links/self
Params: <none>
Success: 204 No Content
To delete a node, issue a DELETE request against `/links/self`. A successful delete will return a 204 No Content
response. Attempting to delete a node you do not own will result in a 403 Forbidden.
##Query Params
+ `view_only=<Str>` -- Allow users with limited access keys to access this node. Note that some keys are anonymous, so using the view_only key will cause user-related information to no longer serialize. This includes blank ids for users and contributors and missing serializer fields and relationships.
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
ExcludeWithdrawals,
)
required_read_scopes = [CoreScopes.NODE_BASE_READ]
required_write_scopes = [CoreScopes.NODE_BASE_WRITE]
parser_classes = (JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON,)
serializer_class = NodeDetailSerializer
view_category = 'nodes'
view_name = 'node-detail'
# overrides RetrieveUpdateDestroyAPIView
def get_object(self):
return self.get_node()
# overrides RetrieveUpdateDestroyAPIView
def perform_destroy(self, instance):
auth = get_user_auth(self.request)
node = self.get_object()
try:
node.remove_node(auth=auth)
except NodeStateError as err:
raise ValidationError(err.message)
node.save()
class NodeContributorsList(BaseContributorList, bulk_views.BulkUpdateJSONAPIView, bulk_views.BulkDestroyJSONAPIView, bulk_views.ListBulkCreateJSONAPIView, NodeMixin):
"""Contributors (users) for a node.
Contributors are users who can make changes to the node or, in the case of private nodes,
have read access to the node. Contributors are divided between 'bibliographic' and 'non-bibliographic'
contributors. From a permissions standpoint, both are the same, but bibliographic contributors
are included in citations, while non-bibliographic contributors are not included in citations.
Note that if an anonymous view_only key is being used, the user relationship will not be exposed and the id for
the contributor will be an empty string.
##Node Contributor Attributes
<!--- Copied Attributes from NodeContributorDetail -->
`type` is "contributors"
name type description
======================================================================================================
bibliographic boolean Whether the user will be included in citations for this node. Default is true.
permission string User permission level. Must be "read", "write", or "admin". Default is "write".
unregistered_contributor string Contributor's assigned name if contributor hasn't yet claimed account
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Relationships
###Users
This endpoint shows the contributor user detail and is automatically embedded.
##Actions
###Adding Contributors
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "contributors", # required
"attributes": {
"bibliographic": true|false, # optional
"permission": "read"|"write"|"admin" # optional
},
"relationships": {
"users": {
"data": {
"type": "users", # required
"id": "{user_id}" # required
}
}
}
}
}
Success: 201 CREATED + node contributor representation
Add a contributor to a node by issuing a POST request to this endpoint. This effectively creates a relationship
between the node and the user. Besides the top-level type, there are optional "attributes" which describe the
relationship between the node and the user. `bibliographic` is a boolean and defaults to `true`. `permission` must
be a [valid OSF permission key](/v2/#osf-node-permission-keys) and defaults to `"write"`. A relationship object
with a "data" member, containing the user `type` and user `id` must be included. The id must be a valid user id.
All other fields not listed above will be ignored. If the request is successful the API will return
a 201 response with the representation of the new node contributor in the body. For the new node contributor's
canonical URL, see the `/links/self` field of the response.
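For example, a minimal sketch with the `requests` library (URL, token, and ids are placeholders):
    import requests
    resp = requests.post(
        'https://api.osf.io/v2/nodes/{node_id}/contributors/',   # placeholder URL
        json={'data': {'type': 'contributors',
                       'attributes': {'bibliographic': True, 'permission': 'write'},
                       'relationships': {'users': {'data': {'type': 'users', 'id': '{user_id}'}}}}},
        headers={'Authorization': 'Bearer <access token>',
                 'Content-Type': 'application/vnd.api+json'},
    )
    assert resp.status_code == 201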
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
NodeContributors may be filtered by `bibliographic`, or `permission` attributes. `bibliographic` is a boolean, and
can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note that quoting `true` or `false` in
the query will cause the match to fail regardless.
+ `profile_image_size=<Int>` -- Modifies `/links/profile_image_url` of the user entities so that it points to
the user's profile image scaled to the given size in pixels. If left blank, the size depends on the image provider.
#This Request/Response
"""
permission_classes = (
AdminOrPublic,
drf_permissions.IsAuthenticatedOrReadOnly,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_CONTRIBUTORS_READ]
required_write_scopes = [CoreScopes.NODE_CONTRIBUTORS_WRITE]
model_class = OSFUser
throttle_classes = (AddContributorThrottle, UserRateThrottle, NonCookieAuthThrottle, )
pagination_class = NodeContributorPagination
serializer_class = NodeContributorsSerializer
view_category = 'nodes'
view_name = 'node-contributors'
ordering = ('_order',) # default ordering
# overrides FilterMixin
def postprocess_query_param(self, key, field_name, operation):
if field_name == 'bibliographic':
operation['source_field_name'] = 'visible'
# overrides FilterMixin
def filter_by_field(self, queryset, field_name, operation):
if field_name == 'permission':
if operation['op'] != 'eq':
raise InvalidFilterOperator(value=operation['op'], valid_operators=['eq'])
# operation['value'] should be 'admin', 'write', or 'read'
if operation['value'].lower().strip() not in PERMISSIONS:
raise InvalidFilterValue(value=operation['value'])
return queryset.filter(**{operation['value'].lower().strip(): True})
return super(NodeContributorsList, self).filter_by_field(queryset, field_name, operation)
# overrides ListBulkCreateJSONAPIView, BulkUpdateJSONAPIView, BulkDeleteJSONAPIView
def get_serializer_class(self):
"""
Use NodeContributorDetailSerializer which requires 'id'
"""
if self.request.method in ('PUT', 'PATCH', 'DELETE'):
return NodeContributorDetailSerializer
elif self.request.method == 'POST':
return NodeContributorsCreateSerializer
else:
return NodeContributorsSerializer
# overrides ListBulkCreateJSONAPIView, BulkUpdateJSONAPIView
def get_queryset(self):
queryset = self.get_queryset_from_request()
# If bulk request, queryset only contains contributors in request
if is_bulk_request(self.request):
contrib_ids = []
for item in self.request.data:
try:
contrib_ids.append(item['id'].split('-')[1])
except AttributeError:
raise ValidationError('Contributor identifier not provided.')
except IndexError:
raise ValidationError('Contributor identifier incorrectly formatted.')
queryset = queryset.filter(user__guids___id__in=contrib_ids)
return queryset
# Overrides BulkDestroyJSONAPIView
def perform_destroy(self, instance):
auth = get_user_auth(self.request)
node = self.get_node()
if len(node.visible_contributors) == 1 and node.get_visible(instance):
raise ValidationError('Must have at least one visible contributor')
if not node.contributor_set.filter(user=instance).exists():
raise NotFound('User cannot be found in the list of contributors.')
removed = node.remove_contributor(instance, auth)
if not removed:
raise ValidationError('Must have at least one registered admin contributor')
# Overrides BulkDestroyJSONAPIView
def get_requested_resources(self, request, request_data):
requested_ids = []
for data in request_data:
try:
requested_ids.append(data['id'].split('-')[1])
except IndexError:
raise ValidationError('Contributor identifier incorrectly formatted.')
resource_object_list = OSFUser.find(MQ('_id', 'in', requested_ids))
for resource in resource_object_list:
if getattr(resource, 'is_deleted', None):
raise Gone
if len(resource_object_list) != len(request_data):
raise ValidationError({'non_field_errors': 'Could not find all objects to delete.'})
return resource_object_list
class NodeContributorDetail(BaseContributorDetail, generics.RetrieveUpdateDestroyAPIView, NodeMixin, UserMixin):
"""Detail of a contributor for a node. *Writeable*.
Contributors are users who can make changes to the node or, in the case of private nodes,
have read access to the node. Contributors are divided between 'bibliographic' and 'non-bibliographic'
contributors. From a permissions standpoint, both are the same, but bibliographic contributors
are included in citations, while non-bibliographic contributors are not included in citations.
Note that if an anonymous view_only key is being used, the user relationship will not be exposed and the id for
the contributor will be an empty string.
Contributors can be viewed, removed, and have their permissions and bibliographic status changed via this
endpoint.
##Attributes
`type` is "contributors"
name type description
======================================================================================================
bibliographic boolean Whether the user will be included in citations for this node. Default is true.
permission string User permission level. Must be "read", "write", or "admin". Default is "write".
unregistered_contributor string Contributor's assigned name if contributor hasn't yet claimed account
index integer The position in the list of contributors reflected in the bibliography. Zero Indexed.
##Relationships
###Users
This endpoint shows the contributor user detail.
##Links
self: the canonical api endpoint of this contributor
html: the contributing user's page on the OSF website
profile_image: a url to the contributing user's profile image
##Actions
###Update Contributor
Method: PUT / PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "contributors", # required
"id": {contributor_id}, # required
"attributes": {
"bibliographic": true|false, # optional
"permission": "read"|"write"|"admin" # optional
"index": "0" # optional
}
}
}
Success: 200 OK + node representation
To update a contributor's bibliographic preferences, order in the bibliography,
or access permissions for the node, issue a PUT request to the
`self` link. Since this endpoint has no mandatory attributes, PUT and PATCH are functionally the same. If the given
user is not already in the contributor list, a 404 Not Found error will be returned. A node must always have at
least one admin, and any attempt to downgrade the permissions of a sole admin will result in a 400 Bad Request
error.
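For example, a hedged sketch of promoting a contributor to admin with `requests` (URL, token, and ids are placeholders):
    import requests
    resp = requests.put(
        'https://api.osf.io/v2/nodes/{node_id}/contributors/{contributor_id}/',  # placeholder URL
        json={'data': {'type': 'contributors',
                       'id': '{contributor_id}',
                       'attributes': {'permission': 'admin', 'bibliographic': True}}},
        headers={'Authorization': 'Bearer <access token>',
                 'Content-Type': 'application/vnd.api+json'},
    )
    assert resp.status_code == 200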
###Remove Contributor
Method: DELETE
URL: /links/self
Query Params: <none>
Success: 204 No Content
To remove a contributor from a node, issue a DELETE request to the `self` link. Attempting to remove the only admin
from a node will result in a 400 Bad Request response. This request will only remove the relationship between the
node and the user, not the user itself.
##Query Params
+ `profile_image_size=<Int>` -- Modifies `/links/profile_image_url` so that it points the image scaled to the given
size in pixels. If left blank, the size depends on the image provider.
#This Request/Response
"""
permission_classes = (
ContributorDetailPermissions,
drf_permissions.IsAuthenticatedOrReadOnly,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_CONTRIBUTORS_READ]
required_write_scopes = [CoreScopes.NODE_CONTRIBUTORS_WRITE]
serializer_class = NodeContributorDetailSerializer
view_category = 'nodes'
view_name = 'node-contributor-detail'
# overrides DestroyAPIView
def perform_destroy(self, instance):
node = self.get_node()
auth = get_user_auth(self.request)
if len(node.visible_contributors) == 1 and instance.visible:
raise ValidationError('Must have at least one visible contributor')
removed = node.remove_contributor(instance, auth)
if not removed:
raise ValidationError('Must have at least one registered admin contributor')
class NodeDraftRegistrationsList(JSONAPIBaseView, generics.ListCreateAPIView, NodeMixin):
"""Draft registrations of the current node.
<!--- Copied partially from NodeDraftRegistrationDetail -->
Draft registrations contain the supplemental registration questions that accompany a registration. A registration
is a frozen version of the project that can never be edited or deleted but can be withdrawn.
Your original project remains editable but will now have the registration linked to it.
###Permissions
Users must have admin permission on the node in order to view or create a draft registration.
##Draft Registration Attributes
Draft Registrations have the "draft_registrations" `type`.
name type description
===========================================================================
registration_supplement string id of registration_schema, must be an active schema
registration_metadata dictionary dictionary of question ids and responses from registration schema
datetime_initiated iso8601 timestamp timestamp that the draft was created
datetime_updated iso8601 timestamp timestamp when the draft was last updated
##Relationships
###Branched From
Node that the draft is branched from. The node endpoint is available in `/branched_from/links/related/href`.
###Initiator
User who initiated the draft registration. The user endpoint is available in `/initiator/links/related/href`.
##Registration Schema
Detailed registration schema. The schema endpoint is available in `/registration_schema/links/related/href`.
##Actions
###Create Draft Registration
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "draft_registrations", # required
"attributes": {
"registration_supplement": {schema_id}, # required
"registration_metadata": {"question_id": {"value": "question response"}} # optional
}
}
}
Success: 201 CREATED + draft representation
To create a draft registration, issue a POST request to the `self` link. Registration supplement must be the id of an
active registration schema. Registration metadata is not required on the creation of the draft. If registration metadata is included,
it must be a dictionary with keys as question ids in the registration supplement, and values as nested dictionaries
matching the specific format in the registration schema. See registration schema endpoints for specifics. If question
is multiple-choice, question response must exactly match one of the possible choices.
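For example, a minimal sketch with `requests` (URL, token, and schema id are placeholders):
    import requests
    resp = requests.post(
        'https://api.osf.io/v2/nodes/{node_id}/draft_registrations/',  # placeholder URL
        json={'data': {'type': 'draft_registrations',
                       'attributes': {'registration_supplement': '{schema_id}'}}},
        headers={'Authorization': 'Bearer <access token>',
                 'Content-Type': 'application/vnd.api+json'},
    )
    assert resp.status_code == 201
    draft_id = resp.json()['data']['id']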
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
#This request/response
"""
permission_classes = (
IsAdmin,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_DRAFT_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NODE_DRAFT_REGISTRATIONS_WRITE]
serializer_class = DraftRegistrationSerializer
view_category = 'nodes'
view_name = 'node-draft-registrations'
ordering = ('-date_modified',)
# overrides ListCreateAPIView
def get_queryset(self):
node = self.get_node()
drafts = DraftRegistration.find(MQ('branched_from', 'eq', node))
return [draft for draft in drafts if not draft.registered_node or draft.registered_node.is_deleted]
# overrides ListBulkCreateJSONAPIView
def perform_create(self, serializer):
user = self.request.user
serializer.save(initiator=user, node=self.get_node())
class NodeDraftRegistrationDetail(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, DraftMixin):
"""Details about a given draft registration. *Writeable*.
Draft registrations contain the supplemental registration questions that accompany a registration. A registration
is a frozen version of the project that can never be edited or deleted but can be withdrawn. Answer the questions
in the draft registration with PUT/PATCH requests until you are ready to submit. Final submission will include sending the
draft registration id as part of a POST request to the Node Registrations endpoint.
###Permissions
Users must have admin permission on the node in order to view, update, or delete a draft registration.
##Attributes
Draft Registrations have the "draft_registrations" `type`.
name type description
===========================================================================
registration_supplement string id of registration_schema, must be an active schema
registration_metadata dictionary dictionary of question ids and responses from registration schema
datetime_initiated iso8601 timestamp timestamp that the draft was created
datetime_updated iso8601 timestamp timestamp when the draft was last updated
##Relationships
###Branched From
Node that the draft is branched from. The node endpoint is available in `/branched_from/links/related/href`.
###Initiator
User who initiated the draft registration. The user endpoint is available in `/initiator/links/related/href`.
##Registration Schema
Detailed registration schema. The schema endpoint is available in `/registration_schema/links/related/href`.
##Actions
###Update Draft Registration
Method: PUT/PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"id": {draft_registration_id}, # required
"type": "draft_registrations", # required
"attributes": {
"registration_metadata": {"question_id": {"value": "question response"}} # optional
}
}
}
Success: 200 OK + draft representation
To update a draft registration, issue a PUT/PATCH request to the `self` link. Registration supplement cannot be updated
after the draft registration has been created. Registration metadata is required. It must be a dictionary with
keys as question ids in the registration form, and values as nested dictionaries matching the specific format in the
registration schema. See registration schema endpoints for specifics. If question is multiple-choice, question response
must exactly match one of the possible choices.
###Delete Draft Registration
Method: DELETE
URL: /links/self
Query Params: <none>
Success: 204 No Content
To delete a draft registration, issue a DELETE request to the `self` link. This request will remove the draft completely.
A draft that has already been registered cannot be deleted.
##Query Params
+ `view_only=<Str>` -- Allow users with limited access keys to access this node. Note that some keys are anonymous,
so using the view_only key will cause user-related information to no longer serialize. This includes blank ids for users and contributors and missing serializer fields and relationships.
#This Request/Response
"""
permission_classes = (
IsAdminOrReviewer,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope
)
required_read_scopes = [CoreScopes.NODE_DRAFT_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NODE_DRAFT_REGISTRATIONS_WRITE]
serializer_class = DraftRegistrationDetailSerializer
view_category = 'nodes'
view_name = 'node-draft-registration-detail'
def get_object(self):
return self.get_draft()
def perform_destroy(self, draft):
DraftRegistration.remove_one(draft)
class NodeRegistrationsList(JSONAPIBaseView, generics.ListCreateAPIView, NodeMixin, DraftMixin):
"""Registrations of the current node.
Registrations are read-only snapshots of a project that can never be edited or deleted but can be withdrawn. This view
is a list of all the registrations and withdrawn registrations of the current node. To create a registration, first
create a draft registration and answer the required supplemental registration questions. Then, submit a POST request
to this endpoint with the draft registration id in the body of the request.
<!--- Copied from RegistrationList -->
A withdrawn registration will display a limited subset of information, namely, title, description,
date_created, registration, withdrawn, date_registered, withdrawal_justification, and registration supplement. All
other fields will be displayed as null. Additionally, the only relationships permitted to be accessed for a withdrawn
registration are the contributors - other relationships will return a 403. Each resource contains the full representation
of the registration, meaning additional requests to an individual registrations's detail view are not necessary.
<!--- Copied Attributes from RegistrationList -->
##Registration Attributes
Registrations have the "registrations" `type`.
name type description
=======================================================================================================
title string title of the registered project or component
description string description of the registered node
category string node category, must be one of the allowed values
date_created iso8601 timestamp timestamp that the node was created
date_modified iso8601 timestamp timestamp when the node was last updated
tags array of strings list of tags that describe the registered node
current_user_can_comment boolean Whether the current user is allowed to post comments
current_user_permissions array of strings list of strings representing the permissions for the current user on this node
fork boolean is this project a fork?
registration boolean is this node a registration? (always true - may be deprecated in future versions)
collection boolean is this registered node a collection? (always false - may be deprecated in future versions)
public boolean has this registration been made publicly-visible?
withdrawn boolean has this registration been withdrawn?
date_registered iso8601 timestamp timestamp that the registration was created
embargo_end_date iso8601 timestamp when the embargo on this registration will be lifted (if applicable)
withdrawal_justification string reasons for withdrawing the registration
pending_withdrawal boolean is this registration pending withdrawal?
pending_withdrawal_approval boolean is this registration pending approval?
pending_embargo_approval boolean is the associated Embargo awaiting approval by project admins?
registered_meta dictionary registration supplementary information
registration_supplement string registration template
##Actions
###Create Registration
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "registrations", # required
"attributes": {
"draft_registration": {draft_registration_id}, # required, write-only
"registration_choice": one of ['embargo', 'immediate'], # required, write-only
"lift_embargo": format %Y-%m-%dT%H:%M:%S' # required if registration_choice is 'embargo'
}
}
}
Success: 201 CREATED + registration representation
To create a registration, issue a POST request to the `self` link. 'draft_registration' must be the id of a completed
draft registration created for the current node. All required supplemental questions in the draft registration must
have been answered. Registration choice should be 'embargo' if you wish to add an embargo date to the registration.
Registrations can have embargo periods for up to four years. 'lift_embargo' should be the embargo end date.
When the embargo expires, the registration will be made public. If 'immediate' is selected as the "registration_choice",
the registration will be made public once it is approved.
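For example, a hedged sketch of registering a completed draft immediately with `requests` (URL, token, and draft id are placeholders):
    import requests
    resp = requests.post(
        'https://api.osf.io/v2/nodes/{node_id}/registrations/',  # placeholder URL
        json={'data': {'type': 'registrations',
                       'attributes': {'draft_registration': '{draft_registration_id}',
                                      'registration_choice': 'immediate'}}},
        headers={'Authorization': 'Bearer <access token>',
                 'Content-Type': 'application/vnd.api+json'},
    )
    assert resp.status_code == 201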
##Relationships
###Registered from
The registration is branched from this node.
###Registered by
The registration was initiated by this user.
##Registration Schema
Detailed registration schema. The schema endpoint is available in `/registration_schema/links/related/href`.
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
#This request/response
"""
permission_classes = (
AdminOrPublic,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ExcludeWithdrawals
)
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]
serializer_class = RegistrationSerializer
view_category = 'nodes'
view_name = 'node-registrations'
ordering = ('-date_modified',)
# overrides ListCreateAPIView
# TODO: Filter out withdrawals by default
def get_queryset(self):
nodes = self.get_node().registrations_all
auth = get_user_auth(self.request)
registrations = [node for node in nodes if node.can_view(auth)]
return registrations
# overrides ListCreateJSONAPIView
def perform_create(self, serializer):
"""Create a registration from a draft.
"""
# On creation, make sure that current user is the creator
draft_id = self.request.data.get('draft_registration', None)
draft = self.get_draft(draft_id)
serializer.save(draft=draft)
class NodeChildrenList(JSONAPIBaseView, bulk_views.ListBulkCreateJSONAPIView, NodeMixin, NodeODMFilterMixin):
"""Children of the current node. *Writeable*.
This will get the next level of child nodes for the selected node if the current user has read access for those
nodes. Creating a node via this endpoint will behave the same as the [node list endpoint](/v2/nodes/), but the new
node will have the selected node set as its parent.
##Node Attributes
<!--- Copied Attributes from NodeDetail -->
OSF Node entities have the "nodes" `type`.
name type description
=================================================================================
title string title of project or component
description string description of the node
category string node category, must be one of the allowed values
date_created iso8601 timestamp timestamp that the node was created
date_modified iso8601 timestamp timestamp when the node was last updated
tags array of strings list of tags that describe the node
current_user_can_comment boolean Whether the current user is allowed to post comments
current_user_permissions array of strings list of strings representing the permissions for the current user on this node
registration boolean is this a registration? (always false - may be deprecated in future versions)
fork boolean is this node a fork of another node?
public boolean has this node been made publicly-visible?
collection boolean is this a collection? (always false - may be deprecated in future versions)
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Actions
###Create Child Node
<!--- Copied Creating New Node from NodeList -->
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "nodes", # required
"attributes": {
"title": {title}, # required
"category": {category}, # required
"description": {description}, # optional
"tags": [{tag1}, {tag2}] # optional
}
}
}
Success: 201 CREATED + node representation
To create a child node of the current node, issue a POST request to this endpoint. The `title` and `category`
fields are mandatory. `category` must be one of the [permitted node categories](/v2/#osf-node-categories). If the
node creation is successful the API will return a 201 response with the representation of the new node in the body.
For the new node's canonical URL, see the `/links/self` field of the response.
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
<!--- Copied Query Params from NodeList -->
Nodes may be filtered by their `id`, `title`, `category`, `description`, `public`, `tags`, `date_created`, `date_modified`,
`root`, `parent`, and `contributors`. Most are string fields and will be filtered using simple substring matching. `public`
is a boolean, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note that quoting `true`
or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.
#This Request/Response
"""
permission_classes = (
ContributorOrPublic,
drf_permissions.IsAuthenticatedOrReadOnly,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
ExcludeWithdrawals
)
required_read_scopes = [CoreScopes.NODE_CHILDREN_READ]
required_write_scopes = [CoreScopes.NODE_CHILDREN_WRITE]
serializer_class = NodeSerializer
view_category = 'nodes'
view_name = 'node-children'
ordering = ('-date_modified',)
# overrides NodeODMFilterMixin
def get_default_odm_query(self):
return default_node_list_query()
# overrides ListBulkCreateJSONAPIView
def get_queryset(self):
node = self.get_node()
req_query = self.get_query_from_request()
node_pks = node.node_relations.filter(is_node_link=False).select_related('child')\
.values_list('child__pk', flat=True)
query = (
MQ('pk', 'in', node_pks) &
req_query
)
nodes = Node.find(query).order_by('-date_modified')
auth = get_user_auth(self.request)
return [each for each in nodes if each.can_view(auth)]
# overrides ListBulkCreateJSONAPIView
def perform_create(self, serializer):
user = self.request.user
serializer.save(creator=user, parent=self.get_node())
class NodeCitationDetail(JSONAPIBaseView, generics.RetrieveAPIView, NodeMixin):
""" The node citation for a node in CSL format *read only*
##Note
**This API endpoint is under active development, and is subject to change in the future**
##NodeCitationDetail Attributes
name type description
=================================================================================
id string unique ID for the citation
title string title of project or component
author list list of authors for the work
publisher string publisher - almost always 'Open Science Framework'
type string type of citation - web
doi string doi of the resource
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_CITATIONS_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = NodeCitationSerializer
view_category = 'nodes'
view_name = 'node-citation'
def get_object(self):
node = self.get_node()
auth = get_user_auth(self.request)
if not node.is_public and not node.can_view(auth):
raise PermissionDenied if auth.user else NotAuthenticated
return node.csl
class NodeCitationStyleDetail(JSONAPIBaseView, generics.RetrieveAPIView, NodeMixin):
""" The node citation for a node in a specific style's format *read only*
##Note
**This API endpoint is under active development, and is subject to change in the future**
##NodeCitationDetail Attributes
name type description
=================================================================================
citation string complete citation for a node in the given style
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_CITATIONS_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = NodeCitationStyleSerializer
view_category = 'nodes'
view_name = 'node-citation'
def get_object(self):
node = self.get_node()
auth = get_user_auth(self.request)
if not node.is_public and not node.can_view(auth):
raise PermissionDenied if auth.user else NotAuthenticated
style = self.kwargs.get('style_id')
try:
citation = render_citation(node=node, style=style)
except ValueError as err: # style requested could not be found
csl_name = re.findall(r'[a-zA-Z]+\.csl', err.message)[0]
raise NotFound('{} is not a known style.'.format(csl_name))
return {'citation': citation, 'id': style}
# TODO: Make NodeLinks filterable. They currently aren't filterable because we can't
# currently query on a Pointer's node's attributes.
# e.g. Pointer.find(MQ('node.title', 'eq', ...)) doesn't work
class NodeLinksList(BaseNodeLinksList, bulk_views.BulkDestroyJSONAPIView, bulk_views.ListBulkCreateJSONAPIView, NodeMixin):
"""Node Links to other nodes. *Writeable*.
Node Links act as pointers to other nodes. Unlike Forks, they are not copies of nodes;
Node Links are a direct reference to the node that they point to.
##Node Link Attributes
`type` is "node_links"
None
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Relationships
### Target Node
This endpoint shows the target node detail and is automatically embedded.
##Actions
###Adding Node Links
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "node_links", # required
"relationships": {
"nodes": {
"data": {
"type": "nodes", # required
"id": "{target_node_id}", # required
}
}
}
}
}
Success: 201 CREATED + node link representation
To add a node link (a pointer to another node), issue a POST request to this endpoint. This effectively creates a
relationship between the node and the target node. The target node must be described as a relationship object with
a "data" member, containing the nodes `type` and the target node `id`.
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
base_permissions.TokenHasScope,
ExcludeWithdrawals,
NodeLinksShowIfVersion,
)
required_read_scopes = [CoreScopes.NODE_LINKS_READ]
required_write_scopes = [CoreScopes.NODE_LINKS_WRITE]
model_class = NodeRelation
serializer_class = NodeLinksSerializer
view_category = 'nodes'
view_name = 'node-pointers'
def get_queryset(self):
return self.get_node().node_relations.select_related('child').filter(is_node_link=True, child__is_deleted=False)
# Overrides BulkDestroyJSONAPIView
def perform_destroy(self, instance):
auth = get_user_auth(self.request)
node = get_object_or_error(
Node,
self.kwargs[self.node_lookup_url_kwarg],
display_name='node'
)
if node.is_registration:
raise MethodNotAllowed(method=self.request.method)
node = self.get_node()
try:
node.rm_pointer(instance, auth=auth)
except ValueError as err: # pointer doesn't belong to node
raise ValidationError(err.message)
node.save()
# overrides ListCreateAPIView
def get_parser_context(self, http_request):
"""
Tells parser that we are creating a relationship
"""
res = super(NodeLinksList, self).get_parser_context(http_request)
res['is_relationship'] = True
return res
class NodeLinksDetail(BaseNodeLinksDetail, generics.RetrieveDestroyAPIView, NodeMixin):
"""Node Link details. *Writeable*.
Node Links act as pointers to other nodes. Unlike Forks, they are not copies of nodes;
Node Links are a direct reference to the node that they point to.
##Attributes
`type` is "node_links"
None
##Links
*None*
##Relationships
###Target node
This endpoint shows the target node detail and is automatically embedded.
##Actions
###Remove Node Link
Method: DELETE
URL: /links/self
Query Params: <none>
Success: 204 No Content
To remove a node link from a node, issue a DELETE request to the `self` link. This request will remove the
relationship between the node and the target node, not the nodes themselves.
##Query Params
*None*.
#This Request/Response
"""
permission_classes = (
base_permissions.TokenHasScope,
drf_permissions.IsAuthenticatedOrReadOnly,
RegistrationAndPermissionCheckForPointers,
ExcludeWithdrawals,
NodeLinksShowIfVersion,
)
required_read_scopes = [CoreScopes.NODE_LINKS_READ]
required_write_scopes = [CoreScopes.NODE_LINKS_WRITE]
serializer_class = NodeLinksSerializer
view_category = 'nodes'
view_name = 'node-pointer-detail'
node_link_lookup_url_kwarg = 'node_link_id'
# overrides RetrieveAPIView
def get_object(self):
node_link = get_object_or_error(
NodeRelation,
self.kwargs[self.node_link_lookup_url_kwarg],
'node link'
)
self.check_object_permissions(self.request, node_link.parent)
return node_link
# overrides DestroyAPIView
def perform_destroy(self, instance):
auth = get_user_auth(self.request)
node = self.get_node()
pointer = self.get_object()
try:
node.rm_pointer(pointer, auth=auth)
except ValueError as err: # pointer doesn't belong to node
raise NotFound(err.message)
node.save()
class NodeForksList(JSONAPIBaseView, generics.ListCreateAPIView, NodeMixin, NodeODMFilterMixin):
"""Forks of the current node. *Writeable*.
Paginated list of the current node's forks ordered by their `forked_date`. Forks are copies of projects that you can
change without affecting the original project. When creating a fork, your fork will only contain public components or those
for which you are a contributor. Private components that you do not have access to will not be forked.
##Node Fork Attributes
<!--- Copied Attributes from NodeDetail with exception of forked_date-->
OSF Node Fork entities have the "nodes" `type`.
name type description
===============================================================================================================================
title string title of project or component
description string description of the node
category string node category, must be one of the allowed values
date_created iso8601 timestamp timestamp that the node was created
date_modified iso8601 timestamp timestamp when the node was last updated
tags array of strings list of tags that describe the node
registration boolean has this project been registered? (always False)
collection boolean is this node a collection (always False)
fork boolean is this node a fork of another node? (always True)
public boolean has this node been made publicly-visible?
forked_date iso8601 timestamp timestamp when the node was forked
current_user_can_comment boolean Whether the current user is allowed to post comments
current_user_permissions array of strings List of strings representing the permissions for the current user on this node
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Actions
###Create Node Fork
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "nodes", # required
"attributes": {
"title": {title} # optional
}
}
}
Success: 201 CREATED + node representation
To create a fork of the current node, issue a POST request to this endpoint. The `title` field is optional, with the
default title being 'Fork of ' + the current node's title. If the fork's creation is successful the API will return a
201 response with the representation of the forked node in the body. For the new fork's canonical URL, see the `/links/self`
field of the response.
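For example, a hedged sketch with `requests` (URL, token, and title are placeholders):
    import requests
    resp = requests.post(
        'https://api.osf.io/v2/nodes/{node_id}/forks/',          # placeholder URL
        json={'data': {'type': 'nodes',
                       'attributes': {'title': 'Fork for re-analysis'}}},
        headers={'Authorization': 'Bearer <access token>',
                 'Content-Type': 'application/vnd.api+json'},
    )
    assert resp.status_code == 201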
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
<!--- Copied Query Params from NodeList -->
Nodes may be filtered by their `title`, `category`, `description`, `public`, `registration`, `tags`, `date_created`,
`date_modified`, `root`, `parent`, and `contributors`. Most are string fields and will be filtered using simple
substring matching. Others are booleans, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`.
Note that quoting `true` or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.
#This Request/Response
"""
permission_classes = (
IsPublic,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ExcludeWithdrawals
)
required_read_scopes = [CoreScopes.NODE_FORKS_READ, CoreScopes.NODE_BASE_READ]
required_write_scopes = [CoreScopes.NODE_FORKS_WRITE]
serializer_class = NodeForksSerializer
view_category = 'nodes'
view_name = 'node-forks'
ordering = ('-forked_date',)
# overrides ListCreateAPIView
def get_queryset(self):
all_forks = self.get_node().forks.order_by('-forked_date')
auth = get_user_auth(self.request)
node_pks = [node.pk for node in all_forks if node.can_view(auth)]
return AbstractNode.objects.filter(pk__in=node_pks)
# overrides ListCreateAPIView
def perform_create(self, serializer):
serializer.save(node=self.get_node())
# overrides ListCreateAPIView
def get_parser_context(self, http_request):
"""
Tells parser that attributes are not required in request
"""
res = super(NodeForksList, self).get_parser_context(http_request)
res['attributes_required'] = False
return res
class NodeFilesList(JSONAPIBaseView, generics.ListAPIView, WaterButlerMixin, ListFilterMixin, NodeMixin):
"""Files attached to a node for a given provider. *Read-only*.
This gives a list of all of the files and folders that are attached to your project for the given storage provider.
If the provider is not "osfstorage", the metadata for the files in the storage will be retrieved and cached whenever
this endpoint is accessed. To see the cached metadata, GET the endpoint for the file directly (available through
its `/links/info` attribute).
When a create/update/delete action is performed against the file or folder, the action is handled by an external
service called WaterButler. The WaterButler response format differs slightly from the OSF's.
<!--- Copied from FileDetail.Spiel -->
###Waterbutler Entities
When an action is performed against a WaterButler endpoint, it will generally respond with a file entity, a folder
entity, or no content.
####File Entity
name type description
==========================================================================================================
name string name of the file
path string unique identifier for this file entity for this
project and storage provider. may not end with '/'
materialized string the full path of the file relative to the storage
root. may not end with '/'
kind string "file"
etag string etag - http caching identifier w/o wrapping quotes
modified timestamp last modified timestamp - format depends on provider
contentType string MIME-type when available
provider string id of provider e.g. "osfstorage", "s3", "googledrive".
equivalent to addon_short_name on the OSF
size integer size of file in bytes
current_version integer current file version
current_user_can_comment boolean Whether the current user is allowed to post comments
tags array of strings list of tags that describes the file (osfstorage only)
extra object may contain additional data beyond what's described here,
depending on the provider
version integer version number of file. will be 1 on initial upload
hashes object
md5 string md5 hash of file
sha256 string SHA-256 hash of file
####Folder Entity
name type description
======================================================================
name string name of the folder
path string unique identifier for this folder entity for this
project and storage provider. must end with '/'
materialized string the full path of the folder relative to the storage
root. must end with '/'
kind string "folder"
etag string etag - http caching identifier w/o wrapping quotes
extra object varies depending on provider
##File Attributes
<!--- Copied Attributes from FileDetail -->
For an OSF File entity, the `type` is "files" regardless of whether the entity is actually a file or folder. They
can be distinguished by the `kind` attribute. Files and folders use the same representation, but some attributes may
be null for one kind but not the other. `size` will be null for folders. A list of storage provider keys can be
found [here](/v2/#storage-providers).
name type description
===================================================================================================
guid string OSF GUID for this file (if one has been assigned)
name string name of the file or folder; used for display
kind string "file" or "folder"
path string same as for corresponding WaterButler entity
materialized_path string the unix-style path to the file relative to the provider root
size integer size of file in bytes, null for folders
provider string storage provider for this file. "osfstorage" if stored on the
OSF. other examples include "s3" for Amazon S3, "googledrive"
for Google Drive, "box" for Box.com.
last_touched iso8601 timestamp last time the metadata for the file was retrieved. only
applies to non-OSF storage providers.
date_modified iso8601 timestamp timestamp of when this file was last updated*
date_created iso8601 timestamp timestamp of when this file was created*
extra object may contain additional data beyond what's described here,
depending on the provider
hashes object
md5 string md5 hash of file, null for folders
sha256 string SHA-256 hash of file, null for folders
downloads integer number of times the file has been downloaded (for osfstorage files)
* A note on timestamps: for files stored in osfstorage, `date_created` refers to the time the file was
first uploaded to osfstorage, and `date_modified` is the time the file was last updated while in osfstorage.
Other providers may or may not provide this information, but if they do it will correspond to the provider's
semantics for created/modified times. These timestamps may also be stale; metadata retrieved via the File Detail
endpoint is cached. The `last_touched` field describes the last time the metadata was retrieved from the external
provider. To force a metadata update, access the parent folder via its Node Files List endpoint.
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Actions
<!--- Copied from FileDetail.Actions -->
The `links` property of the response provides endpoints for common file operations. The currently-supported actions
are:
###Get Info (*files, folders*)
Method: GET
URL: /links/info
Params: <none>
Success: 200 OK + file representation
The contents of a folder or details of a particular file can be retrieved by performing a GET request against the
`info` link. The response will be a standard OSF response format with the [OSF File attributes](#attributes).
###Download (*files*)
Method: GET
URL: /links/download
Params: <none>
Success: 200 OK + file body
To download a file, issue a GET request against the `download` link. The response will have the Content-Disposition
    header set, which will trigger a download in a browser.
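    As an illustrative sketch (not part of the canonical docs), a download could be performed with Python's
    `requests` library; the URL and token below are placeholders, and the real URL should be read from the
    file's `download` link:

        import requests

        download_url = 'https://files.osf.io/v1/resources/abc12/providers/osfstorage/xyz98'  # hypothetical
        headers = {'Authorization': 'Bearer PERSONAL_ACCESS_TOKEN'}  # only needed for private nodes

        resp = requests.get(download_url, headers=headers)
        resp.raise_for_status()
        with open('local_copy.bin', 'wb') as fh:
            fh.write(resp.content)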
###Create Subfolder (*folders*)
Method: PUT
URL: /links/new_folder
Query Params: ?kind=folder&name={new_folder_name}
Body: <empty>
Success: 201 Created + new folder representation
You can create a subfolder of an existing folder by issuing a PUT request against the `new_folder` link. The
`?kind=folder` portion of the query parameter is already included in the `new_folder` link. The name of the new
subfolder should be provided in the `name` query parameter. The response will contain a [WaterButler folder
entity](#folder-entity). If a folder with that name already exists in the parent directory, the server will return
a 409 Conflict error response.
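    A minimal sketch of that request with `requests`, assuming `new_folder_url` was taken from the folder's
    `new_folder` link (all ids below are placeholders):

        import requests

        new_folder_url = 'https://files.osf.io/v1/resources/abc12/providers/osfstorage/?kind=folder'  # hypothetical
        headers = {'Authorization': 'Bearer PERSONAL_ACCESS_TOKEN'}

        resp = requests.put(new_folder_url, params={'name': 'raw_data'}, headers=headers)
        resp.raise_for_status()  # 201 Created on success, 409 Conflict if the name already exists
        print(resp.json())       # WaterButler folder entity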
###Upload New File (*folders*)
Method: PUT
URL: /links/upload
Query Params: ?kind=file&name={new_file_name}
Body (Raw): <file data (not form-encoded)>
Success: 201 Created + new file representation
To upload a file to a folder, issue a PUT request to the folder's `upload` link with the raw file data in the
request body, and the `kind` and `name` query parameters set to `'file'` and the desired name of the file. The
response will contain a [WaterButler file entity](#file-entity) that describes the new file. If a file with the
same name already exists in the folder, the server will return a 409 Conflict error response.
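    A minimal sketch of an upload with `requests`, assuming `upload_url` was taken from the folder's `upload`
    link (ids and file name are placeholders):

        import requests

        upload_url = 'https://files.osf.io/v1/resources/abc12/providers/osfstorage/'  # hypothetical
        headers = {'Authorization': 'Bearer PERSONAL_ACCESS_TOKEN'}

        with open('results.csv', 'rb') as fh:
            resp = requests.put(
                upload_url,
                params={'kind': 'file', 'name': 'results.csv'},
                data=fh,  # raw bytes in the body, not form-encoded
                headers=headers,
            )
        resp.raise_for_status()  # 201 Created, or 409 Conflict if the name is taken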
###Update Existing File (*file*)
Method: PUT
URL: /links/upload
Query Params: ?kind=file
Body (Raw): <file data (not form-encoded)>
Success: 200 OK + updated file representation
To update an existing file, issue a PUT request to the file's `upload` link with the raw file data in the request
body and the `kind` query parameter set to `"file"`. The update action will create a new version of the file.
The response will contain a [WaterButler file entity](#file-entity) that describes the updated file.
###Rename (*files, folders*)
Method: POST
URL: /links/move
Query Params: <none>
Body (JSON): {
"action": "rename",
"rename": {new_file_name}
}
Success: 200 OK + new entity representation
To rename a file or folder, issue a POST request to the `move` link with the `action` body parameter set to
`"rename"` and the `rename` body parameter set to the desired name. The response will contain either a folder
entity or file entity with the new name.
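    A minimal sketch of a rename with `requests` (the `move` URL is a placeholder standing in for a real
    file's link):

        import requests

        move_url = 'https://files.osf.io/v1/resources/abc12/providers/osfstorage/xyz98'  # hypothetical
        headers = {'Authorization': 'Bearer PERSONAL_ACCESS_TOKEN'}

        resp = requests.post(
            move_url,
            json={'action': 'rename', 'rename': 'final_results.csv'},
            headers=headers,
        )
        resp.raise_for_status()  # 200 OK with the renamed entity in the body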
###Move & Copy (*files, folders*)
Method: POST
URL: /links/move
Query Params: <none>
Body (JSON): {
// mandatory
"action": "move"|"copy",
"path": {path_attribute_of_target_folder},
// optional
"rename": {new_name},
"conflict": "replace"|"keep", // defaults to 'replace'
"resource": {node_id}, // defaults to current {node_id}
"provider": {provider} // defaults to current {provider}
}
Success: 200 OK or 201 Created + new entity representation
Move and copy actions both use the same request structure, a POST to the `move` url, but with different values for
    the `action` body parameter. The `path` parameter is also required and should be the OSF `path` attribute of the
folder being written to. The `rename` and `conflict` parameters are optional. If you wish to change the name of
the file or folder at its destination, set the `rename` parameter to the new name. The `conflict` param governs how
name clashes are resolved. Possible values are `replace` and `keep`. `replace` is the default and will overwrite
the file that already exists in the target folder. `keep` will attempt to keep both by adding a suffix to the new
    file's name until it no longer conflicts. The suffix will be ' (**x**)' where **x** is an increasing integer
starting from 1. This behavior is intended to mimic that of the OS X Finder. The response will contain either a
folder entity or file entity with the new name.
Files and folders can also be moved between nodes and providers. The `resource` parameter is the id of the node
    under which the file/folder should be moved. It *must* agree with the `path` parameter; that is, the `path` must
identify a valid folder under the node identified by `resource`. Likewise, the `provider` parameter may be used to
move the file/folder to another storage provider, but both the `resource` and `path` parameters must belong to a
    node and folder already extant on that provider. Both `resource` and `provider` default to the current node and
    provider.
If a moved/copied file is overwriting an existing file, a 200 OK response will be returned. Otherwise, a 201
Created will be returned.
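    A minimal sketch of a copy into another folder with `requests` (all ids are placeholders; optional
    parameters are shown commented out):

        import requests

        move_url = 'https://files.osf.io/v1/resources/abc12/providers/osfstorage/xyz98'  # hypothetical source file
        headers = {'Authorization': 'Bearer PERSONAL_ACCESS_TOKEN'}

        body = {
            'action': 'copy',
            'path': '/fol56/',               # OSF path attribute of the destination folder (placeholder)
            'rename': 'copy_of_results.csv',
            'conflict': 'keep',              # keep both files on a name clash
            # 'resource': 'def34',           # copy into a different node
            # 'provider': 'osfstorage',      # copy to a different storage provider
        }
        resp = requests.post(move_url, json=body, headers=headers)
        resp.raise_for_status()              # 200 OK (overwrite) or 201 Created (new file)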
###Delete (*file, folders*)
Method: DELETE
URL: /links/delete
Query Params: <none>
Success: 204 No Content
    To delete a file or folder, send a DELETE request to the `delete` link. Nothing will be returned in the response
body.
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
Node files may be filtered by `id`, `name`, `node`, `kind`, `path`, `provider`, `size`, and `last_touched`.
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.PermissionWithGetter(ContributorOrPublic, 'node'),
base_permissions.PermissionWithGetter(ReadOnlyIfRegistration, 'node'),
base_permissions.TokenHasScope,
ExcludeWithdrawals
)
ordering = ('_materialized_path',) # default ordering
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
view_category = 'nodes'
view_name = 'node-files'
@property
def serializer_class(self):
if self.kwargs[self.provider_lookup_url_kwarg] == 'osfstorage':
return OsfStorageFileSerializer
return FileSerializer
# overrides FilterMixin
def postprocess_query_param(self, key, field_name, operation):
# tag queries will usually be on Tag.name,
# ?filter[tags]=foo should be translated to MQ('tags__name', 'eq', 'foo')
        # But an empty-list query should stay on 'tags' itself, e.g.
        # ?filter[tags]=[] should be translated to MQ('tags', 'isnull', True)
if field_name == 'tags':
if operation['value'] not in (list(), tuple()):
operation['source_field_name'] = 'tags__name'
operation['op'] = 'iexact'
if field_name == 'path':
operation['source_field_name'] = '_path'
# NOTE: This is potentially fragile, if we ever add filtering on provider
        # we're going to have to get a bit tricky. get_default_queryset should remain filtering on BaseFileNode, for now
if field_name == 'kind':
if operation['value'].lower() == 'folder':
kind = Folder
else:
# Default to File, should probably raise an exception in the future
kind = File # Default to file
operation['source_field_name'] = 'type'
operation['op'] = 'in'
operation['value'] = [
sub._typedmodels_type
for sub in kind.__subclasses__()
if hasattr(sub, '_typedmodels_type')
]
def get_default_queryset(self):
files_list = self.fetch_from_waterbutler()
if isinstance(files_list, list):
provider = self.kwargs[self.provider_lookup_url_kwarg]
# Resolve to a provider-specific subclass, so that
# trashed file nodes are filtered out automatically
ConcreteFileNode = BaseFileNode.resolve_class(provider, BaseFileNode.ANY)
return ConcreteFileNode.objects.filter(
id__in=[self.get_file_item(file).id for file in files_list],
)
if isinstance(files_list, list) or not isinstance(files_list, Folder):
# We should not have gotten a file here
raise NotFound
return files_list.children.all()
# overrides ListAPIView
def get_queryset(self):
return self.get_queryset_from_request().distinct()
class NodeFileDetail(JSONAPIBaseView, generics.RetrieveAPIView, WaterButlerMixin, NodeMixin):
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.PermissionWithGetter(ContributorOrPublic, 'node'),
base_permissions.PermissionWithGetter(ReadOnlyIfRegistration, 'node'),
base_permissions.TokenHasScope,
ExcludeWithdrawals
)
serializer_class = FileSerializer
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
view_category = 'nodes'
view_name = 'node-file-detail'
def get_object(self):
fobj = self.fetch_from_waterbutler()
if isinstance(fobj, dict):
return self.get_file_item(fobj)
if isinstance(fobj, list) or not isinstance(fobj, File):
# We should not have gotten a folder here
raise NotFound
return fobj
class NodeAddonList(JSONAPIBaseView, generics.ListAPIView, ListFilterMixin, NodeMixin, AddonSettingsMixin):
"""List of addons connected to this node *Read-only*
Paginated list of node addons ordered by their `id` or `addon_short_name`. Attributes other than
`enabled` will be `null` if the addon is not enabled for this node.
## <Addon\>NodeSettings Attributes
OSF <Addon\>NodeSettings entities have the "node_addons" `type`, and their `id` indicates the addon
service provider (eg. `box`, `googledrive`, etc).
name type description
======================================================================================================
external_account_id string _id of the associated ExternalAccount, if any
configured boolean has this node been configured with a folder?
enabled boolean has a node settings object been associated with this node?
folder_id string folder id of linked folder, from third-party service
node_has_auth boolean is this node fully authorized to use an ExternalAccount?
        folder_path            string             folder path of linked folder, from third-party service
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
self: the canonical api endpoint of this node_addon
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
ExcludeWithdrawals,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_ADDON_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = NodeAddonSettingsSerializer
view_category = 'nodes'
view_name = 'node-addons'
ordering = ('-id',)
def get_default_queryset(self):
qs = []
for addon in ADDONS_OAUTH:
obj = self.get_addon_settings(provider=addon, fail_if_absent=False, check_object_permissions=False)
if obj:
qs.append(obj)
qs.sort()
return qs
get_queryset = get_default_queryset
class NodeAddonDetail(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, generics.CreateAPIView, NodeMixin, AddonSettingsMixin):
"""
Detail of individual addon connected to this node *Writeable*.
Attributes other than `enabled` will be null if the addon is not enabled for this node.
##Permissions
<Addon>NodeSettings that are attached to public Nodes will give read-only access to everyone. Private nodes require explicit read
permission. Write and admin access are the same for public and private nodes. Administrators on a parent node have
implicit read permissions for all child nodes.
Any users with write or admin access to the node are able to deauthorize an enabled addon, but only the addon authorizer is able
to change the configuration (i.e. selected folder) of an already-configured <Addon>NodeSettings entity.
## <Addon>NodeSettings Attributes
OSF <Addon>NodeSettings entities have the "node_addons" `type`, and their `id` indicates the addon
service provider (eg. `box`, `googledrive`, etc).
name type description
======================================================================================================
external_account_id string _id of the associated ExternalAccount, if any
configured boolean has this node been configured with a folder?
enabled boolean has a node settings object been associated with this node?
folder_id string folder id of linked folder, from third-party service
node_has_auth boolean is this node fully authorized to use an ExternalAccount?
        folder_path            string             folder path of linked folder, from third-party service
url string Specific to the `forward` addon
label string Specific to the `forward` addon
##Links
self: the canonical api endpoint of this node_addon
##Actions
###Update
Method: PUT / PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {"data": {
"type": "node_addons", # required
"id": {provider}, # required
"attributes": {
"external_account_id": {account_id}, # optional
"folder_id": {folder_id}, # optional
"folder_path": {folder_path}, # optional - Google Drive specific
"url": {url}, # optional - External Link specific
"label": {label} # optional - External Link specific
}
}
}
Success: 200 OK + node_addon representation
To update a node, issue either a PUT or a PATCH request against the `/links/self` URL. The `external_account_id`,
`enabled`, and `folder_id` fields are mandatory if you PUT and optional if you PATCH. However, at least one is always mandatory.
Non-string values will be accepted and stringified, but we make no promises about the stringification output. So
don't do that.
To delete or deauthorize a node_addon, issue a PUT with all fields set to `null` / `False`, or a PATCH with `enabled` set to `False`.
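    A minimal sketch of such a configuration update with `requests` (node id, provider, and folder id are
    placeholders; the URL is assumed from the `self` link):

        import requests

        url = 'https://api.osf.io/v2/nodes/abc12/addons/box/'  # hypothetical
        headers = {
            'Authorization': 'Bearer PERSONAL_ACCESS_TOKEN',
            'Content-Type': 'application/vnd.api+json',
        }
        payload = {
            'data': {
                'type': 'node_addons',
                'id': 'box',
                'attributes': {'folder_id': '1234567890'},  # placeholder folder id
            }
        }
        resp = requests.patch(url, json=payload, headers=headers)
        resp.raise_for_status()  # 200 OK with the updated node_addon representation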
####Note
Not all addons are currently configurable via the API. The current list of addons that accept PUT/PATCH is [`box`, `dropbox`, `s3`, `googledrive`]
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
ExcludeWithdrawals,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_ADDON_READ]
required_write_scopes = [CoreScopes.NODE_ADDON_WRITE]
serializer_class = NodeAddonSettingsSerializer
view_category = 'nodes'
view_name = 'node-addon-detail'
def get_object(self):
return self.get_addon_settings(check_object_permissions=False)
def perform_create(self, serializer):
addon = self.kwargs['provider']
if addon not in ADDONS_OAUTH:
raise NotFound('Requested addon unavailable')
node = self.get_node()
if node.has_addon(addon):
raise InvalidModelValueError(
detail='Add-on {} already enabled for node {}'.format(addon, node._id)
)
return super(NodeAddonDetail, self).perform_create(serializer)
def perform_destroy(self, instance):
addon = instance.config.short_name
node = self.get_node()
if not node.has_addon(instance.config.short_name):
raise NotFound('Node {} does not have add-on {}'.format(node._id, addon))
node.delete_addon(addon, auth=get_user_auth(self.request))
def get_serializer_class(self):
"""
Use NodeDetailSerializer which requires 'id'
"""
if 'provider' in self.kwargs and self.kwargs['provider'] == 'forward':
return ForwardNodeAddonSettingsSerializer
else:
return NodeAddonSettingsSerializer
class NodeAddonFolderList(JSONAPIBaseView, generics.ListAPIView, NodeMixin, AddonSettingsMixin):
"""List of folders that this node can connect to *Read-only*.
Paginated list of folders retrieved from the associated third-party service
##Permissions
<Addon> Folders are visible only to the addon authorizer.
## <Addon> Folder Attributes
OSF <Addon\> Folder entities have the "node_addon_folders" `type`, and their `id` indicates the folder_id
according to the associated service provider (eg. `box`, `googledrive`, etc).
name type description
======================================================================================================
path string path of this folder, according to third-party service
kind string `"folder"`, typically.
provider string `short_name` of third-party service provider
name string name of this folder
folder_id string id of this folder, according to third-party service
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
root: the canonical api endpoint of the root folder for this account
children: the canonical api endpoint of this folder's children
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
ExcludeWithdrawals,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_ADDON_READ, CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NULL]
pagination_class = MaxSizePagination
serializer_class = NodeAddonFolderSerializer
view_category = 'nodes'
view_name = 'node-addon-folders'
def get_queryset(self):
# TODO: [OSF-6120] refactor this/NS models to be generalizable
node_addon = self.get_addon_settings()
if not node_addon.has_auth:
raise JSONAPIException(detail='This addon is enabled but an account has not been imported from your user settings',
meta={'link': '{}users/me/addons/{}/accounts/'.format(API_BASE, node_addon.config.short_name)})
path = self.request.query_params.get('path')
folder_id = self.request.query_params.get('id')
if not hasattr(node_addon, 'get_folders'):
raise EndpointNotImplementedError('Endpoint not yet implemented for this addon')
return node_addon.get_folders(path=path, folder_id=folder_id)
class NodeProvider(object):
def __init__(self, provider, node):
self.path = '/'
self.node = node
self.kind = 'folder'
self.name = provider
self.provider = provider
self.node_id = node._id
self.pk = node._id
self.id = node.id
class NodeProvidersList(JSONAPIBaseView, generics.ListAPIView, NodeMixin):
"""List of storage providers enabled for this node. *Read-only*.
Users of the OSF may access their data on a [number of cloud-storage](/v2/#storage-providers) services that have
integrations with the OSF. We call these "providers". By default every node has access to the OSF-provided
storage but may use as many of the supported providers as desired. This endpoint lists all of the providers that are
configured for this node. If you want to add more, you will need to do that in the Open Science Framework front end
for now.
In the OSF filesystem model, providers are treated as folders, but with special properties that distinguish them
from regular folders. Every provider folder is considered a root folder, and may not be deleted through the regular
file API. To see the contents of the provider, issue a GET request to the `/relationships/files/links/related/href`
attribute of the provider resource. The `new_folder` and `upload` actions are handled by another service called
WaterButler, whose response format differs slightly from the OSF's.
<!--- Copied from FileDetail.Spiel -->
    ###WaterButler Entities
When an action is performed against a WaterButler endpoint, it will generally respond with a file entity, a folder
entity, or no content.
####File Entity
name type description
=========================================================================
name string name of the file
path string unique identifier for this file entity for this
project and storage provider. may not end with '/'
materialized string the full path of the file relative to the storage
root. may not end with '/'
kind string "file"
etag string etag - http caching identifier w/o wrapping quotes
modified timestamp last modified timestamp - format depends on provider
contentType string MIME-type when available
provider string id of provider e.g. "osfstorage", "s3", "googledrive".
equivalent to addon_short_name on the OSF
size integer size of file in bytes
extra object may contain additional data beyond what's described here,
depending on the provider
version integer version number of file. will be 1 on initial upload
        downloads         integer            number of times the file has been downloaded
hashes object
md5 string md5 hash of file
sha256 string SHA-256 hash of file
####Folder Entity
name type description
======================================================================
name string name of the folder
path string unique identifier for this folder entity for this
project and storage provider. must end with '/'
materialized string the full path of the folder relative to the storage
root. must end with '/'
kind string "folder"
etag string etag - http caching identifier w/o wrapping quotes
extra object varies depending on provider
##Provider Attributes
`type` is "files"
name type description
=================================================================================
name string name of the provider
kind string type of this file/folder. always "folder"
        path          string             relative path of this folder within the provider filesystem. always "/"
node string node this provider belongs to
provider string provider id, same as "name"
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Actions
<!--- Copied from FileDetail.Actions -->
###Create Subfolder (*folders*)
Method: PUT
URL: /links/new_folder
Query Params: ?kind=folder&name={new_folder_name}
Body: <empty>
Success: 201 Created + new folder representation
You can create a subfolder of an existing folder by issuing a PUT request against the `new_folder` link. The
`?kind=folder` portion of the query parameter is already included in the `new_folder` link. The name of the new
subfolder should be provided in the `name` query parameter. The response will contain a [WaterButler folder
entity](#folder-entity). If a folder with that name already exists in the parent directory, the server will return
a 409 Conflict error response.
###Upload New File (*folders*)
Method: PUT
URL: /links/upload
Query Params: ?kind=file&name={new_file_name}
Body (Raw): <file data (not form-encoded)>
Success: 201 Created + new file representation
To upload a file to a folder, issue a PUT request to the folder's `upload` link with the raw file data in the
request body, and the `kind` and `name` query parameters set to `'file'` and the desired name of the file. The
response will contain a [WaterButler file entity](#file-entity) that describes the new file. If a file with the
same name already exists in the folder, the server will return a 409 Conflict error response.
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
ExcludeWithdrawals,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
serializer_class = NodeProviderSerializer
view_category = 'nodes'
view_name = 'node-providers'
ordering = ('-id',)
def get_provider_item(self, provider):
return NodeProvider(provider, self.get_node())
def get_queryset(self):
return [
self.get_provider_item(addon.config.short_name)
for addon
in self.get_node().get_addons()
if addon.config.has_hgrid_files
and addon.configured
]
class NodeProviderDetail(JSONAPIBaseView, generics.RetrieveAPIView, NodeMixin):
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
ExcludeWithdrawals,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
serializer_class = NodeProviderSerializer
view_category = 'nodes'
view_name = 'node-provider-detail'
def get_object(self):
return NodeProvider(self.kwargs['provider'], self.get_node())
class NodeAlternativeCitationsList(JSONAPIBaseView, generics.ListCreateAPIView, NodeMixin):
"""List of alternative citations for a project.
##Actions
###Create Alternative Citation
Method: POST
Body (JSON): {
"data": {
"type": "citations", # required
"attributes": {
"name": {name}, # mandatory
"text": {text} # mandatory
}
}
}
Success: 201 Created + new citation representation
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
AdminOrPublic,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope
)
required_read_scopes = [CoreScopes.NODE_CITATIONS_READ]
required_write_scopes = [CoreScopes.NODE_CITATIONS_WRITE]
serializer_class = NodeAlternativeCitationSerializer
view_category = 'nodes'
view_name = 'alternative-citations'
ordering = ('-id',)
def get_queryset(self):
return self.get_node().alternative_citations.all()
class NodeAlternativeCitationDetail(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, NodeMixin):
"""Details about an alternative citations for a project.
##Actions
###Update Alternative Citation
Method: PUT
Body (JSON): {
"data": {
"type": "citations", # required
"id": {{id}} # required
"attributes": {
"name": {name}, # mandatory
"text": {text} # mandatory
}
}
}
        Success:       200 OK + updated citation representation
###Delete Alternative Citation
Method: DELETE
Success: 204 No content
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
AdminOrPublic,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope
)
required_read_scopes = [CoreScopes.NODE_CITATIONS_READ]
required_write_scopes = [CoreScopes.NODE_CITATIONS_WRITE]
serializer_class = NodeAlternativeCitationSerializer
view_category = 'nodes'
view_name = 'alternative-citation-detail'
def get_object(self):
try:
return self.get_node().alternative_citations.get(_id=str(self.kwargs['citation_id']))
except AlternativeCitation.DoesNotExist:
raise NotFound
def perform_destroy(self, instance):
self.get_node().remove_citation(get_user_auth(self.request), instance, save=True)
class NodeLogList(JSONAPIBaseView, generics.ListAPIView, NodeMixin, ListFilterMixin):
"""List of Logs associated with a given Node. *Read-only*.
<!--- Copied Description from NodeLogDetail -->
Paginated list of Logs ordered by their `date`. This includes the Logs of the specified Node as well as the logs of that Node's children that the current user has access to.
Note that if an anonymous view_only key is being used, the user relationship will not be exposed.
    On the front end, logs record and show actions done on the OSF. The complete list of loggable actions (in the format {identifier}: {description}) is as follows:
* 'project_created': A Node is created
* 'project_registered': A Node is registered
* 'project_deleted': A Node is deleted
* 'created_from': A Node is created using an existing Node as a template
* 'pointer_created': A Pointer is created
* 'pointer_forked': A Pointer is forked
* 'pointer_removed': A Pointer is removed
* 'node_removed': A component is deleted
* 'node_forked': A Node is forked
===
* 'made_public': A Node is made public
* 'made_private': A Node is made private
* 'tag_added': A tag is added to a Node
* 'tag_removed': A tag is removed from a Node
* 'edit_title': A Node's title is changed
* 'edit_description': A Node's description is changed
* 'updated_fields': One or more of a Node's fields are changed
* 'external_ids_added': An external identifier is added to a Node (e.g. DOI, ARK)
===
* 'contributor_added': A Contributor is added to a Node
* 'contributor_removed': A Contributor is removed from a Node
* 'contributors_reordered': A Contributor's position in a Node's bibliography is changed
* 'permissions_updated': A Contributor's permissions on a Node are changed
* 'made_contributor_visible': A Contributor is made bibliographically visible on a Node
* 'made_contributor_invisible': A Contributor is made bibliographically invisible on a Node
===
* 'wiki_updated': A Node's wiki is updated
* 'wiki_deleted': A Node's wiki is deleted
* 'wiki_renamed': A Node's wiki is renamed
* 'made_wiki_public': A Node's wiki is made public
* 'made_wiki_private': A Node's wiki is made private
===
* 'addon_added': An add-on is linked to a Node
* 'addon_removed': An add-on is unlinked from a Node
* 'addon_file_moved': A File in a Node's linked add-on is moved
* 'addon_file_copied': A File in a Node's linked add-on is copied
* 'addon_file_renamed': A File in a Node's linked add-on is renamed
* 'node_authorized': An addon is authorized for a project
* 'node_deauthorized': An addon is deauthorized for a project
* 'folder_created': A Folder is created in a Node's linked add-on
* 'file_added': A File is added to a Node's linked add-on
* 'file_updated': A File is updated on a Node's linked add-on
* 'file_removed': A File is removed from a Node's linked add-on
* 'file_restored': A File is restored in a Node's linked add-on
===
* 'comment_added': A Comment is added to some item
* 'comment_removed': A Comment is removed from some item
* 'comment_updated': A Comment is updated on some item
===
* 'embargo_initiated': An embargoed Registration is proposed on a Node
* 'embargo_approved': A proposed Embargo of a Node is approved
* 'embargo_cancelled': A proposed Embargo of a Node is cancelled
* 'embargo_completed': A proposed Embargo of a Node is completed
* 'retraction_initiated': A Withdrawal of a Registration is proposed
* 'retraction_approved': A Withdrawal of a Registration is approved
* 'retraction_cancelled': A Withdrawal of a Registration is cancelled
* 'registration_initiated': A Registration of a Node is proposed
* 'registration_approved': A proposed Registration is approved
* 'registration_cancelled': A proposed Registration is cancelled
===
* 'node_created': A Node is created (_deprecated_)
##Log Attributes
<!--- Copied Attributes from LogList -->
OSF Log entities have the "logs" `type`.
name type description
============================================================================
date iso8601 timestamp timestamp of Log creation
action string Log action (see list above)
##Relationships
###Node
The node this log belongs to.
###User
The user who performed the logged action.
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Actions
##Query Params
<!--- Copied Query Params from LogList -->
Logs may be filtered by their `action` and `date`.
#This Request/Response
"""
serializer_class = NodeLogSerializer
view_category = 'nodes'
view_name = 'node-logs'
required_read_scopes = [CoreScopes.NODE_LOG_READ]
required_write_scopes = [CoreScopes.NULL]
log_lookup_url_kwarg = 'node_id'
ordering = ('-date', )
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
base_permissions.TokenHasScope,
ExcludeWithdrawals
)
def get_default_queryset(self):
auth = get_user_auth(self.request)
queryset = self.get_node().get_aggregate_logs_queryset(auth)
return queryset
def get_queryset(self):
queryset = self.get_queryset_from_request().include(
'node__guids', 'user__guids', 'original_node__guids', limit_includes=10
)
return queryset
class NodeCommentsList(JSONAPIBaseView, generics.ListCreateAPIView, ODMFilterMixin, NodeMixin):
"""List of comments on a node. *Writeable*.
    Paginated list of comments ordered by their `date_created`. Each resource contains the full representation of the
comment, meaning additional requests to an individual comment's detail view are not necessary.
Note that if an anonymous view_only key is being used, the user relationship will not be exposed.
###Permissions
Comments on public nodes are given read-only access to everyone. If the node comment-level is "private",
only contributors have permission to comment. If the comment-level is "public" any logged-in OSF user can comment.
Comments on private nodes are only visible to contributors and administrators on the parent node.
##Attributes
OSF comment entities have the "comments" `type`.
name type description
=================================================================================
content string content of the comment
date_created iso8601 timestamp timestamp that the comment was created
date_modified iso8601 timestamp timestamp when the comment was last updated
modified boolean has this comment been edited?
deleted boolean is this comment deleted?
is_abuse boolean has this comment been reported by the current user?
has_children boolean does this comment have replies?
can_edit boolean can the current user edit this comment?
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Actions
###Create
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "comments", # required
"attributes": {
"content": {content}, # mandatory
},
"relationships": {
"target": {
"data": {
"type": {target type} # mandatory
"id": {target._id} # mandatory
}
}
}
}
}
Success: 201 CREATED + comment representation
To create a comment on this node, issue a POST request against this endpoint. The comment target id and target type
must be specified. To create a comment on the node overview page, the target `type` would be "nodes" and the `id`
would be the node id. To reply to a comment on this node, the target `type` would be "comments" and the `id` would
be the id of the comment to reply to. The `content` field is mandatory.
If the comment creation is successful the API will return
a 201 response with the representation of the new comment in the body. For the new comment's canonical URL, see the
`/links/self` field of the response.
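    A minimal sketch of such a request with `requests`, commenting directly on a node (ids are placeholders;
    the URL is assumed from the `self` link):

        import requests

        url = 'https://api.osf.io/v2/nodes/abc12/comments/'  # hypothetical node id
        headers = {
            'Authorization': 'Bearer PERSONAL_ACCESS_TOKEN',
            'Content-Type': 'application/vnd.api+json',
        }
        payload = {
            'data': {
                'type': 'comments',
                'attributes': {'content': 'Nice dataset!'},
                'relationships': {
                    'target': {'data': {'type': 'nodes', 'id': 'abc12'}},
                },
            }
        }
        resp = requests.post(url, json=payload, headers=headers)
        resp.raise_for_status()  # 201 Created with the new comment in the body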
##Query Params
+ `filter[deleted]=True|False` -- filter comments based on whether or not they are deleted.
The list of node comments includes deleted comments by default. The `deleted` field is a boolean and can be
filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note that quoting `true` or `false` in
the query will cause the match to fail regardless.
+ `filter[date_created][comparison_operator]=YYYY-MM-DDTH:M:S` -- filter comments based on date created.
Comments can also be filtered based on their `date_created` and `date_modified` fields. Possible comparison
operators include 'gt' (greater than), 'gte'(greater than or equal to), 'lt' (less than) and 'lte'
(less than or equal to). The date must be in the format YYYY-MM-DD and the time is optional.
+ `filter[target]=target_id` -- filter comments based on their target id.
The list of comments can be filtered by target id. For example, to get all comments with target = project,
the target_id would be the project_id.
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
CanCommentOrPublic,
base_permissions.TokenHasScope,
ExcludeWithdrawals
)
required_read_scopes = [CoreScopes.NODE_COMMENTS_READ]
required_write_scopes = [CoreScopes.NODE_COMMENTS_WRITE]
pagination_class = CommentPagination
serializer_class = NodeCommentSerializer
view_category = 'nodes'
view_name = 'node-comments'
ordering = ('-date_created', ) # default ordering
# overrides ODMFilterMixin
def get_default_odm_query(self):
return MQ('node', 'eq', self.get_node()) & MQ('root_target', 'ne', None)
# Hook to make filtering on 'target' work
def postprocess_query_param(self, key, field_name, operation):
if field_name == 'target':
operation['value'] = Guid.load(operation['value'])
def get_queryset(self):
comments = Comment.find(self.get_query_from_request())
for comment in comments:
# Deleted root targets still appear as tuples in the database,
# but need to be None in order for the query to be correct.
if comment.root_target.referent.is_deleted:
comment.root_target = None
comment.save()
return Comment.find(self.get_query_from_request())
def get_serializer_class(self):
if self.request.method == 'POST':
return CommentCreateSerializer
else:
return NodeCommentSerializer
# overrides ListCreateAPIView
def get_parser_context(self, http_request):
"""
Tells parser that we are creating a relationship
"""
res = super(NodeCommentsList, self).get_parser_context(http_request)
res['is_relationship'] = True
return res
def perform_create(self, serializer):
node = self.get_node()
serializer.validated_data['user'] = self.request.user
serializer.validated_data['node'] = node
serializer.save()
class NodeInstitutionsList(JSONAPIBaseView, generics.ListAPIView, ODMFilterMixin, NodeMixin):
""" Detail of the affiliated institutions a node has, if any. Returns [] if the node has no
affiliated institution.
##Attributes
OSF Institutions have the "institutions" `type`.
name type description
=========================================================================
name string title of the institution
id string unique identifier in the OSF
logo_path string a path to the institution's static logo
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
AdminOrPublic
)
required_read_scopes = [CoreScopes.NODE_BASE_READ, CoreScopes.INSTITUTION_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = InstitutionSerializer
model = Institution
view_category = 'nodes'
view_name = 'node-institutions'
ordering = ('-id',)
def get_queryset(self):
node = self.get_node()
return node.affiliated_institutions.all() or []
class NodeInstitutionsRelationship(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, generics.CreateAPIView, NodeMixin):
""" Relationship Endpoint for Node -> Institutions Relationship
Used to set, remove, update and retrieve the affiliated_institutions of a node to an institution
##Actions
###Create
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "institutions", # required
"id": <institution_id> # required
}]
}
Success: 201
This requires write permissions on the node and for the user making the request to
have the institutions in the payload as affiliated in their account.
###Update
Method: PUT || PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "institutions", # required
"id": <institution_id> # required
}]
}
Success: 200
This requires write permissions on the node and for the user making the request to
have the institutions in the payload as affiliated in their account. This will delete
all institutions not listed, meaning a data: [] payload does the same as a DELETE with all
the institutions.
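    A minimal sketch of such an update with `requests` (node and institution ids are placeholders; the URL is
    assumed from the `self` link):

        import requests

        url = 'https://api.osf.io/v2/nodes/abc12/relationships/institutions/'  # hypothetical
        headers = {
            'Authorization': 'Bearer PERSONAL_ACCESS_TOKEN',
            'Content-Type': 'application/vnd.api+json',
        }
        payload = {'data': [{'type': 'institutions', 'id': 'cos'}]}  # placeholder institution id
        resp = requests.put(url, json=payload, headers=headers)      # replaces the full set of affiliations
        resp.raise_for_status()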
###Destroy
Method: DELETE
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "institutions", # required
"id": <institution_id> # required
}]
}
Success: 204
This requires write permissions in the node. If the user has admin permissions, the institution in the payload does
not need to be affiliated in their account.
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
WriteOrPublicForRelationshipInstitutions
)
required_read_scopes = [CoreScopes.NODE_BASE_READ]
required_write_scopes = [CoreScopes.NODE_BASE_WRITE]
serializer_class = NodeInstitutionsRelationshipSerializer
parser_classes = (JSONAPIRelationshipParser, JSONAPIRelationshipParserForRegularJSON, )
view_category = 'nodes'
view_name = 'node-relationships-institutions'
def get_object(self):
node = self.get_node(check_object_permissions=False)
obj = {
'data': node.affiliated_institutions.all(),
'self': node
}
self.check_object_permissions(self.request, obj)
return obj
def perform_destroy(self, instance):
data = self.request.data['data']
user = self.request.user
current_insts = {inst._id: inst for inst in instance['data']}
node = instance['self']
for val in data:
if val['id'] in current_insts:
if not user.is_affiliated_with_institution(current_insts[val['id']]) and not node.has_permission(user, 'admin'):
raise PermissionDenied
node.remove_affiliated_institution(inst=current_insts[val['id']], user=user)
node.save()
def create(self, *args, **kwargs):
try:
ret = super(NodeInstitutionsRelationship, self).create(*args, **kwargs)
except RelationshipPostMakesNoChanges:
return Response(status=HTTP_204_NO_CONTENT)
return ret
class NodeWikiList(JSONAPIBaseView, generics.ListAPIView, NodeMixin, ODMFilterMixin):
"""List of wiki pages on a node. *Read only*.
    Paginated list of the node's current wiki page versions ordered by their `date_modified`. Each resource contains the
full representation of the wiki, meaning additional requests to an individual wiki's detail view are not necessary.
Note that if an anonymous view_only key is being used, the user relationship will not be exposed.
###Permissions
Wiki pages on public nodes are given read-only access to everyone. Wiki pages on private nodes are only visible to
contributors and administrators on the parent node.
##Attributes
OSF wiki entities have the "wikis" `type`.
name type description
======================================================================================================
        name                        string                 name of the wiki page
path string the path of the wiki page
materialized_path string the path of the wiki page
date_modified iso8601 timestamp timestamp when the wiki was last updated
content_type string MIME-type
current_user_can_comment boolean Whether the current user is allowed to post comments
extra object
version integer version number of the wiki
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Query Params
+ `filter[name]=<Str>` -- filter wiki pages by name
+ `filter[date_modified][comparison_operator]=YYYY-MM-DDTH:M:S` -- filter wiki pages based on date modified.
Wiki pages can be filtered based on their `date_modified` fields. Possible comparison
operators include 'gt' (greater than), 'gte'(greater than or equal to), 'lt' (less than) and 'lte'
(less than or equal to). The date must be in the format YYYY-MM-DD and the time is optional.
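    A minimal sketch of such a filtered request with `requests` (node id and date are placeholders):

        import requests

        url = 'https://api.osf.io/v2/nodes/abc12/wikis/'  # hypothetical node id
        params = {
            'filter[name]': 'home',
            'filter[date_modified][gte]': '2017-01-01',
        }
        resp = requests.get(url, params=params)
        resp.raise_for_status()
        for wiki in resp.json()['data']:
            print(wiki['attributes']['name'])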
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ContributorOrPublic,
ExcludeWithdrawals
)
required_read_scopes = [CoreScopes.WIKI_BASE_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = NodeWikiSerializer
view_category = 'nodes'
view_name = 'node-wikis'
ordering = ('-date', ) # default ordering
# overrides ODMFilterMixin
def get_default_odm_query(self):
node = self.get_node()
node_wiki_pages = node.wiki_pages_current.values() if node.wiki_pages_current else []
return MQ('guids___id', 'in', node_wiki_pages)
def get_queryset(self):
return NodeWikiPage.find(self.get_query_from_request())
class NodeLinkedNodesRelationship(LinkedNodesRelationship, NodeMixin):
""" Relationship Endpoint for Nodes -> Linked Node relationships
Used to set, remove, update and retrieve the ids of the linked nodes attached to this collection. For each id, there
exists a node link that contains that node.
##Actions
###Create
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_nodes", # required
"id": <node_id> # required
}]
}
Success: 201
This requires both edit permission on the collection, and for the user that is
    making the request to be able to read the nodes requested. Data can contain any number of
node identifiers. This will create a node_link for all node_ids in the request that
do not currently have a corresponding node_link in this collection.
###Update
Method: PUT || PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_nodes", # required
"id": <node_id> # required
}]
}
Success: 200
This requires both edit permission on the collection, and for the user that is
    making the request to be able to read the nodes requested. Data can contain any number of
node identifiers. This will replace the contents of the node_links for this collection with
the contents of the request. It will delete all node links that don't have a node_id in the data
array, create node links for the node_ids that don't currently have a node id, and do nothing
    for node_ids that already have a corresponding node_link. This means an update request with
    {"data": []} will remove all node_links in this collection.
###Destroy
Method: DELETE
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_nodes", # required
"id": <node_id> # required
}]
}
Success: 204
This requires edit permission on the node. This will delete any node_links that have a
corresponding node_id in the request.
"""
view_category = 'nodes'
view_name = 'node-pointer-relationship'
class LinkedNodesList(BaseLinkedList, NodeMixin):
"""List of nodes linked to this node. *Read-only*.
Linked nodes are the nodes pointed to by node links. This view will probably replace node_links in the near future.
<!--- Copied Spiel from NodeDetail -->
On the front end, nodes are considered 'projects' or 'components'. The difference between a project and a component
is that a project is the top-level node, and components are children of the project. There is also a [category
field](/v2/#osf-node-categories) that includes 'project' as an option. The categorization essentially determines
which icon is displayed by the node in the front-end UI and helps with search organization. Top-level nodes may have
a category other than project, and children nodes may have a category of project.
##Linked Node Attributes
<!--- Copied Attributes from NodeDetail -->
OSF Node entities have the "nodes" `type`.
name type description
=================================================================================
title string title of project or component
description string description of the node
category string node category, must be one of the allowed values
date_created iso8601 timestamp timestamp that the node was created
date_modified iso8601 timestamp timestamp when the node was last updated
tags array of strings list of tags that describe the node
        registration    boolean            is this a registration?
collection boolean is this node a collection of other nodes?
public boolean has this node been made publicly-visible?
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
Nodes may be filtered by their `title`, `category`, `description`, `public`, `registration`, or `tags`. `title`,
`description`, and `category` are string fields and will be filtered using simple substring matching. `public` and
`registration` are booleans, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note
that quoting `true` or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.
#This Request/Response
"""
serializer_class = NodeSerializer
view_category = 'nodes'
view_name = 'linked-nodes'
def get_queryset(self):
queryset = super(LinkedNodesList, self).get_queryset()
return queryset.exclude(type='osf.registration')
# overrides APIView
def get_parser_context(self, http_request):
"""
Tells parser that we are creating a relationship
"""
res = super(LinkedNodesList, self).get_parser_context(http_request)
res['is_relationship'] = True
return res
class NodeLinkedRegistrationsRelationship(LinkedRegistrationsRelationship, NodeMixin):
""" Relationship Endpoint for Node -> Linked Registration relationships
Used to set, remove, update and retrieve the ids of the linked registrations attached to this node. For each id, there
exists a node link that contains that node.
##Actions
###Create
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_registrations", # required
"id": <node_id> # required
}]
}
Success: 201
This requires both edit permission on the node, and for the user that is
making the request to be able to read the registrations requested. Data can contain any number of
node identifiers. This will create a node_link for all node_ids in the request that
do not currently have a corresponding node_link in this node.
###Update
Method: PUT || PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_registrations", # required
"id": <node_id> # required
}]
}
Success: 200
This requires both edit permission on the node, and for the user that is
making the request to be able to read the registrations requested. Data can contain any number of
node identifiers. This will replace the contents of the node_links for this node with
the contents of the request. It will delete all node links that don't have a node_id in the data
array, create node links for the node_ids that don't currently have a node id, and do nothing
    for node_ids that already have a corresponding node_link. This means an update request with
{"data": []} will remove all node_links in this node.
###Destroy
Method: DELETE
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_registrations", # required
"id": <node_id> # required
}]
}
Success: 204
This requires edit permission on the node. This will delete any node_links that have a
corresponding node_id in the request.
"""
view_category = 'nodes'
view_name = 'node-registration-pointer-relationship'
class NodeLinkedRegistrationsList(BaseLinkedList, NodeMixin):
"""List of registrations linked to this node. *Read-only*.
Linked registrations are the registration nodes pointed to by node links.
<!--- Copied Spiel from RegistrationDetail -->
Registrations are read-only snapshots of a project. This view shows details about the given registration.
Each resource contains the full representation of the registration, meaning additional requests to an individual
registration's detail view are not necessary. A withdrawn registration will display a limited subset of information,
namely, title, description, date_created, registration, withdrawn, date_registered, withdrawal_justification, and
registration supplement. All other fields will be displayed as null. Additionally, the only relationships permitted
to be accessed for a withdrawn registration are the contributors - other relationships will return a 403.
##Linked Registration Attributes
<!--- Copied Attributes from RegistrationDetail -->
Registrations have the "registrations" `type`.
name type description
=======================================================================================================
title string title of the registered project or component
description string description of the registered node
        category                        string             node category, must be one of the allowed values
date_created iso8601 timestamp timestamp that the node was created
date_modified iso8601 timestamp timestamp when the node was last updated
tags array of strings list of tags that describe the registered node
current_user_can_comment boolean Whether the current user is allowed to post comments
current_user_permissions array of strings list of strings representing the permissions for the current user on this node
fork boolean is this project a fork?
registration boolean has this project been registered? (always true - may be deprecated in future versions)
collection boolean is this registered node a collection? (always false - may be deprecated in future versions)
node_license object details of the license applied to the node
year string date range of the license
copyright_holders array of strings holders of the applied license
public boolean has this registration been made publicly-visible?
withdrawn boolean has this registration been withdrawn?
date_registered iso8601 timestamp timestamp that the registration was created
embargo_end_date iso8601 timestamp when the embargo on this registration will be lifted (if applicable)
withdrawal_justification string reasons for withdrawing the registration
pending_withdrawal boolean is this registration pending withdrawal?
pending_withdrawal_approval boolean is this registration pending approval?
pending_embargo_approval boolean is the associated Embargo awaiting approval by project admins?
registered_meta dictionary registration supplementary information
registration_supplement string registration template
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
Nodes may be filtered by their `title`, `category`, `description`, `public`, `registration`, or `tags`. `title`,
`description`, and `category` are string fields and will be filtered using simple substring matching. `public` and
`registration` are booleans, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note
that quoting `true` or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.
#This Request/Response
"""
serializer_class = RegistrationSerializer
view_category = 'nodes'
view_name = 'linked-registrations'
def get_queryset(self):
ret = [node for node in
super(NodeLinkedRegistrationsList, self).get_queryset()
if node.is_registration]
return ret
# overrides APIView
def get_parser_context(self, http_request):
"""
Tells parser that we are creating a relationship
"""
res = super(NodeLinkedRegistrationsList, self).get_parser_context(http_request)
res['is_relationship'] = True
return res
class NodeViewOnlyLinksList(JSONAPIBaseView, generics.ListCreateAPIView, ListFilterMixin, NodeMixin):
"""
List of view only links on a node. *Writeable*.
###Permissions
View only links on a node, public or private, are readable and writeable only by users that are
administrators on the node.
##Attributes
name type description
=================================================================================
name string name of the view only link
anonymous boolean whether the view only link has anonymized contributors
date_created iso8601 timestamp timestamp when the view only link was created
key string the view only link key
##Relationships
###Creator
The user who created the view only link.
###Nodes
The nodes which this view only link key gives read-only access to.
##Actions
###Create
Method: POST
Body (JSON): {
"data": {
"attributes": {
"name": {string}, #optional
"anonymous": true|false, #optional
}
}
}
Success: 201 CREATED + VOL representation
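    A minimal sketch of such a request with `requests` (the node id, URL, and `type` value are assumptions,
    not confirmed by this docstring):

        import requests

        url = 'https://api.osf.io/v2/nodes/abc12/view_only_links/'  # hypothetical
        headers = {
            'Authorization': 'Bearer PERSONAL_ACCESS_TOKEN',
            'Content-Type': 'application/vnd.api+json',
        }
        payload = {
            'data': {
                'type': 'view_only_links',  # assumed resource type
                'attributes': {'name': 'reviewer link', 'anonymous': True},
            }
        }
        resp = requests.post(url, json=payload, headers=headers)
        resp.raise_for_status()  # 201 Created; the 'key' attribute holds the shareable token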
##Query Params
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
View only links may be filtered by their `name`, `anonymous`, and `date_created` attributes.
#This Request/Response
"""
permission_classes = (
IsAdmin,
base_permissions.TokenHasScope,
drf_permissions.IsAuthenticatedOrReadOnly
)
required_read_scopes = [CoreScopes.NODE_VIEW_ONLY_LINKS_READ]
required_write_scopes = [CoreScopes.NODE_VIEW_ONLY_LINKS_WRITE]
serializer_class = NodeViewOnlyLinkSerializer
view_category = 'nodes'
view_name = 'node-view-only-links'
ordering = ('-date_created',)
def get_default_queryset(self):
return self.get_node().private_links.filter(is_deleted=False)
def get_queryset(self):
return self.get_queryset_from_request()
class NodeViewOnlyLinkDetail(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, NodeMixin):
"""
Detail of a specific view only link on a node. *Writeable*.
###Permissions
View only links on a node, public or private, are only readable and writeable by users that are
administrators on the node.
##Attributes
name type description
=================================================================================
name string name of the view only link
anonymous boolean whether the view only link has anonymized contributors
date_created iso8601 timestamp timestamp when the view only link was created
key string the view only key
##Relationships
###Creator
The user who created the view only link.
###Nodes
The nodes which this view only link key gives read-only access to.
##Actions
###Update
Method: PUT
Body (JSON): {
"data": {
"attributes": {
"name": {string}, #optional
"anonymous": true|false, #optional
},
}
}
Success: 200 OK + VOL representation
###Delete
Method: DELETE
Body (JSON): <none>
Success: 204 NO CONTENT
#This Request/Response
"""
permission_classes = (
IsAdmin,
base_permissions.TokenHasScope,
drf_permissions.IsAuthenticatedOrReadOnly
)
required_read_scopes = [CoreScopes.NODE_VIEW_ONLY_LINKS_READ]
required_write_scopes = [CoreScopes.NODE_VIEW_ONLY_LINKS_WRITE]
serializer_class = NodeViewOnlyLinkSerializer
view_category = 'nodes'
view_name = 'node-view-only-link-detail'
def get_serializer_class(self):
if self.request.method == 'PUT':
return NodeViewOnlyLinkUpdateSerializer
return NodeViewOnlyLinkSerializer
def get_object(self):
try:
return self.get_node().private_links.get(_id=self.kwargs['link_id'])
except PrivateLink.DoesNotExist:
raise NotFound
def perform_destroy(self, link):
assert isinstance(link, PrivateLink), 'link must be a PrivateLink'
link.is_deleted = True
link.save()
enqueue_postcommit_task(ban_url, (self.get_node(),), {}, celery=True, once_per_request=True)
class NodeIdentifierList(NodeMixin, IdentifierList):
"""List of identifiers for a specified node. *Read-only*.
##Identifier Attributes
OSF Identifier entities have the "identifiers" `type`.
name type description
----------------------------------------------------------------------------
category string e.g. 'ark', 'doi'
value string the identifier value itself
##Links
self: this identifier's detail page
##Relationships
###Referent
The identifier refers to this node.
##Actions
*None*.
##Query Params
Identifiers may be filtered by their category.
#This Request/Response
"""
serializer_class = NodeIdentifierSerializer
node_lookup_url_kwarg = 'node_id'
# overrides IdentifierList
def get_object(self, check_object_permissions=True):
return self.get_node(check_object_permissions=check_object_permissions)
def get_node(self, check_object_permissions=True):
node = get_object_or_error(
Node,
self.kwargs[self.node_lookup_url_kwarg],
display_name='node'
)
# Nodes that are folders/collections are treated as a separate resource, so if the client
# requests a collection through a node endpoint, we return a 404
if node.is_collection:
raise NotFound
# May raise a permission denied
if check_object_permissions:
self.check_object_permissions(self.request, node)
return node
class NodePreprintsList(JSONAPIBaseView, generics.ListAPIView, NodeMixin, PreprintFilterMixin):
"""List of preprints for a node. *Read-only*.
##Note
**This API endpoint is under active development, and is subject to change in the future.**
Paginated list of preprints ordered by their `date_created`. Each resource contains a representation of the
preprint.
##Preprint Attributes
OSF Preprint entities have the "preprints" `type`.
name type description
====================================================================================
date_created iso8601 timestamp timestamp that the preprint was created
date_modified iso8601 timestamp timestamp that the preprint was last modified
date_published iso8601 timestamp timestamp when the preprint was published
is_published boolean whether or not this preprint is published
is_preprint_orphan boolean whether or not this preprint is orphaned
subjects list of lists of dictionaries ids of Subject in the BePress taxonomy. Dictionary containing the subject text and subject ID
provider string original source of the preprint
doi string bare DOI for the manuscript, as entered by the user
##Relationships
###Node
The node that this preprint was created for
###Primary File
The file that is designated as the preprint's primary file, or the manuscript of the preprint.
###Provider
Link to preprint_provider detail for this preprint
##Links
- `self` -- Preprint detail page for the current preprint
- `html` -- Project on the OSF corresponding to the current preprint
- `doi` -- URL representation of the DOI entered by the user for the preprint manuscript
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ContributorOrPublic,
)
parser_classes = (JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON,)
required_read_scopes = [CoreScopes.NODE_PREPRINTS_READ]
required_write_scopes = [CoreScopes.NODE_PREPRINTS_WRITE]
serializer_class = PreprintSerializer
view_category = 'nodes'
view_name = 'node-preprints'
ordering = ('-date_modified',)
# overrides DjangoFilterMixin
def get_default_django_query(self):
auth = get_user_auth(self.request)
auth_user = getattr(auth, 'user', None)
node = self.get_node()
# Permissions on the node are handled by the permissions_classes
# Permissions on the list objects are handled by the query
return self.preprint_list_django_query(auth_user, node__id=node.id)
# overrides ListAPIView
def get_queryset(self):
return PreprintService.objects.filter(self.get_query_from_request()).distinct()
| apache-2.0 |
arbn/pysaml2 | src/saml2/assertion.py | 1 | 24504 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2011 Umeå University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import logging
import re
from saml2.saml import NAME_FORMAT_URI
import xmlenc
from saml2 import saml
from saml2.time_util import instant, in_a_while
from saml2.attribute_converter import from_local
from saml2.s_utils import sid, MissingValue
from saml2.s_utils import factory
from saml2.s_utils import assertion_factory
logger = logging.getLogger(__name__)
def _filter_values(vals, vlist=None, must=False):
""" Removes values from *vals* that does not appear in vlist
:param vals: The values that are to be filtered
:param vlist: required or optional value
:param must: Whether the allowed values must appear
:return: The set of values after filtering
"""
if not vlist: # No value specified equals any value
return vals
if isinstance(vlist, basestring):
vlist = [vlist]
res = []
for val in vlist:
if val in vals:
res.append(val)
if must:
if res:
return res
else:
raise MissingValue("Required attribute value missing")
else:
return res
def _match(attr, ava):
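# Case-insensitive lookup of an attribute name in the AVA dict, e.g.
# (illustrative) _match("Mail", {"mail": ["a@x.org"]}) returns "mail",
# while a name that does not occur at all returns None.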
if attr in ava:
return attr
_la = attr.lower()
if _la in ava:
return _la
for _at in ava.keys():
if _at.lower() == _la:
return _at
return None
def filter_on_attributes(ava, required=None, optional=None):
""" Filter
:param ava: An attribute value assertion as a dictionary
:param required: list of RequestedAttribute instances defined to be
required
:param optional: list of RequestedAttribute instances defined to be
optional
:return: The modified attribute value assertion
"""
res = {}
if required is None:
required = []
for attr in required:
found = False
nform = ""
for nform in ["friendly_name", "name"]:
try:
_fn = _match(attr[nform], ava)
except KeyError:
pass
else:
if _fn:
try:
values = [av["text"] for av in attr["attribute_value"]]
except KeyError:
values = []
res[_fn] = _filter_values(ava[_fn], values, True)
found = True
break
if not found:
raise MissingValue("Required attribute missing: '%s'" % (
attr[nform],))
if optional is None:
optional = []
for attr in optional:
for nform in ["friendly_name", "name"]:
if nform in attr:
_fn = _match(attr[nform], ava)
if _fn:
try:
values = [av["text"] for av in attr["attribute_value"]]
except KeyError:
values = []
try:
res[_fn].extend(_filter_values(ava[_fn], values))
except KeyError:
res[_fn] = _filter_values(ava[_fn], values)
return res
def filter_on_demands(ava, required=None, optional=None):
""" Never return more than is needed. Filters out everything
the server is prepared to return but the receiver doesn't ask for
:param ava: Attribute value assertion as a dictionary
:param required: Required attributes
:param optional: Optional attributes
:return: The possibly reduced assertion
"""
# Is all what's required there:
if required is None:
required = {}
lava = dict([(k.lower(), k) for k in ava.keys()])
for attr, vals in required.items():
attr = attr.lower()
if attr in lava:
if vals:
for val in vals:
if val not in ava[lava[attr]]:
raise MissingValue(
"Required attribute value missing: %s,%s" % (attr,
val))
else:
raise MissingValue("Required attribute missing: %s" % (attr,))
if optional is None:
optional = {}
oka = [k.lower() for k in required.keys()]
oka.extend([k.lower() for k in optional.keys()])
# OK, so I can imagine releasing values that are not absolutely necessary
# but not attributes that are not asked for.
for attr in lava.keys():
if attr not in oka:
del ava[lava[attr]]
return ava
def filter_on_wire_representation(ava, acs, required=None, optional=None):
"""
:param ava: A dictionary with attributes and values
:param acs: List of tuples (Attribute Converter name,
Attribute Converter instance)
:param required: A list of saml.Attributes
:param optional: A list of saml.Attributes
:return: Dictionary of expected/wanted attributes and values
"""
acsdic = dict([(ac.name_format, ac) for ac in acs])
if required is None:
required = []
if optional is None:
optional = []
res = {}
for attr, val in ava.items():
done = False
for req in required:
try:
_name = acsdic[req.name_format]._to[attr]
if _name == req.name:
res[attr] = val
done = True
except KeyError:
pass
if done:
continue
for opt in optional:
try:
_name = acsdic[opt.name_format]._to[attr]
if _name == opt.name:
res[attr] = val
break
except KeyError:
pass
return res
def filter_attribute_value_assertions(ava, attribute_restrictions=None):
""" Will weed out attribute values and values according to the
rules defined in the attribute restrictions. If filtering results in
an attribute without values, then the attribute is removed from the
assertion.
:param ava: The incoming attribute value assertion (dictionary)
:param attribute_restrictions: The rules that govern which attributes
and values that are allowed. (dictionary)
:return: The modified attribute value assertion
"""
if not attribute_restrictions:
return ava
for attr, vals in ava.items():
_attr = attr.lower()
try:
_rests = attribute_restrictions[_attr]
except KeyError:
del ava[attr]
else:
if _rests is None:
continue
if isinstance(vals, basestring):
vals = [vals]
rvals = []
for restr in _rests:
for val in vals:
if restr.match(val):
rvals.append(val)
if rvals:
ava[attr] = list(set(rvals))
else:
del ava[attr]
return ava
def restriction_from_attribute_spec(attributes):
restr = {}
for attribute in attributes:
restr[attribute.name] = {}
for val in attribute.attribute_value:
if not val.text:
restr[attribute.name] = None
break
else:
restr[attribute.name] = re.compile(val.text)
return restr
class Policy(object):
""" handles restrictions on assertions """
def __init__(self, restrictions=None):
if restrictions:
self.compile(restrictions)
else:
self._restrictions = None
def compile(self, restrictions):
""" This is only for IdPs or AAs, and it's about limiting what
is returned to the SP.
In the configuration file, restrictions on which values that
can be returned are specified with the help of regular expressions.
This function goes through and pre-compiles the regular expressions.
:param restrictions:
:return: The assertion with the string specification replaced with
a compiled regular expression.
"""
self._restrictions = restrictions.copy()
for who, spec in self._restrictions.items():
if spec is None:
continue
try:
items = spec["entity_categories"]
except KeyError:
pass
else:
ecs = []
for cat in items:
_mod = importlib.import_module(
"saml2.entity_category.%s" % cat)
_ec = {}
for key, items in _mod.RELEASE.items():
_ec[key] = [k.lower() for k in items]
ecs.append(_ec)
spec["entity_categories"] = ecs
try:
restr = spec["attribute_restrictions"]
except KeyError:
continue
if restr is None:
continue
_are = {}
for key, values in restr.items():
if not values:
_are[key.lower()] = None
continue
_are[key.lower()] = [re.compile(value) for value in values]
spec["attribute_restrictions"] = _are
logger.debug("policy restrictions: %s" % self._restrictions)
return self._restrictions
def get_nameid_format(self, sp_entity_id):
""" Get the NameIDFormat to used for the entity id
:param: The SP entity ID
:retur: The format
"""
try:
form = self._restrictions[sp_entity_id]["nameid_format"]
except KeyError:
try:
form = self._restrictions["default"]["nameid_format"]
except KeyError:
form = saml.NAMEID_FORMAT_TRANSIENT
return form
def get_name_form(self, sp_entity_id):
""" Get the NameFormat to used for the entity id
:param: The SP entity ID
:retur: The format
"""
form = NAME_FORMAT_URI
try:
form = self._restrictions[sp_entity_id]["name_form"]
except TypeError:
pass
except KeyError:
try:
form = self._restrictions["default"]["name_form"]
except KeyError:
pass
return form
def get_lifetime(self, sp_entity_id):
""" The lifetime of the assertion
:param sp_entity_id: The SP entity ID
:return: lifetime as a dictionary
"""
# default is one hour
spec = {"hours": 1}
if not self._restrictions:
return spec
try:
spec = self._restrictions[sp_entity_id]["lifetime"]
except KeyError:
try:
spec = self._restrictions["default"]["lifetime"]
except KeyError:
pass
return spec
def get_attribute_restriction(self, sp_entity_id):
""" Return the attribute restriction for SP that want the information
:param sp_entity_id: The SP entity ID
:return: The restrictions
"""
if not self._restrictions:
return None
try:
try:
restrictions = self._restrictions[sp_entity_id][
"attribute_restrictions"]
except KeyError:
try:
restrictions = self._restrictions["default"][
"attribute_restrictions"]
except KeyError:
restrictions = None
except KeyError:
restrictions = None
return restrictions
def entity_category_attributes(self, ec):
if not self._restrictions:
return None
ec_maps = self._restrictions["default"]["entity_categories"]
for ec_map in ec_maps:
try:
return ec_map[ec]
except KeyError:
pass
return []
def get_entity_categories_restriction(self, sp_entity_id, mds):
if not self._restrictions:
return None
restrictions = {}
ec_maps = []
try:
try:
ec_maps = self._restrictions[sp_entity_id]["entity_categories"]
except KeyError:
try:
ec_maps = self._restrictions["default"]["entity_categories"]
except KeyError:
pass
except KeyError:
pass
if ec_maps:
if mds:
try:
ecs = mds.entity_categories(sp_entity_id)
except KeyError:
for ec_map in ec_maps:
for attr in ec_map[""]:
restrictions[attr] = None
else:
for ec_map in ec_maps:
for key, val in ec_map.items():
if key == "": # always released
attrs = val
elif isinstance(key, tuple):
attrs = val
for _key in key:
try:
assert _key in ecs
except AssertionError:
attrs = []
break
elif key in ecs:
attrs = val
else:
attrs = []
for attr in attrs:
restrictions[attr] = None
return restrictions
def not_on_or_after(self, sp_entity_id):
""" When the assertion stops being valid, should not be
used after this time.
:param sp_entity_id: The SP entity ID
:return: String representation of the time
"""
return in_a_while(**self.get_lifetime(sp_entity_id))
def filter(self, ava, sp_entity_id, mdstore, required=None, optional=None):
""" What attribute and attribute values returns depends on what
the SP has said it wants in the request or in the metadata file and
what the IdP/AA wants to release. An assumption is that what the SP
asks for overrides whatever is in the metadata. But of course the
IdP never releases anything it doesn't want to.
:param ava: The information about the subject as a dictionary
:param sp_entity_id: The entity ID of the SP
:param mdstore: A Metadata store
:param required: Attributes that the SP requires in the assertion
:param optional: Attributes that the SP regards as optional
:return: A possibly modified AVA
"""
_rest = self.get_attribute_restriction(sp_entity_id)
if _rest is None:
_rest = self.get_entity_categories_restriction(sp_entity_id,
mdstore)
logger.debug("filter based on: %s" % _rest)
ava = filter_attribute_value_assertions(ava, _rest)
if required or optional:
ava = filter_on_attributes(ava, required, optional)
return ava
def restrict(self, ava, sp_entity_id, metadata=None):
""" Identity attribute names are expected to be expressed in
the local lingo (== friendlyName)
:return: A filtered ava according to the IdPs/AAs rules and
the list of required/optional attributes according to the SP.
If the requirements can't be met an exception is raised.
"""
if metadata:
spec = metadata.attribute_requirement(sp_entity_id)
if spec:
ava = self.filter(ava, sp_entity_id, metadata,
spec["required"], spec["optional"])
return self.filter(ava, sp_entity_id, metadata, [], [])
def conditions(self, sp_entity_id):
""" Return a saml.Condition instance
:param sp_entity_id: The SP entity ID
:return: A saml.Condition instance
"""
return factory(saml.Conditions,
not_before=instant(),
# How long might depend on who's getting it
not_on_or_after=self.not_on_or_after(sp_entity_id),
audience_restriction=[factory(
saml.AudienceRestriction,
audience=[factory(saml.Audience,
text=sp_entity_id)])])
class EntityCategories(object):
pass
class Assertion(dict):
""" Handles assertions about subjects """
def __init__(self, dic=None):
dict.__init__(self, dic)
def _authn_context_decl(self, decl, authn_auth=None):
"""
Construct the authn context with an authn context declaration
:param decl: The authn context declaration
:param authn_auth: Authenticating Authority
:return: An AuthnContext instance
"""
return factory(saml.AuthnContext,
authn_context_decl=decl,
authenticating_authority=factory(
saml.AuthenticatingAuthority, text=authn_auth))
def _authn_context_decl_ref(self, decl_ref, authn_auth=None):
"""
Construct the authn context with an authn context declaration reference
:param decl_ref: The authn context declaration reference
:param authn_auth: Authenticating Authority
:return: An AuthnContext instance
"""
return factory(saml.AuthnContext,
authn_context_decl_ref=decl_ref,
authenticating_authority=factory(
saml.AuthenticatingAuthority, text=authn_auth))
def _authn_context_class_ref(self, authn_class, authn_auth=None):
"""
Construct the authn context with an authn context class reference
:param authn_class: The authn context class reference
:param authn_auth: Authenticating Authority
:return: An AuthnContext instance
"""
cntx_class = factory(saml.AuthnContextClassRef, text=authn_class)
if authn_auth:
return factory(saml.AuthnContext,
authn_context_class_ref=cntx_class,
authenticating_authority=factory(
saml.AuthenticatingAuthority, text=authn_auth))
else:
return factory(saml.AuthnContext,
authn_context_class_ref=cntx_class)
def _authn_statement(self, authn_class=None, authn_auth=None,
authn_decl=None, authn_decl_ref=None):
"""
Construct the AuthnStatement
:param authn_class: Authentication Context Class reference
:param authn_auth: Authenticating Authority
:param authn_decl: Authentication Context Declaration
:param authn_decl_ref: Authentication Context Declaration reference
:return: An AuthnStatement instance
"""
if authn_class:
return factory(
saml.AuthnStatement,
authn_instant=instant(),
session_index=sid(),
authn_context=self._authn_context_class_ref(
authn_class, authn_auth))
elif authn_decl:
return factory(
saml.AuthnStatement,
authn_instant=instant(),
session_index=sid(),
authn_context=self._authn_context_decl(authn_decl, authn_auth))
elif authn_decl_ref:
return factory(
saml.AuthnStatement,
authn_instant=instant(),
session_index=sid(),
authn_context=self._authn_context_decl_ref(authn_decl_ref,
authn_auth))
else:
return factory(
saml.AuthnStatement,
authn_instant=instant(),
session_index=sid())
def construct(self, sp_entity_id, in_response_to, consumer_url,
name_id, attrconvs, policy, issuer, authn_class=None,
authn_auth=None, authn_decl=None, encrypt=None,
sec_context=None, authn_decl_ref=None):
""" Construct the Assertion
:param sp_entity_id: The entityid of the SP
:param in_response_to: An identifier of the message, this message is
a response to
:param consumer_url: The intended consumer of the assertion
:param name_id: An NameID instance
:param attrconvs: AttributeConverters
:param policy: The policy that should be adhered to when replying
:param issuer: Who is issuing the statement
:param authn_class: The authentication class
:param authn_auth: The authentication instance
:param authn_decl: An Authentication Context declaration
:param encrypt: Whether to encrypt parts or all of the Assertion
:param sec_context: The security context used when encrypting
:param authn_decl_ref: An Authentication Context declaration reference
:return: An Assertion instance
"""
if policy:
_name_format = policy.get_name_form(sp_entity_id)
else:
_name_format = NAME_FORMAT_URI
attr_statement = saml.AttributeStatement(attribute=from_local(
attrconvs, self, _name_format))
if encrypt == "attributes":
for attr in attr_statement.attribute:
enc = sec_context.encrypt(text="%s" % attr)
encd = xmlenc.encrypted_data_from_string(enc)
encattr = saml.EncryptedAttribute(encrypted_data=encd)
attr_statement.encrypted_attribute.append(encattr)
attr_statement.attribute = []
# start using now and for some time
conds = policy.conditions(sp_entity_id)
if authn_auth or authn_class or authn_decl or authn_decl_ref:
_authn_statement = self._authn_statement(authn_class, authn_auth,
authn_decl, authn_decl_ref)
else:
_authn_statement = None
_ass = assertion_factory(
issuer=issuer,
attribute_statement=[attr_statement],
conditions=conds,
subject=factory(
saml.Subject,
name_id=name_id,
subject_confirmation=[factory(
saml.SubjectConfirmation,
method=saml.SCM_BEARER,
subject_confirmation_data=factory(
saml.SubjectConfirmationData,
in_response_to=in_response_to,
recipient=consumer_url,
not_on_or_after=policy.not_on_or_after(sp_entity_id)))]
),
)
if _authn_statement:
_ass.authn_statement = [_authn_statement]
return _ass
def apply_policy(self, sp_entity_id, policy, metadata=None):
""" Apply policy to the assertion I'm representing
:param sp_entity_id: The SP entity ID
:param policy: The policy
:param metadata: Metadata to use
:return: The resulting AVA after the policy is applied
"""
ava = policy.restrict(self, sp_entity_id, metadata)
self.update(ava)
return ava | bsd-2-clause |
cernops/python-neutronclient | neutronclient/tests/unit/test_client_extension.py | 2 | 3466 | # Copyright 2015 Rackspace Hosting Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
import mock
from neutronclient.neutron.v2_0.contrib import _fox_sockets as fox_sockets
from neutronclient.tests.unit import test_cli20
class CLITestV20ExtensionJSON(test_cli20.CLITestV20Base):
def setUp(self):
# need to mock before super because extensions loaded on instantiation
self._mock_extension_loading()
super(CLITestV20ExtensionJSON, self).setUp(plurals={'tags': 'tag'})
def _create_patch(self, name, func=None):
patcher = mock.patch(name)
thing = patcher.start()
self.addCleanup(patcher.stop)
return thing
def _mock_extension_loading(self):
ext_pkg = 'neutronclient.common.extension'
contrib = self._create_patch(ext_pkg + '._discover_via_entry_points')
iterator = iter([("_fox_sockets", fox_sockets)])
contrib.return_value.__iter__.return_value = iterator
return contrib
def test_delete_fox_socket(self):
"""Delete fox socket: myid."""
resource = 'fox_socket'
cmd = fox_sockets.FoxInSocketsDelete(test_cli20.MyApp(sys.stdout),
None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
def test_update_fox_socket(self):
"""Update fox_socket: myid --name myname."""
resource = 'fox_socket'
cmd = fox_sockets.FoxInSocketsUpdate(test_cli20.MyApp(sys.stdout),
None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname'],
{'name': 'myname'})
def test_create_fox_socket(self):
"""Create fox_socket: myname."""
resource = 'fox_socket'
cmd = fox_sockets.FoxInSocketsCreate(test_cli20.MyApp(sys.stdout),
None)
name = 'myname'
myid = 'myid'
args = [name, ]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_list_fox_sockets(self):
"""List fox_sockets."""
resources = 'fox_sockets'
cmd = fox_sockets.FoxInSocketsList(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_show_fox_socket(self):
"""Show fox_socket: --fields id --fields name myid."""
resource = 'fox_socket'
cmd = fox_sockets.FoxInSocketsShow(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
| apache-2.0 |
bhavin04890/finaldashboard | modules/s3/s3import.py | 4 | 120832 | # -*- coding: utf-8 -*-
""" Resource Import Tools
@copyright: 2011-12 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# @todo: remove all interactive error reporting out of the _private methods, and raise exceptions instead.
__all__ = ["S3Importer", "S3ImportJob", "S3ImportItem"]
import os
import sys
import cPickle
import tempfile
from datetime import datetime
from copy import deepcopy
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
try:
from lxml import etree
except ImportError:
print >> sys.stderr, "ERROR: lxml module needed for XML handling"
raise
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import *
from gluon.serializers import json as jsons
from gluon.storage import Storage, Messages
from gluon.tools import callback
from s3crud import S3CRUD
from s3xml import S3XML
from s3utils import s3_mark_required, s3_has_foreign_key, s3_get_foreign_key
from s3resource import S3Resource
DEBUG = False
if DEBUG:
print >> sys.stderr, "S3IMPORTER: DEBUG MODE"
def _debug(m):
print >> sys.stderr, m
else:
_debug = lambda m: None
# =============================================================================
class S3Importer(S3CRUD):
"""
Transformable formats (XML, JSON, CSV) import handler
"""
UPLOAD_TABLE_NAME = "s3_import_upload"
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Apply CRUD methods
@param r: the S3Request
@param attr: dictionary of parameters for the method handler
@returns: output object to send to the view
Known means of communicating with this module:
It expects a URL of the form: /prefix/name/import
It will interpret the http requests as follows:
GET will trigger the upload
POST will trigger either commits or display the import details
DELETE will trigger deletes
It will accept one of the following control vars:
item: to specify a single item in the import job
job: to specify a job
It should not receive both, so job takes precedence over item
For CSV imports, the calling controller can add extra fields
to the upload form to add columns to each row in the CSV. To add
the extra fields, pass a named parameter "csv_extra_fields" to the
s3_rest_controller call (or the S3Request call, respectively):
s3_rest_controller(module, resourcename,
csv_extra_fields=[
dict(label="ColumnLabelInTheCSV",
field=field_instance)
])
The Field instance "field" will be added to the upload form, and
the user input will be added to each row of the CSV under the
label as specified. If the "field" validator has options, the
input value will be translated into the option representation,
otherwise the value will be used as-is.
Note that the "label" in the dict is the column label in the CSV,
whereas the field label for the form is to be set in the Field
instance passed as "field".
You can add any arbitrary number of csv_extra_fields to the list.
Additionally, you may want to allow the user to choose whether
the import shall first remove all existing data in the target
table. To do so, pass a label for the "replace_option" to the
request:
s3_rest_controller(module, resourcename,
replace_option=T("Remove existing data before import"))
This will add the respective checkbox to the upload form.
You may also want to provide a link to download a CSV template from
the upload form. To do that, add the resource name to the request
attributes:
s3_rest_controller(module, resourcename,
csv_template="<resourcename>")
This will provide a link to:
- static/formats/s3csv/<controller>/<resourcename>.csv
at the top of the upload form.
"""
_debug("S3Importer.apply_method(%s)" % r)
# Messages
T = current.T
messages = self.messages = Messages(T)
messages.download_template = "Download Template"
messages.invalid_file_format = "Invalid File Format"
messages.unsupported_file_type = "Unsupported file type of %s"
messages.stylesheet_not_found = "No Stylesheet %s could be found to manage the import file."
messages.no_file = "No file submitted"
messages.file_open_error = "Unable to open the file %s"
messages.file_not_found = "The file to upload is missing"
messages.no_records_to_import = "No records to import"
messages.no_job_to_delete = "No job to delete, maybe it has already been deleted."
messages.title_job_read = "Details of the selected import job"
messages.title_job_list = "List of import items"
messages.file_uploaded = "Import file uploaded"
messages.upload_submit_btn = "Upload Data File"
messages.open_btn = "Open"
messages.view_btn = "View"
messages.delete_btn = "Delete"
messages.item_show_details = "Display Details"
messages.job_total_records = "Total records in the Import Job"
messages.job_records_selected = "Records selected"
messages.job_deleted = "Import job deleted"
messages.job_completed = "Job run on %s. With result of (%s)"
messages.import_file = "Import File"
messages.import_file_comment = "Upload a file formatted according to the Template."
messages.user_name = "User Name"
messages.commit_total_records_imported = "%s records imported"
messages.commit_total_records_ignored = "%s records ignored"
messages.commit_total_errors = "%s records in error"
try:
self.uploadTitle = current.response.s3.crud_strings[self.tablename].title_upload or T("Import")
except:
self.uploadTitle = T("Import")
# @todo: correct to switch this off for the whole session?
current.session.s3.ocr_enabled = False
# Reset all errors/warnings
self.error = None
self.warning = None
# CSV upload configuration
if "csv_stylesheet" in attr:
self.csv_stylesheet = attr["csv_stylesheet"]
else:
self.csv_stylesheet = None
self.csv_extra_fields = None
self.csv_extra_data = None
# Environment
self.controller = r.controller
self.function = r.function
# Target table for the data import
self.controller_resource = self.resource
self.controller_table = self.table
self.controller_tablename = self.tablename
# Table for uploads
self.__define_table()
self.upload_resource = None
self.item_resource = None
# XSLT Path
self.xslt_path = os.path.join(r.folder, r.XSLT_PATH)
self.xslt_extension = r.XSLT_EXTENSION
# Check authorization
permitted = current.auth.s3_has_permission
authorised = permitted("create", self.upload_tablename) and \
permitted("create", self.controller_tablename)
if not authorised:
if r.method is not None:
r.unauthorised()
else:
return dict(form=None)
# @todo: clean this up
source = None
open_file = None
transform = None
upload_id = None
items = None
# @todo get the data from either get_vars or post_vars appropriately
# for post -> commit_items would need to add the uploadID
if "transform" in r.get_vars:
transform = r.get_vars["transform"]
if "filename" in r.get_vars:
source = r.get_vars["filename"]
if "job" in r.post_vars:
upload_id = r.post_vars["job"]
elif "job" in r.get_vars:
upload_id = r.get_vars["job"]
items = self._process_item_list(upload_id, r.vars)
if "delete" in r.get_vars:
r.http = "DELETE"
# If we have an upload ID, then get upload and import job
self.upload_id = upload_id
query = (self.upload_table.id == upload_id)
self.upload_job = current.db(query).select(limitby=(0, 1)).first()
if self.upload_job:
self.job_id = self.upload_job.job_id
else:
self.job_id = None
# Experimental uploading via ajax - added for vulnerability
# Part of the problem with this is that it works directly with the
# opened file. This might pose a security risk; it should be alright
# if only trusted users are involved but care should be taken with this
self.ajax = current.request.ajax and r.post_vars.approach == "ajax"
# Now branch off to the appropriate controller function
if r.http == "GET":
if source != None:
self.commit(source, transform)
output = self.upload(r, **attr)
if upload_id != None:
output = self.display_job(upload_id)
else:
output = self.upload(r, **attr)
elif r.http == "POST":
if items != None:
output = self.commit_items(upload_id, items)
else:
output = self.generate_job(r, **attr)
elif r.http == "DELETE":
if upload_id != None:
output = self.delete_job(upload_id)
else:
r.error(405, current.manager.ERROR.BAD_METHOD)
return output
# -------------------------------------------------------------------------
def upload(self, r, **attr):
"""
This will display the upload form
It will ask for a file to be uploaded or for a job to be selected.
If a file is uploaded then it will guess at the file type and
ask for the transform file to be used. The transform files will
be in a dataTable with the module specific files shown first and
after those all other known transform files. Once the transform
file is selected the import process can be started which will
generate an importJob, and a "POST" method will occur
If a job is selected it will have two actions, open and delete.
Open will mean that a "GET" method will occur, with the job details
passed in.
Whilst the delete action will trigger a "DELETE" method.
"""
_debug("S3Importer.upload()")
request = self.request
form = self._upload_form(r, **attr)
output = self._create_upload_dataTable()
if request.representation == "aadata":
return output
output.update(form=form, title=self.uploadTitle)
return output
# -------------------------------------------------------------------------
def generate_job(self, r, **attr):
"""
Generate an ImportJob from the submitted upload form
"""
_debug("S3Importer.display()")
response = current.response
s3 = response.s3
db = current.db
table = self.upload_table
output = None
if self.ajax:
sfilename = ofilename = r.post_vars["file"].filename
upload_id = table.insert(controller=self.controller,
function=self.function,
filename=ofilename,
file = sfilename,
user_id=current.session.auth.user.id
)
else:
title=self.uploadTitle
form = self._upload_form(r, **attr)
r = self.request
r.read_body()
sfilename = form.vars.file
try:
ofilename = r.post_vars["file"].filename
except:
form.errors.file = self.messages.no_file
if form.errors:
response.flash = ""
output = self._create_upload_dataTable()
output.update(form=form, title=title)
elif not sfilename or \
ofilename not in r.files or r.files[ofilename] is None:
response.flash = ""
response.error = self.messages.file_not_found
output = self._create_upload_dataTable()
output.update(form=form, title=title)
else:
query = (table.file == sfilename)
db(query).update(controller=self.controller,
function=self.function,
filename=ofilename,
user_id=current.session.auth.user.id)
row = db(query).select(table.id, limitby=(0, 1)).first()
upload_id = row.id
if not output:
output = dict()
# must commit here to separate this transaction from
# the trial import phase which will be rolled back.
db.commit()
extension = ofilename.rsplit(".", 1).pop()
if extension not in ("csv", "xls"):
response.flash = None
response.error = self.messages.invalid_file_format
return self.upload(r, **attr)
if self.ajax:
upload_file = r.post_vars.file.file
else:
upload_file = r.files[ofilename]
if extension == "xls":
if "xls_parser" in s3:
upload_file.seek(0)
upload_file = s3.xls_parser(upload_file.read())
extension = "csv"
if upload_file is None:
response.flash = None
response.error = self.messages.file_not_found
return self.upload(r, **attr)
else:
upload_file.seek(0)
if "single_pass" in r.vars:
single_pass = r.vars["single_pass"]
else:
single_pass = None
self._generate_import_job(upload_id,
upload_file,
extension,
commit_job = single_pass)
if upload_id is None:
row = db(query).update(status = 2) # in error
if self.error != None:
response.error = self.error
if self.warning != None:
response.warning = self.warning
response.flash = ""
return self.upload(r, **attr)
else:
if single_pass:
current.session.flash = self.messages.file_uploaded
# For a single pass retain the vars from the original URL
next_URL = URL(r=self.request,
f=self.function,
args=["import"],
vars=current.request.get_vars
)
redirect(next_URL)
s3.dataTable_vars = {"job" : upload_id}
return self.display_job(upload_id)
return output
# -------------------------------------------------------------------------
def display_job(self, upload_id):
"""
@todo: docstring?
"""
_debug("S3Importer.display_job()")
request = self.request
response = current.response
db = current.db
table = self.upload_table
job_id = self.job_id
output = dict()
if job_id == None:
# redirect to the start page (removes all vars)
query = (table.id == upload_id)
row = db(query).update(status = 2) # in error
current.session.warning = self.messages.no_records_to_import
redirect(URL(r=request, f=self.function, args=["import"]))
# Get the status of the upload job
query = (table.id == upload_id)
row = db(query).select(table.status,
table.modified_on,
table.summary_added,
table.summary_error,
table.summary_ignored,
limitby=(0, 1)).first()
status = row.status
# completed display details
if status == 3: # Completed
# @todo currently this is an unnecessary server call,
# change for completed records to be a details display
# and thus avoid the round trip.
# but keep this code to protect against hand-crafted URLs
# (and the 'go back' syndrome on the browser)
result = (row.summary_added,
row.summary_error,
row.summary_ignored,
)
self._display_completed_job(result, row.modified_on)
redirect(URL(r=request, f=self.function, args=["import"]))
# otherwise display import items
response.view = self._view(request, "list.html")
output = self._create_import_item_dataTable(upload_id, job_id)
if request.representation == "aadata":
return output
if response.s3.error_report:
error_report = "Errors|" + "|".join(response.s3.error_report)
error_tip = A("All Errors",
_class="errortip",
_title=error_report)
else:
# @todo: restore the error tree from all items?
error_tip = ""
rowcount = len(self._get_all_items(upload_id))
rheader = DIV(TABLE(
TR(
TH("%s: " % self.messages.job_total_records),
TD(rowcount, _id="totalAvaliable"),
TH("%s: " % self.messages.job_records_selected),
TD(0, _id="totalSelected"),
TH(error_tip)
),
))
output["title"] = self.messages.title_job_read
output["rheader"] = rheader
output["subtitle"] = self.messages.title_job_list
return output
# -------------------------------------------------------------------------
def commit(self, source, transform):
"""
@todo: docstring?
"""
_debug("S3Importer.commit(%s, %s)" % (source, transform))
db = current.db
session = current.session
request = self.request
try:
openFile = open(source, "r")
except:
session.error = self.messages.file_open_error % source
redirect(URL(r=request, f=self.function))
# @todo: manage different file formats
# @todo: find file format from request.extension
fileFormat = "csv"
# Insert data in the table and get the ID
try:
user = session.auth.user.id
except:
user = None
upload_id = self.upload_table.insert(controller=self.controller,
function=self.function,
filename = source,
user_id = user,
status = 1)
db.commit()
# Create the import job
result = self._generate_import_job(upload_id,
openFile,
fileFormat,
stylesheet=transform
)
if result == None:
if self.error != None:
if session.error == None:
session.error = self.error
else:
session.error += self.error
if self.warning != None:
if session.warning == None:
session.warning = self.warning
else:
session.warning += self.warning
else:
items = self._get_all_items(upload_id, True)
# Commit the import job
self._commit_import_job(upload_id, items)
result = self._update_upload_job(upload_id)
# Get the results and display
msg = "%s : %s %s %s" % (source,
self.messages.commit_total_records_imported,
self.messages.commit_total_errors,
self.messages.commit_total_records_ignored)
msg = msg % result
if session.flash == None:
session.flash = msg
else:
session.flash += msg
# @todo: return the upload_id?
# -------------------------------------------------------------------------
def commit_items(self, upload_id, items):
"""
@todo: docstring?
"""
_debug("S3Importer.commit_items(%s, %s)" % (upload_id, items))
# Save the import items
self._commit_import_job(upload_id, items)
# Update the upload table
# change the status to completed
# record the summary details
# delete the upload file
result = self._update_upload_job(upload_id)
if self.ajax:
return result
# redirect to the start page (removes all vars)
self._display_completed_job(result)
redirect(URL(r=self.request, f=self.function, args=["import"]))
# -------------------------------------------------------------------------
def delete_job(self, upload_id):
"""
Delete an uploaded file and the corresponding import job
@param upload_id: the upload ID
"""
_debug("S3Importer.delete_job(%s)" % (upload_id))
db = current.db
request = self.request
resource = request.resource # use self.resource?
response = current.response
# Get the import job ID
job_id = self.job_id
# Delete the import job (if any)
if job_id:
result = resource.import_xml(None,
id = None,
tree = None,
job_id = job_id,
delete_job = True)
# @todo: check result
# now delete the upload entry
query = (self.upload_table.id == upload_id)
count = db(query).delete()
# @todo: check that the record has been deleted
# Now commit the changes
db.commit()
result = count
# return to the main import screen
# @todo: check result properly
if result == False:
response.warning = self.messages.no_job_to_delete
else:
response.flash = self.messages.job_deleted
# redirect to the start page (remove all vars)
self.next = self.request.url(vars=dict())
return
# ========================================================================
# Utility methods
# ========================================================================
def _upload_form(self, r, **attr):
"""
Create and process the upload form, including csv_extra_fields
"""
EXTRA_FIELDS = "csv_extra_fields"
TEMPLATE = "csv_template"
REPLACE_OPTION = "replace_option"
response = current.response
s3 = response.s3
request = self.request
table = self.upload_table
formstyle = s3.crud.formstyle
response.view = self._view(request, "list_create.html")
if REPLACE_OPTION in attr:
replace_option = attr[REPLACE_OPTION]
if replace_option is not None:
table.replace_option.readable = True
table.replace_option.writable = True
table.replace_option.label = replace_option
table.replace_option.comment = DIV(_class="tooltip",
_title="%s|%s" % \
(replace_option,
current.T("Delete all data of this type which the user has permission to before upload. This is designed for workflows where the data is maintained in an offline spreadsheet and uploaded just for Reads.")))
fields = [f for f in table if f.readable or f.writable and not f.compute]
if EXTRA_FIELDS in attr:
extra_fields = attr[EXTRA_FIELDS]
if extra_fields is not None:
fields.extend([f["field"] for f in extra_fields if "field" in f])
self.csv_extra_fields = extra_fields
labels, required = s3_mark_required(fields)
if required:
s3.has_required = True
form = SQLFORM.factory(table_name=self.UPLOAD_TABLE_NAME,
labels=labels,
formstyle=formstyle,
upload = os.path.join(request.folder, "uploads", "imports"),
separator = "",
message=self.messages.file_uploaded,
*fields)
args = ["s3csv"]
template = attr.get(TEMPLATE, True)
if template is True:
args.extend([self.controller, "%s.csv" % self.function])
elif isinstance(template, basestring):
args.extend([self.controller, "%s.csv" % template])
elif isinstance(template, (tuple, list)):
args.extend(template[:-1])
args.append("%s.csv" % template[-1])
else:
template = None
if template is not None:
url = URL(r=request, c="static", f="formats", args=args)
try:
# only add the download link if the template can be opened
open("%s/../%s" % (r.folder, url))
form[0][0].insert(0, TR(TD(A(self.messages.download_template,
_href=url)),
_id="template__row"))
except:
pass
if form.accepts(r.post_vars, current.session,
formname="upload_form"):
upload_id = table.insert(**table._filter_fields(form.vars))
if self.csv_extra_fields:
self.csv_extra_data = Storage()
for f in self.csv_extra_fields:
label = f.get("label", None)
if not label:
continue
field = f.get("field", None)
value = f.get("value", None)
if field:
if field.name in form.vars:
data = form.vars[field.name]
else:
data = field.default
value = data
requires = field.requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
if requires:
requires = requires[0]
if isinstance(requires, IS_EMPTY_OR):
requires = requires.other
try:
options = requires.options()
except:
pass
else:
for k, v in options:
if k == str(data):
value = v
elif value is None:
continue
self.csv_extra_data[label] = value
s3.no_formats = True
return form
# -------------------------------------------------------------------------
def _create_upload_dataTable(self):
"""
List of previous Import jobs
"""
db = current.db
request = self.request
controller = self.controller
function = self.function
s3 = current.response.s3
table = self.upload_table
s3.filter = (table.controller == controller) & \
(table.function == function)
fields = ["id",
"filename",
"created_on",
"user_id",
"replace_option",
"status"]
self._use_upload_table()
# Hide the list of prior uploads for now
#output = self._dataTable(fields, sort_by = [[2,"desc"]])
output = dict()
self._use_controller_table()
if request.representation == "aadata":
return output
query = (table.status != 3) # Status of Pending or in-Error
rows = db(query).select(table.id)
restrictOpen = [str(row.id) for row in rows]
query = (table.status == 3) # Status of Completed
rows = db(query).select(table.id)
restrictView = [str(row.id) for row in rows]
s3.actions = [
dict(label=str(self.messages.open_btn),
_class="action-btn",
url=URL(r=request,
c=controller,
f=function,
args=["import"],
vars={"job":"[id]"}),
restrict = restrictOpen
),
dict(label=str(self.messages.view_btn),
_class="action-btn",
url=URL(r=request,
c=controller,
f=function,
args=["import"],
vars={"job":"[id]"}),
restrict = restrictView
),
dict(label=str(self.messages.delete_btn),
_class="delete-btn",
url=URL(r=request,
c=controller,
f=function,
args=["import"],
vars={"job":"[id]",
"delete":"True"
}
)
),
]
# Display an Error if no job is attached with this record
query = (table.status == 1) # Pending
rows = db(query).select(table.id)
s3.dataTableStyleAlert = [str(row.id) for row in rows]
query = (table.status == 2) # in error
rows = db(query).select(table.id)
s3.dataTableStyleWarning = [str(row.id) for row in rows]
return output
# -------------------------------------------------------------------------
def _create_import_item_dataTable(self, upload_id, job_id):
"""
@todo: docstring?
"""
s3 = current.response.s3
T = current.T
represent = {"s3_import_item.element" : self._item_element_represent}
self._use_import_item_table(job_id)
# Add a filter to the dataTable query
s3.filter = (self.table.job_id == job_id) & \
(self.table.tablename == self.controller_tablename)
# Get a list of the records that have an error of None
query = (self.table.job_id == job_id) & \
(self.table.tablename == self.controller_tablename)
rows = current.db(query).select(self.table.id, self.table.error)
select_list = []
error_list = []
for row in rows:
if row.error:
error_list.append(str(row.id))
else:
select_list.append("%s" % row.id)
# Experimental uploading via ajax - added for vulnerability
if self.ajax:
resource = self.resource
resource.add_filter(s3.filter)
rows = resource.select(["id", "element", "error"],
start=0,
limit=resource.count(),
)
data = resource.extract(rows,
["id", "element", "error"],
)
return (upload_id, select_list, data)
s3.actions = [
dict(label= str(self.messages.item_show_details),
_class="action-btn",
_jqclick="$('.importItem.'+id).toggle();",
),
]
output = self._dataTable(["id", "element", "error"],
sort_by = [[1, "asc"]],
represent=represent,
ajax_item_id=upload_id,
dt_bulk_select = select_list)
self._use_controller_table()
if self.request.representation == "aadata":
return output
# Highlight rows in error in red
s3.dataTableStyleWarning = error_list
form = output["items"]
job = INPUT(_type="hidden", _id="importUploadID", _name="job",
_value="%s" % upload_id)
form.append(job)
return output
# -------------------------------------------------------------------------
def _generate_import_job(self,
upload_id,
openFile,
fileFormat,
stylesheet=None,
commit_job=False):
"""
This will take an s3_import_upload record and
generate the importJob
@param upload_id: the id of the s3_import_upload record
@todo: complete parameter descriptions
"""
_debug("S3Importer._generate_import_job(%s, %s, %s, %s)" % (upload_id,
openFile,
fileFormat,
stylesheet
)
)
# ---------------------------------------------------------------------
# CSV
if fileFormat == "csv" or fileFormat == "comma-separated-values":
fmt = "csv"
src = openFile
# ---------------------------------------------------------------------
# XML
# @todo: implement
#elif fileFormat == "xml":
# ---------------------------------------------------------------------
# S3JSON
# @todo: implement
#elif fileFormat == "s3json":
# ---------------------------------------------------------------------
# PDF
# @todo: implement
#elif fileFormat == "pdf":
# ---------------------------------------------------------------------
# Unsupported Format
else:
msg = self.messages.unsupported_file_type % fileFormat
self.error = msg
_debug(msg)
return None
# Get the stylesheet
if stylesheet == None:
stylesheet = self._get_stylesheet()
if stylesheet == None:
return None
request = self.request
resource = request.resource
# Before calling import tree ensure the db.table is the controller_table
self.table = self.controller_table
self.tablename = self.controller_tablename
# Pass stylesheet arguments
args = Storage()
mode = request.get_vars.get("xsltmode", None)
if mode is not None:
args.update(mode=mode)
# Generate the import job
resource.import_xml(src,
format=fmt,
extra_data=self.csv_extra_data,
stylesheet=stylesheet,
ignore_errors = True,
commit_job = commit_job,
**args)
job = resource.job
if job is None:
if resource.error:
# Error
self.error = resource.error
return None
else:
# Nothing to import
self.warning = self.messages.no_records_to_import
return None
else:
# Job created
db = current.db
job_id = job.job_id
errors = current.xml.collect_errors(job)
if errors:
current.response.s3.error_report = errors
query = (self.upload_table.id == upload_id)
result = db(query).update(job_id=job_id)
# @todo: add check that result == 1, if not we are in error
# Now commit the changes
db.commit()
self.job_id = job_id
return True
# -------------------------------------------------------------------------
def _get_stylesheet(self, file_format="csv"):
"""
Get the stylesheet for transformation of the import
@param file_format: the import source file format
"""
if file_format == "csv":
xslt_path = os.path.join(self.xslt_path, "s3csv")
else:
xslt_path = os.path.join(self.xslt_path, file_format, "import.xsl")
return xslt_path
# Use the "csv_stylesheet" parameter to override the CSV stylesheet subpath
# and filename, e.g.
# s3_rest_controller(module, resourcename,
# csv_stylesheet=("inv", "inv_item.xsl"))
if self.csv_stylesheet:
if isinstance(self.csv_stylesheet, (tuple, list)):
stylesheet = os.path.join(xslt_path,
*self.csv_stylesheet)
else:
stylesheet = os.path.join(xslt_path,
self.controller,
self.csv_stylesheet)
else:
xslt_filename = "%s.%s" % (self.function, self.xslt_extension)
stylesheet = os.path.join(xslt_path,
self.controller,
xslt_filename)
if os.path.exists(stylesheet) is False:
msg = self.messages.stylesheet_not_found % stylesheet
self.error = msg
_debug(msg)
return None
return stylesheet
# -------------------------------------------------------------------------
def _commit_import_job(self, upload_id, items):
"""
This will save all of the selected import items
@todo: parameter descriptions?
"""
_debug("S3Importer._commit_import_job(%s, %s)" % (upload_id, items))
db = current.db
resource = self.request.resource
# Load the items from the s3_import_item table
self.importDetails = dict()
table = self.upload_table
query = (table.id == upload_id)
row = db(query).select(table.job_id,
table.replace_option,
limitby=(0, 1)).first()
if row is None:
return False
else:
job_id = row.job_id
current.response.s3.import_replace = row.replace_option
itemTable = S3ImportJob.define_item_table()
if itemTable != None:
#****************************************************************
# EXPERIMENTAL
# This doesn't delete related items
# but import_tree will tidy it up later
#****************************************************************
# Get all the items selected for import
rows = self._get_all_items(upload_id, as_string=True)
# Loop through each row and delete the items not required
self._store_import_details(job_id, "preDelete")
for id in rows:
if str(id) not in items:
# @todo: replace with a helper method from the API
_debug("Deleting item.id = %s" % id)
query = (itemTable.id == id)
db(query).delete()
#****************************************************************
# EXPERIMENTAL
#****************************************************************
# Set up the table we will import data into
self.table = self.controller_table
self.tablename = self.controller_tablename
self._store_import_details(job_id, "preImportTree")
# Now commit the remaining items
msg = resource.import_xml(None,
job_id = job_id,
ignore_errors = True)
return resource.error is None
# -------------------------------------------------------------------------
def _store_import_details(self, job_id, key):
"""
This will store the details from an importJob
@todo: parameter descriptions?
"""
_debug("S3Importer._store_import_details(%s, %s)" % (job_id, key))
itemTable = S3ImportJob.define_item_table()
query = (itemTable.job_id == job_id) & \
(itemTable.tablename == self.controller_tablename)
rows = current.db(query).select(itemTable.data, itemTable.error)
items = [dict(data=row.data, error=row.error) for row in rows]
self.importDetails[key] = items
# -------------------------------------------------------------------------
def _update_upload_job(self, upload_id):
"""
This will record the results from the import, and change the
status of the upload job
@todo: parameter descriptions?
@todo: report errors in referenced records, too
"""
_debug("S3Importer._update_upload_job(%s)" % (upload_id))
request = self.request
resource = request.resource
db = current.db
totalPreDelete = len(self.importDetails["preDelete"])
totalPreImport = len(self.importDetails["preImportTree"])
totalIgnored = totalPreDelete - totalPreImport
if resource.error_tree is None:
totalErrors = 0
else:
totalErrors = len(resource.error_tree.findall(
"resource[@name='%s']" % resource.tablename))
totalRecords = totalPreImport - totalErrors
if totalRecords < 0:
totalRecords = 0
query = (self.upload_table.id == upload_id)
result = db(query).update(summary_added=totalRecords,
summary_error=totalErrors,
summary_ignored = totalIgnored,
status = 3)
# Now commit the changes
db.commit()
return (totalRecords, totalErrors, totalIgnored)
# -------------------------------------------------------------------------
def _display_completed_job(self, totals, timestmp=None):
"""
Generate a summary flash message for a completed import job
@param totals: the job totals as tuple
(total imported, total errors, total ignored)
@param timestmp: the timestamp of the completion
"""
session = current.session
msg = "%s - %s - %s" % \
(self.messages.commit_total_records_imported,
self.messages.commit_total_errors,
self.messages.commit_total_records_ignored)
msg = msg % totals
        if timestmp is not None:
session.flash = self.messages.job_completed % \
(self.date_represent(timestmp), msg)
        elif totals[1] != 0:
            session.error = msg
        elif totals[2] != 0:
session.warning = msg
else:
session.flash = msg
# -------------------------------------------------------------------------
def _dataTable(self,
list_fields,
sort_by = [[1, "asc"]],
represent={},
ajax_item_id=None,
dt_bulk_select=[],
):
"""
Method to get the data for the dataTable
            This can be either a raw html representation or
            an ajax call update
Additional data will be cached to limit calls back to the server
@param list_fields: list of field names
@param sort_by: list of sort by columns
@param represent: a dict of field callback functions used
to change how the data will be displayed
keyed on the field identifier
@return: a dict()
In html representations this will be a table of the data
plus the sortby instructions
In ajax this will be a json response
In addition the following values will be made available:
totalRecords Number of records in the filtered data set
totalDisplayRecords Number of records to display
start Start point in the ordered data set
limit Number of records in the ordered set
NOTE: limit - totalDisplayRecords = total cached
"""
from s3.s3utils import S3DataTable
request = self.request
resource = self.resource
s3 = current.response.s3
# Filter
if s3.filter is not None:
self.resource.add_filter(s3.filter)
representation = self.request.representation
if representation == "aadata":
vars = request.get_vars
start = vars.get("iDisplayStart", None)
limit = vars.get("iDisplayLength", None)
sEcho = int(vars.sEcho or 0)
else: # catch all
start = 0
limit = current.manager.ROWSPERPAGE
if limit is not None:
try:
start = int(start)
limit = int(limit)
except ValueError:
start = None
limit = None # use default
else:
start = None # use default
rows = resource.select(list_fields,
start=start,
limit=limit,
)
data = resource.extract(rows,
list_fields,
)
# put each value through the represent function
for row in data:
for (key, value) in row.items():
if key in represent:
                    row[key] = represent[key](row["s3_import_item.id"], value)
rfields = resource.resolve_selectors(list_fields)[0]
dt = S3DataTable(rfields, data)
id = "s3import_1"
if representation == "aadata":
totalrows = self.resource.count()
return dt.json(totalrows,
totalrows,
id,
sEcho,
dt_bulk_actions = [current.T("Import")],
)
else:
output = dict()
url = "/%s/%s/%s/import.aadata?job=%s" % (request.application,
request.controller,
request.function,
ajax_item_id
)
totalrows = self.resource.count()
items = dt.html(totalrows,
totalrows,
id,
dt_ajax_url=url,
dt_bulk_actions = [current.T("Import")],
dt_bulk_selected = dt_bulk_select,
)
current.response.s3.dataTableID = ["s3import_1"]
output.update(items=items)
return output
# -------------------------------------------------------------------------
def _item_element_represent(self, id, value):
"""
Represent the element in an import item for dataTable display
@param value: the string containing the element
"""
T = current.T
db = current.db
value = S3XML.xml_decode(value)
try:
element = etree.fromstring(value)
except:
# XMLSyntaxError: return the element as-is
return DIV(value)
tablename = element.get("name")
table = current.db[tablename]
output = DIV()
details = TABLE(_class="importItem %s" % id)
header, rows = self._add_item_details(element.findall("data"), table)
if header is not None:
output.append(header)
# Add components, if present
components = element.findall("resource")
for component in components:
ctablename = component.get("name")
ctable = db[ctablename]
self._add_item_details(component.findall("data"), ctable,
details=rows, prefix=True)
if rows:
details.append(TBODY(rows))
# Add error messages, if present
errors = current.xml.collect_errors(element)
if errors:
details.append(TFOOT(TR(TH("%s:" % T("Errors")),
TD(UL([LI(e) for e in errors])))))
if rows == [] and components == []:
            # At this stage we don't have anything to display, so see if we can
            # find something to show. This could be the case when the table being
            # imported is a resolver for a many-to-many relationship.
refdetail = TABLE(_class="importItem %s" % id)
references = element.findall("reference")
for reference in references:
tuid = reference.get("tuid")
resource = reference.get("resource")
refdetail.append(TR(TD(resource), TD(tuid)))
output.append(refdetail)
else:
output.append(details)
return str(output)
# -------------------------------------------------------------------------
@staticmethod
def _add_item_details(data, table, details=None, prefix=False):
"""
Add details of the item element
@param data: the list of data elements in the item element
@param table: the table for the data
@param details: the existing details rows list (to append to)
"""
tablename = table._tablename
if details is None:
details = []
first = None
firstString = None
header = None
for child in data:
f = child.get("field", None)
if f not in table.fields:
continue
elif f == "wkt":
# Skip bulky WKT fields
continue
field = table[f]
ftype = str(field.type)
value = child.get("value", None)
if not value:
value = child.text
try:
value = S3Importer._decode_data(field, value)
except:
pass
if value:
value = S3XML.xml_encode(unicode(value))
else:
value = ""
            if f is not None and value is not None:
headerText = P(B("%s: " % f), value)
if not first:
first = headerText
if ftype == "string" and not firstString:
firstString = headerText
if f == "name":
header = headerText
if prefix:
details.append(TR(TH("%s.%s:" % (tablename, f)), TD(value)))
else:
details.append(TR(TH("%s:" % f), TD(value)))
if not header:
if firstString:
header = firstString
else:
header = first
return (header, details)
# -------------------------------------------------------------------------
@staticmethod
def _decode_data(field, value):
"""
Try to decode string data into their original type
@param field: the Field instance
@param value: the stringified value
@todo: replace this by ordinary decoder
"""
        if field.type == "string" or \
           field.type == "password" or \
           field.type == "upload" or \
           field.type == "text":
return value
elif field.type == "integer" or field.type == "id":
return int(value)
elif field.type == "double" or field.type == "decimal":
            return float(value)
elif field.type == 'boolean':
if value and not str(value)[:1].upper() in ["F", "0"]:
return "T"
else:
return "F"
elif field.type == "date":
return value # @todo fix this to get a date
elif field.type == "time":
return value # @todo fix this to get a time
elif field.type == "datetime":
return value # @todo fix this to get a datetime
else:
return value
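    # Illustrative sketch of how _decode_data is expected to behave; the field
    # names "priority" and "comments" below are assumptions for demonstration
    # only, not fields defined by this module:
    #
    #   field = table["priority"]             # an "integer" field
    #   S3Importer._decode_data(field, "3")   # -> 3
    #   field = table["comments"]             # a "text" field
    #   S3Importer._decode_data(field, "ok")  # -> "ok" (returned unchanged)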
# -------------------------------------------------------------------------
@staticmethod
def date_represent(date_obj):
"""
Represent a datetime object as string
@param date_obj: the datetime object
@todo: replace by S3DateTime method?
"""
return date_obj.strftime("%d %B %Y, %I:%M%p")
# -------------------------------------------------------------------------
def _process_item_list(self, upload_id, vars):
"""
Get the list of IDs for the selected items from the "mode"
and "selected" request variables
@param upload_id: the upload_id
@param vars: the request variables
"""
items = None
if "mode" in vars:
mode = vars["mode"]
if "selected" in vars:
selected = vars["selected"]
else:
selected = []
if mode == "Inclusive":
items = selected
elif mode == "Exclusive":
all_items = self._get_all_items(upload_id, as_string=True)
items = [i for i in all_items if i not in selected]
return items
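    # Illustrative sketch of the two selection modes; the upload_id and item IDs
    # are assumptions for demonstration only. With mode="Inclusive" the selected
    # IDs are imported as-is; with mode="Exclusive" the selection is inverted
    # against all items of the upload:
    #
    #   # assuming _get_all_items(upload_id, as_string=True) -> ["1", "2", "3"]
    #   importer._process_item_list(upload_id, {"mode": "Exclusive",
    #                                           "selected": ["2"]})  # -> ["1", "3"]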
# -------------------------------------------------------------------------
def _get_all_items(self, upload_id, as_string=False):
"""
Get a list of the record IDs of all import items for
            the given upload ID
@param upload_id: the upload ID
@param as_string: represent each ID as string
"""
item_table = S3ImportJob.define_item_table()
upload_table = self.upload_table
query = (upload_table.id == upload_id) & \
(item_table.job_id == upload_table.job_id) & \
(item_table.tablename == self.controller_tablename)
rows = current.db(query).select(item_table.id)
if as_string:
items = [str(row.id) for row in rows]
else:
items = [row.id for row in rows]
return items
# -------------------------------------------------------------------------
def _use_upload_table(self):
"""
            Set the resource and the table to be s3_import_upload
"""
self.tablename = self.upload_tablename
        if self.upload_resource is None:
self.upload_resource = current.s3db.resource(self.tablename)
self.resource = self.upload_resource
self.table = self.upload_table
# -------------------------------------------------------------------------
def _use_controller_table(self):
"""
Set the resource and the table to be the imported resource
"""
self.resource = self.controller_resource
self.table = self.controller_table
self.tablename = self.controller_tablename
# -------------------------------------------------------------------------
def _use_import_item_table(self, job_id):
"""
            Set the resource and the table to be s3_import_item
"""
self.table = S3ImportJob.define_item_table()
self.tablename = S3ImportJob.ITEM_TABLE_NAME
        if self.item_resource is None:
self.item_resource = current.s3db.resource(self.tablename)
self.resource = self.item_resource
# -------------------------------------------------------------------------
def __define_table(self):
""" Configures the upload table """
_debug("S3Importer.__define_table()")
T = current.T
db = current.db
request = current.request
self.upload_tablename = self.UPLOAD_TABLE_NAME
import_upload_status = {
1: T("Pending"),
2: T("In error"),
3: T("Completed"),
}
def user_name_represent(id):
# @todo: use s3_represent_user?
table = db.auth_user
query = (table.id == id)
row = db(query).select(table.first_name,
table.last_name,
limitby=(0, 1)).first()
if row:
rep_str = "%s %s" % (row.first_name, row.last_name)
else:
rep_str = current.messages.NONE
return rep_str
def status_represent(index):
            if index is None:
return "Unknown" # @todo: use messages (internationalize)
else:
return import_upload_status[index]
now = request.utcnow
table = self.define_upload_table()
table.file.upload_folder = os.path.join(request.folder,
"uploads",
#"imports"
)
table.file.comment = DIV(_class="tooltip",
_title="%s|%s" %
(self.messages.import_file,
self.messages.import_file_comment))
table.file.label = self.messages.import_file
table.status.requires = IS_IN_SET(import_upload_status, zero=None)
table.status.represent = status_represent
table.user_id.label = self.messages.user_name
table.user_id.represent = user_name_represent
table.created_on.default = now
table.created_on.represent = self.date_represent
table.modified_on.default = now
table.modified_on.update = now
table.modified_on.represent = self.date_represent
table.replace_option.label = T("Replace")
self.upload_table = db[self.UPLOAD_TABLE_NAME]
# -------------------------------------------------------------------------
@classmethod
def define_upload_table(cls):
""" Defines the upload table """
db = current.db
if cls.UPLOAD_TABLE_NAME not in db:
upload_table = db.define_table(cls.UPLOAD_TABLE_NAME,
Field("controller",
readable=False,
writable=False),
Field("function",
readable=False,
writable=False),
Field("file", "upload",
uploadfolder=os.path.join(current.request.folder,
"uploads", "imports"),
autodelete=True),
Field("filename",
readable=False,
writable=False),
Field("status", "integer",
default=1,
readable=False,
writable=False),
Field("extra_data",
readable=False,
writable=False),
Field("replace_option", "boolean",
default=False,
readable=False,
writable=False),
Field("job_id",
length=128,
readable=False,
writable=False),
Field("user_id", "integer",
readable=False,
writable=False),
Field("created_on", "datetime",
readable=False,
writable=False),
Field("modified_on", "datetime",
readable=False,
writable=False),
Field("summary_added", "integer",
readable=False,
writable=False),
Field("summary_error", "integer",
readable=False,
writable=False),
Field("summary_ignored", "integer",
readable=False,
writable=False),
Field("completed_details", "text",
readable=False,
writable=False))
else:
upload_table = db[cls.UPLOAD_TABLE_NAME]
return upload_table
# =============================================================================
class S3ImportItem(object):
""" Class representing an import item (=a single record) """
METHOD = Storage(
CREATE="create",
UPDATE="update",
DELETE="delete"
)
POLICY = Storage(
THIS="THIS", # keep local instance
OTHER="OTHER", # update unconditionally
NEWER="NEWER", # update if import is newer
MASTER="MASTER" # update if import is master
)
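    # Sketch of the intended effect of each policy on an update (applied per
    # field by update_policy() inside commit() below); descriptive only:
    #   THIS   - keep the value already in the local database
    #   OTHER  - take the imported value unconditionally
    #   NEWER  - take the imported value only if the import is more recent (mtime)
    #   MASTER - take the imported value only if the import is the master copy (MCI)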
# -------------------------------------------------------------------------
def __init__(self, job):
"""
Constructor
@param job: the import job this item belongs to
"""
self.job = job
self.ERROR = current.manager.ERROR
# Locking and error handling
self.lock = False
self.error = None
# Identification
import uuid
self.item_id = uuid.uuid4() # unique ID for this item
self.id = None
self.uid = None
# Data elements
self.table = None
self.tablename = None
self.element = None
self.data = None
self.original = None
self.components = []
self.references = []
self.load_components = []
self.load_references = []
self.parent = None
self.skip = False
# Conflict handling
self.mci = 2
self.mtime = datetime.utcnow()
self.modified = True
self.conflict = False
# Allowed import methods
self.strategy = job.strategy
# Update and conflict resolution policies
self.update_policy = job.update_policy
self.conflict_policy = job.conflict_policy
# Actual import method
self.method = None
self.onvalidation = None
self.onaccept = None
# Item import status flags
self.accepted = None
self.permitted = False
self.committed = False
# Writeback hook for circular references:
# Items which need a second write to update references
self.update = []
# -------------------------------------------------------------------------
def __repr__(self):
""" Helper method for debugging """
_str = "<S3ImportItem %s {item_id=%s uid=%s id=%s error=%s data=%s}>" % \
(self.table, self.item_id, self.uid, self.id, self.error, self.data)
return _str
# -------------------------------------------------------------------------
def parse(self,
element,
original=None,
table=None,
tree=None,
files=None):
"""
Read data from a <resource> element
@param element: the element
@param table: the DB table
@param tree: the import tree
@param files: uploaded files
@returns: True if successful, False if not (sets self.error)
"""
db = current.db
xml = current.xml
manager = current.manager
validate = manager.validate
s3db = current.s3db
self.element = element
if table is None:
tablename = element.get(xml.ATTRIBUTE.name, None)
try:
table = s3db[tablename]
except:
self.error = self.ERROR.BAD_RESOURCE
element.set(xml.ATTRIBUTE.error, self.error)
return False
self.table = table
self.tablename = table._tablename
if original is None:
original = S3Resource.original(table, element)
data = xml.record(table, element,
files=files,
original=original,
validate=validate)
if data is None:
self.error = self.ERROR.VALIDATION_ERROR
self.accepted = False
if not element.get(xml.ATTRIBUTE.error, False):
element.set(xml.ATTRIBUTE.error, str(self.error))
return False
self.data = data
if original is not None:
self.original = original
self.id = original[table._id.name]
if xml.UID in original:
self.uid = original[xml.UID]
self.data.update({xml.UID:self.uid})
elif xml.UID in data:
self.uid = data[xml.UID]
if xml.MTIME in data:
self.mtime = data[xml.MTIME]
if xml.MCI in data:
self.mci = data[xml.MCI]
_debug("New item: %s" % self)
return True
# -------------------------------------------------------------------------
def deduplicate(self):
"""
Detect whether this is an update or a new record
"""
RESOLVER = "deduplicate"
if self.id:
return
table = self.table
if table is None:
return
if self.original is not None:
original = self.original
else:
original = S3Resource.original(table, self.data)
if original is not None:
self.original = original
self.id = original[table._id.name]
UID = current.xml.UID
if UID in original:
self.uid = original[UID]
self.data.update({UID:self.uid})
self.method = self.METHOD.UPDATE
else:
resolve = current.s3db.get_config(self.tablename, RESOLVER)
if self.data and resolve:
resolve(self)
return
# -------------------------------------------------------------------------
def authorize(self):
"""
Authorize the import of this item, sets self.permitted
"""
if not self.table:
return False
prefix = self.tablename.split("_", 1)[0]
if prefix in current.manager.PROTECTED:
return False
# Determine the method
self.method = self.METHOD.CREATE
if self.id:
if self.data.deleted is True:
self.method = self.METHOD.DELETE
self.accepted = True
else:
if not self.original:
query = (self.table.id == self.id)
self.original = current.db(query).select(limitby=(0, 1)).first()
if self.original:
self.method = self.METHOD.UPDATE
# Set self.id
if self.method == self.METHOD.CREATE:
self.id = 0
# Authorization
authorize = current.auth.s3_has_permission
if authorize:
self.permitted = authorize(self.method,
self.tablename,
record_id=self.id)
else:
self.permitted = True
return self.permitted
# -------------------------------------------------------------------------
def validate(self):
"""
Validate this item (=record onvalidation), sets self.accepted
"""
if self.accepted is not None:
return self.accepted
if self.data is None or not self.table:
self.accepted = False
return False
form = Storage(method = self.method,
vars = self.data,
request_vars = self.data)
if self.id:
form.vars.id = self.id
form.errors = Storage()
tablename = self.tablename
key = "%s_onvalidation" % self.method
s3db = current.s3db
onvalidation = s3db.get_config(tablename, key,
s3db.get_config(tablename, "onvalidation"))
if onvalidation:
try:
callback(onvalidation, form, tablename=tablename)
except:
pass # @todo need a better handler here.
self.accepted = True
if form.errors:
error = current.xml.ATTRIBUTE.error
for k in form.errors:
e = self.element.findall("data[@field='%s']" % k)
if not e:
e = self.element.findall("reference[@field='%s']" % k)
if not e:
e = self.element
form.errors[k] = "[%s] %s" % (k, form.errors[k])
else:
e = e[0]
e.set(error,
str(form.errors[k]).decode("utf-8"))
self.error = self.ERROR.VALIDATION_ERROR
self.accepted = False
return self.accepted
# -------------------------------------------------------------------------
def commit(self, ignore_errors=False):
"""
Commit this item to the database
@param ignore_errors: skip invalid components
(still reports errors)
"""
# Check if already committed
if self.committed:
# already committed
return True
# If the parent item gets skipped, then skip this item as well
if self.parent is not None and self.parent.skip:
return True
_debug("Committing item %s" % self)
METHOD = self.METHOD
POLICY = self.POLICY
db = current.db
s3db = current.s3db
xml = current.xml
manager = current.manager
table = self.table
# Resolve references
self._resolve_references()
# Set a flag so that we know this is an import job
current.response.s3.bulk = True
# Validate
if not self.validate():
_debug("Validation error: %s (%s)" % (self.error, xml.tostring(self.element, pretty_print=True)))
self.skip = True
return ignore_errors
elif self.components:
for component in self.components:
if not component.validate():
if hasattr(component, "tablename"):
tn = component.tablename
else:
tn = None
_debug("Validation error, component=%s" % tn)
component.skip = True
# Skip this item on any component validation errors
# unless ignore_errors is True
if ignore_errors:
continue
else:
self.skip = True
return False
# De-duplicate
self.deduplicate()
# Log this item
if manager.log is not None:
manager.log(self)
# Authorize item
if not self.authorize():
_debug("Not authorized - skip")
self.error = manager.ERROR.NOT_PERMITTED
self.skip = True
return ignore_errors
method = self.method
_debug("Method: %s" % method)
# Check if import method is allowed in strategy
if not isinstance(self.strategy, (list, tuple)):
self.strategy = [self.strategy]
if method not in self.strategy:
_debug("Method not in strategy - skip")
self.error = manager.ERROR.NOT_PERMITTED
self.skip = True
return True
this = self.original
if not this and self.id and \
method in (METHOD.UPDATE, METHOD.DELETE):
query = (table.id == self.id)
this = db(query).select(limitby=(0, 1)).first()
this_mtime = None
this_mci = 0
if this:
if xml.MTIME in table.fields:
this_mtime = xml.as_utc(this[xml.MTIME])
if xml.MCI in table.fields:
this_mci = this[xml.MCI]
self.mtime = xml.as_utc(self.mtime)
# Conflict detection
this_modified = True
self.modified = True
self.conflict = False
last_sync = xml.as_utc(self.job.last_sync)
if last_sync:
if this_mtime and this_mtime < last_sync:
this_modified = False
if self.mtime and self.mtime < last_sync:
self.modified = False
if self.modified and this_modified:
self.conflict = True
if self.conflict and \
method in (METHOD.UPDATE, METHOD.DELETE):
_debug("Conflict: %s" % self)
if self.job.onconflict:
self.job.onconflict(self)
if self.data is not None:
data = Storage(self.data)
else:
data = Storage()
if isinstance(self.update_policy, dict):
def update_policy(f):
setting = self.update_policy
p = setting.get(f,
setting.get("__default__", POLICY.THIS))
if p not in POLICY:
return POLICY.THIS
return p
else:
def update_policy(f):
p = self.update_policy
if p not in POLICY:
return POLICY.THIS
return p
# Update existing record
if method == METHOD.UPDATE:
if this:
if "deleted" in this and this.deleted:
policy = update_policy(None)
                    if (policy == POLICY.NEWER and
                        this_mtime and this_mtime > self.mtime) or \
                       (policy == POLICY.MASTER and
                        (this_mci == 0 or self.mci != 1)):
self.skip = True
return True
fields = data.keys()
for f in fields:
if f not in this:
continue
if isinstance(this[f], datetime):
if xml.as_utc(data[f]) == xml.as_utc(this[f]):
del data[f]
continue
else:
if data[f] == this[f]:
del data[f]
continue
remove = False
policy = update_policy(f)
if policy == POLICY.THIS:
remove = True
elif policy == POLICY.NEWER:
if this_mtime and this_mtime > self.mtime:
remove = True
elif policy == POLICY.MASTER:
if this_mci == 0 or self.mci != 1:
remove = True
if remove:
del data[f]
self.data.update({f:this[f]})
if "deleted" in this and this.deleted:
# Undelete re-imported records:
data.update(deleted=False)
if "deleted_fk" in table:
data.update(deleted_fk="")
if "created_by" in table:
data.update(created_by=table.created_by.default)
if "modified_by" in table:
data.update(modified_by=table.modified_by.default)
if not self.skip and not self.conflict and \
(len(data) or self.components or self.references):
if self.uid and xml.UID in table:
data.update({xml.UID:self.uid})
if xml.MTIME in table:
data.update({xml.MTIME: self.mtime})
if xml.MCI in data:
# retain local MCI on updates
del data[xml.MCI]
query = (table._id == self.id)
try:
success = db(query).update(**dict(data))
except:
self.error = sys.exc_info()[1]
self.skip = True
return False
if success:
self.committed = True
else:
# Nothing to update
self.committed = True
# Create new record
elif method == METHOD.CREATE:
# Do not apply field policy to UID and MCI
UID = xml.UID
if UID in data:
del data[UID]
MCI = xml.MCI
if MCI in data:
del data[MCI]
for f in data:
policy = update_policy(f)
if policy == POLICY.MASTER and self.mci != 1:
del data[f]
if len(data) or self.components or self.references:
# Restore UID and MCI
if self.uid and UID in table.fields:
data.update({UID:self.uid})
if MCI in table.fields:
data.update({MCI:self.mci})
# Insert the new record
try:
success = table.insert(**dict(data))
except:
self.error = sys.exc_info()[1]
self.skip = True
return False
if success:
self.id = success
self.committed = True
else:
# Nothing to create
self.skip = True
return True
# Delete local record
elif method == METHOD.DELETE:
if this:
if this.deleted:
self.skip = True
policy = update_policy(None)
if policy == POLICY.THIS:
self.skip = True
elif policy == POLICY.NEWER and \
(this_mtime and this_mtime > self.mtime):
self.skip = True
elif policy == POLICY.MASTER and \
(this_mci == 0 or self.mci != 1):
self.skip = True
else:
self.skip = True
if not self.skip and not self.conflict:
resource = s3db.resource(self.tablename, id=self.id)
ondelete = s3db.get_config(self.tablename, "ondelete")
success = resource.delete(ondelete=ondelete,
cascade=True)
if resource.error:
self.error = resource.error
self.skip = True
return ignore_errors
_debug("Success: %s, id=%s %sd" % (self.tablename, self.id,
self.skip and "skippe" or \
method))
return True
# Audit + onaccept on successful commits
if self.committed:
form = Storage()
form.method = method
form.vars = self.data
tablename = self.tablename
prefix, name = tablename.split("_", 1)
if self.id:
form.vars.id = self.id
if manager.audit is not None:
manager.audit(method, prefix, name,
form=form,
record=self.id,
representation="xml")
# Update super entity links
s3db.update_super(table, form.vars)
if method == METHOD.CREATE:
# Set record owner
current.auth.s3_set_record_owner(table, self.id)
elif method == METHOD.UPDATE:
# Update realm
update_realm = s3db.get_config(table, "update_realm")
if update_realm:
current.auth.set_realm_entity(table, self.id,
force_update=True)
# Onaccept
key = "%s_onaccept" % method
onaccept = s3db.get_config(tablename, key,
s3db.get_config(tablename, "onaccept"))
if onaccept:
callback(onaccept, form, tablename=self.tablename)
# Update referencing items
if self.update and self.id:
for u in self.update:
item = u.get("item", None)
if not item:
continue
field = u.get("field", None)
if isinstance(field, (list, tuple)):
pkey, fkey = field
query = table.id == self.id
row = db(query).select(table[pkey],
limitby=(0, 1)).first()
if row:
item._update_reference(fkey, row[pkey])
else:
item._update_reference(field, self.id)
_debug("Success: %s, id=%s %sd" % (self.tablename, self.id,
self.skip and "skippe" or \
method))
return True
# -------------------------------------------------------------------------
def _resolve_references(self):
"""
Resolve the references of this item (=look up all foreign
keys from other items of the same job). If a foreign key
is not yet available, it will be scheduled for later update.
"""
if not self.table:
return
items = self.job.items
for reference in self.references:
item = None
field = reference.field
entry = reference.entry
if not entry:
continue
# Resolve key tuples
if isinstance(field, (list,tuple)):
pkey, fkey = field
else:
pkey, fkey = ("id", field)
# Resolve the key table name
ktablename, key, multiple = s3_get_foreign_key(self.table[fkey])
if not ktablename:
if self.tablename == "auth_user" and \
fkey == "organisation_id":
ktablename = "org_organisation"
else:
continue
if entry.tablename:
ktablename = entry.tablename
try:
ktable = current.s3db[ktablename]
except:
continue
# Resolve the foreign key (value)
fk = entry.id
if entry.item_id:
item = items[entry.item_id]
if item:
fk = item.id
if fk and pkey != "id":
row = current.db(ktable._id == fk).select(ktable[pkey],
limitby=(0, 1)).first()
if not row:
fk = None
continue
else:
fk = row[pkey]
# Update record data
if fk:
if multiple:
val = self.data.get(fkey, [])
if fk not in val:
val.append(fk)
self.data[fkey] = val
else:
self.data[fkey] = fk
else:
if fkey in self.data and not multiple:
del self.data[fkey]
if item:
item.update.append(dict(item=self, field=fkey))
# -------------------------------------------------------------------------
def _update_reference(self, field, value):
"""
Helper method to update a foreign key in an already written
record. Will be called by the referenced item after (and only
if) it has been committed. This is only needed if the reference
could not be resolved before commit due to circular references.
@param field: the field name of the foreign key
@param value: the value of the foreign key
"""
if not value or not self.table:
return
db = current.db
if self.id and self.permitted:
fieldtype = str(self.table[field].type)
if fieldtype.startswith("list:reference"):
query = (self.table.id == self.id)
record = db(query).select(self.table[field],
limitby=(0,1)).first()
if record:
values = record[field]
if value not in values:
values.append(value)
db(self.table.id == self.id).update(**{field:values})
else:
db(self.table.id == self.id).update(**{field:value})
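    # Illustrative sketch of the circular-reference write-back (hypothetical
    # item and field names): if item_a references item_b but item_b has no
    # database ID yet, _resolve_references() schedules a write-back on the
    # referenced item:
    #
    #   item_b.update.append(dict(item=item_a, field="location_id"))
    #
    # and once item_b is committed, its commit() calls
    # item_a._update_reference("location_id", item_b.id) to fill in the key.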
# -------------------------------------------------------------------------
def store(self, item_table=None):
"""
Store this item in the DB
"""
_debug("Storing item %s" % self)
if item_table is None:
return None
db = current.db
query = item_table.item_id == self.item_id
row = db(query).select(item_table.id, limitby=(0, 1)).first()
if row:
record_id = row.id
else:
record_id = None
record = Storage(job_id = self.job.job_id,
item_id = self.item_id,
tablename = self.tablename,
record_uid = self.uid,
error = self.error)
if self.element is not None:
element_str = current.xml.tostring(self.element,
xml_declaration=False)
record.update(element=element_str)
if self.data is not None:
data = Storage()
for f in self.data.keys():
table = self.table
if f not in table.fields:
continue
fieldtype = str(self.table[f].type)
if fieldtype == "id" or s3_has_foreign_key(self.table[f]):
continue
data.update({f:self.data[f]})
data_str = cPickle.dumps(data)
record.update(data=data_str)
ritems = []
for reference in self.references:
field = reference.field
entry = reference.entry
store_entry = None
if entry:
if entry.item_id is not None:
store_entry = dict(field=field,
item_id=str(entry.item_id))
elif entry.uid is not None:
store_entry = dict(field=field,
tablename=entry.tablename,
uid=str(entry.uid))
if store_entry is not None:
ritems.append(json.dumps(store_entry))
if ritems:
record.update(ritems=ritems)
citems = [c.item_id for c in self.components]
if citems:
record.update(citems=citems)
if self.parent:
record.update(parent=self.parent.item_id)
if record_id:
db(item_table.id == record_id).update(**record)
else:
record_id = item_table.insert(**record)
_debug("Record ID=%s" % record_id)
return record_id
# -------------------------------------------------------------------------
def restore(self, row):
"""
            Restore an item from an item table row. This does not restore
            the references (since this cannot be done before all items
            are restored); call job.restore_references() to do that.
@param row: the item table row
"""
xml = current.xml
self.item_id = row.item_id
self.accepted = None
self.permitted = False
self.committed = False
tablename = row.tablename
self.id = None
self.uid = row.record_uid
if row.data is not None:
self.data = cPickle.loads(row.data)
else:
self.data = Storage()
data = self.data
if xml.MTIME in data:
self.mtime = data[xml.MTIME]
if xml.MCI in data:
self.mci = data[xml.MCI]
UID = xml.UID
if UID in data:
self.uid = data[UID]
self.element = etree.fromstring(row.element)
if row.citems:
self.load_components = row.citems
if row.ritems:
self.load_references = [json.loads(ritem) for ritem in row.ritems]
self.load_parent = row.parent
try:
table = current.s3db[tablename]
except:
self.error = self.ERROR.BAD_RESOURCE
return False
else:
self.table = table
self.tablename = tablename
original = S3Resource.original(table, self.data)
if original is not None:
self.original = original
self.id = original[table._id.name]
if UID in original:
self.uid = original[UID]
self.data.update({UID:self.uid})
self.error = row.error
if self.error and not self.data:
# Validation error
return False
return True
# =============================================================================
class S3ImportJob():
"""
Class to import an element tree into the database
"""
JOB_TABLE_NAME = "s3_import_job"
ITEM_TABLE_NAME = "s3_import_item"
# -------------------------------------------------------------------------
def __init__(self, manager, table,
tree=None,
files=None,
job_id=None,
strategy=None,
update_policy=None,
conflict_policy=None,
last_sync=None,
onconflict=None):
"""
Constructor
@param manager: the S3RequestManager instance performing this job
@param tree: the element tree to import
@param files: files attached to the import (for upload fields)
@param job_id: restore job from database (record ID or job_id)
@param strategy: the import strategy
@param update_policy: the update policy
@param conflict_policy: the conflict resolution policy
@param last_sync: the last synchronization time stamp (datetime)
@param onconflict: custom conflict resolver function
"""
self.error = None # the last error
self.error_tree = etree.Element(current.xml.TAG.root)
self.table = table
self.tree = tree
self.files = files
self.directory = Storage()
self.elements = Storage()
self.items = Storage()
self.references = []
self.job_table = None
self.item_table = None
self.count = 0 # total number of records imported
self.created = [] # IDs of created records
self.updated = [] # IDs of updated records
self.deleted = [] # IDs of deleted records
# Import strategy
self.strategy = strategy
if self.strategy is None:
self.strategy = [S3ImportItem.METHOD.CREATE,
S3ImportItem.METHOD.UPDATE,
S3ImportItem.METHOD.DELETE]
if not isinstance(self.strategy, (tuple, list)):
self.strategy = [self.strategy]
# Update policy (default=always update)
self.update_policy = update_policy
if not self.update_policy:
self.update_policy = S3ImportItem.POLICY.OTHER
# Conflict resolution policy (default=always update)
self.conflict_policy = conflict_policy
if not self.conflict_policy:
self.conflict_policy = S3ImportItem.POLICY.OTHER
# Synchronization settings
self.mtime = None
self.last_sync = last_sync
self.onconflict = onconflict
if job_id:
self.__define_tables()
jobtable = self.job_table
if str(job_id).isdigit():
query = jobtable.id == job_id
else:
query = jobtable.job_id == job_id
row = current.db(query).select(limitby=(0, 1)).first()
if not row:
raise SyntaxError("Job record not found")
self.job_id = row.job_id
if not self.table:
tablename = row.tablename
try:
table = current.s3db[tablename]
except:
pass
else:
import uuid
self.job_id = uuid.uuid4() # unique ID for this job
# -------------------------------------------------------------------------
def add_item(self,
element=None,
original=None,
components=None,
parent=None,
joinby=None):
"""
Parse and validate an XML element and add it as new item
to the job.
@param element: the element
@param original: the original DB record (if already available,
will otherwise be looked-up by this function)
@param components: a dictionary of components (as in S3Resource)
to include in the job (defaults to all
defined components)
@param parent: the parent item (if this is a component)
@param joinby: the component join key(s) (if this is a component)
@returns: a unique identifier for the new item, or None if there
was an error. self.error contains the last error, and
self.error_tree an element tree with all failing elements
including error attributes.
"""
if element in self.elements:
# element has already been added to this job
return self.elements[element]
# Parse the main element
item = S3ImportItem(self)
# Update lookup lists
item_id = item.item_id
self.items[item_id] = item
if element is not None:
self.elements[element] = item_id
if not item.parse(element,
original=original,
files=self.files):
self.error = item.error
item.accepted = False
if parent is None:
self.error_tree.append(deepcopy(item.element))
else:
# Now parse the components
table = item.table
components = current.s3db.get_components(table, names=components)
cnames = Storage()
cinfos = Storage()
for alias in components:
component = components[alias]
pkey = component.pkey
if component.linktable:
ctable = component.linktable
fkey = component.lkey
else:
ctable = component.table
fkey = component.fkey
ctablename = ctable._tablename
if ctablename in cnames:
cnames[ctablename].append(alias)
else:
cnames[ctablename] = [alias]
cinfos[(ctablename, alias)] = Storage(component = component,
ctable = ctable,
pkey = pkey,
fkey = fkey,
original = None,
uid = None)
add_item = self.add_item
xml = current.xml
for celement in xml.components(element, names=cnames.keys()):
# Get the component tablename
ctablename = celement.get(xml.ATTRIBUTE.name, None)
if not ctablename:
continue
# Get the component alias (for disambiguation)
calias = celement.get(xml.ATTRIBUTE.alias, None)
if calias is None:
if ctablename not in cnames:
continue
aliases = cnames[ctablename]
if len(aliases) == 1:
calias = aliases[0]
else:
# ambiguous components *must* use alias
continue
if (ctablename, calias) not in cinfos:
continue
else:
cinfo = cinfos[(ctablename, calias)]
component = cinfo.component
original = cinfo.original
ctable = cinfo.ctable
pkey = cinfo.pkey
fkey = cinfo.fkey
if not component.multiple:
if cinfo.uid is not None:
continue
if original is None and item.id:
query = (table.id == item.id) & \
(table[pkey] == ctable[fkey])
original = current.db(query).select(ctable.ALL,
limitby=(0, 1)).first()
if original:
cinfo.uid = uid = original.get(xml.UID, None)
celement.set(xml.UID, uid)
cinfo.original = original
item_id = add_item(element=celement,
original=original,
parent=item,
joinby=(pkey, fkey))
if item_id is None:
item.error = self.error
self.error_tree.append(deepcopy(item.element))
else:
citem = self.items[item_id]
citem.parent = item
item.components.append(citem)
# Handle references
table = item.table
tree = self.tree
if tree is not None:
fields = [table[f] for f in table.fields]
rfields = filter(s3_has_foreign_key, fields)
item.references = self.lookahead(element,
table=table,
fields=rfields,
tree=tree,
directory=self.directory)
for reference in item.references:
entry = reference.entry
if entry and entry.element is not None:
item_id = add_item(element=entry.element)
if item_id:
entry.update(item_id=item_id)
# Parent reference
if parent is not None:
entry = Storage(item_id=parent.item_id,
element=parent.element,
tablename=parent.tablename)
item.references.append(Storage(field=joinby,
entry=entry))
return item.item_id
# -------------------------------------------------------------------------
def lookahead(self,
element,
table=None,
fields=None,
tree=None,
directory=None):
"""
Find referenced elements in the tree
@param element: the element
@param table: the DB table
@param fields: the FK fields in the table
@param tree: the import tree
@param directory: a dictionary to lookup elements in the tree
(will be filled in by this function)
"""
db = current.db
s3db = current.s3db
xml = current.xml
import_uid = xml.import_uid
ATTRIBUTE = xml.ATTRIBUTE
TAG = xml.TAG
UID = xml.UID
reference_list = []
root = None
if tree is not None:
if isinstance(tree, etree._Element):
root = tree
else:
root = tree.getroot()
references = element.findall("reference")
for reference in references:
field = reference.get(ATTRIBUTE.field, None)
# Ignore references without valid field-attribute
if not field or field not in fields:
continue
# Find the key table
ktablename, key, multiple = s3_get_foreign_key(table[field])
if not ktablename:
if table._tablename == "auth_user" and \
field == "organisation_id":
ktablename = "org_organisation"
else:
continue
try:
ktable = current.s3db[ktablename]
except:
continue
tablename = reference.get(ATTRIBUTE.resource, None)
# Ignore references to tables without UID field:
if UID not in ktable.fields:
continue
# Fall back to key table name if tablename is not specified:
if not tablename:
tablename = ktablename
# Super-entity references must use the super-key:
if tablename != ktablename:
field = (ktable._id.name, field)
# Ignore direct references to super-entities:
if tablename == ktablename and ktable._id.name != "id":
continue
# Get the foreign key
uids = reference.get(UID, None)
attr = UID
if not uids:
uids = reference.get(ATTRIBUTE.tuid, None)
attr = ATTRIBUTE.tuid
if uids and multiple:
uids = json.loads(uids)
elif uids:
uids = [uids]
# Find the elements and map to DB records
relements = []
# Create a UID<->ID map
id_map = Storage()
if attr == UID and uids:
_uids = map(import_uid, uids)
query = ktable[UID].belongs(_uids)
records = db(query).select(ktable.id,
ktable[UID])
id_map = dict([(r[UID], r.id) for r in records])
if not uids:
# Anonymous reference: <resource> inside the element
expr = './/%s[@%s="%s"]' % (TAG.resource,
ATTRIBUTE.name,
tablename)
relements = reference.xpath(expr)
if relements and not multiple:
relements = [relements[0]]
elif root is not None:
for uid in uids:
entry = None
# Entry already in directory?
if directory is not None:
entry = directory.get((tablename, attr, uid), None)
if not entry:
expr = ".//%s[@%s='%s' and @%s='%s']" % (
TAG.resource,
ATTRIBUTE.name,
tablename,
attr,
uid)
e = root.xpath(expr)
if e:
# Element in the source => append to relements
relements.append(e[0])
else:
# No element found, see if original record exists
_uid = import_uid(uid)
if _uid and _uid in id_map:
_id = id_map[_uid]
entry = Storage(tablename=tablename,
element=None,
uid=uid,
id=_id,
item_id=None)
reference_list.append(Storage(field=field,
entry=entry))
else:
continue
else:
reference_list.append(Storage(field=field,
entry=entry))
# Create entries for all newly found elements
for relement in relements:
uid = relement.get(attr, None)
if attr == UID:
_uid = import_uid(uid)
id = _uid and id_map and id_map.get(_uid, None) or None
else:
_uid = None
id = None
entry = Storage(tablename=tablename,
element=relement,
uid=uid,
id=id,
item_id=None)
# Add entry to directory
if uid and directory is not None:
directory[(tablename, attr, uid)] = entry
# Append the entry to the reference list
reference_list.append(Storage(field=field, entry=entry))
return reference_list
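    # Sketch of the structure returned by lookahead(); the table and field names
    # are assumptions for demonstration only. Each entry is a Storage of the form
    #
    #   Storage(field="location_id",
    #           entry=Storage(tablename="gis_location", element=<Element>,
    #                         uid="...", id=None, item_id=None))
    #
    # where entry.id is set when a matching record already exists in the DB and
    # entry.item_id is filled in later by add_item() when the referenced element
    # is itself part of the import tree.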
# -------------------------------------------------------------------------
def load_item(self, row):
"""
Load an item from the item table (counterpart to add_item
when restoring a job from the database)
"""
item = S3ImportItem(self)
if not item.restore(row):
self.error = item.error
if item.load_parent is None:
self.error_tree.append(deepcopy(item.element))
# Update lookup lists
item_id = item.item_id
self.items[item_id] = item
return item_id
# -------------------------------------------------------------------------
def resolve(self, item_id, import_list):
"""
Resolve the reference list of an item
@param item_id: the import item UID
@param import_list: the ordered list of items (UIDs) to import
"""
item = self.items[item_id]
if item.lock or item.accepted is False:
return False
references = []
for reference in item.references:
ritem_id = reference.entry.item_id
if ritem_id and ritem_id not in import_list:
references.append(ritem_id)
for ritem_id in references:
item.lock = True
if self.resolve(ritem_id, import_list):
import_list.append(ritem_id)
item.lock = False
return True
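    # Illustrative note: resolve() is a depth-first walk, so referenced items end
    # up in import_list before the items that reference them; e.g. for items
    # A -> B -> C (A references B, B references C) the resulting order would be
    # [C, B, A]. The lock flag guards against infinite recursion on cycles.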
# -------------------------------------------------------------------------
def commit(self, ignore_errors=False):
"""
Commit the import job to the DB
@param ignore_errors: skip any items with errors
(does still report the errors)
"""
ATTRIBUTE = current.xml.ATTRIBUTE
# Resolve references
import_list = []
for item_id in self.items:
self.resolve(item_id, import_list)
if item_id not in import_list:
import_list.append(item_id)
# Commit the items
items = self.items
count = 0
mtime = None
created = []
cappend = created.append
updated = []
deleted = []
tablename = self.table._tablename
for item_id in import_list:
item = items[item_id]
error = None
success = item.commit(ignore_errors=ignore_errors)
error = item.error
if error:
self.error = error
element = item.element
if element is not None:
if not element.get(ATTRIBUTE.error, False):
element.set(ATTRIBUTE.error, str(self.error))
self.error_tree.append(deepcopy(element))
if not ignore_errors:
return False
elif item.tablename == tablename:
count += 1
if mtime is None or item.mtime > mtime:
mtime = item.mtime
if item.id:
if item.method == item.METHOD.CREATE:
cappend(item.id)
elif item.method == item.METHOD.UPDATE:
updated.append(item.id)
elif item.method == item.METHOD.DELETE:
deleted.append(item.id)
self.count = count
self.mtime = mtime
self.created = created
self.updated = updated
self.deleted = deleted
return True
# -------------------------------------------------------------------------
def __define_tables(self):
"""
Define the database tables for jobs and items
"""
self.job_table = self.define_job_table()
self.item_table = self.define_item_table()
# -------------------------------------------------------------------------
@classmethod
def define_job_table(cls):
db = current.db
if cls.JOB_TABLE_NAME not in db:
job_table = db.define_table(cls.JOB_TABLE_NAME,
Field("job_id", length=128,
unique=True,
notnull=True),
Field("tablename"),
Field("timestmp", "datetime",
default=datetime.utcnow()))
else:
job_table = db[cls.JOB_TABLE_NAME]
return job_table
# -------------------------------------------------------------------------
@classmethod
def define_item_table(cls):
db = current.db
if cls.ITEM_TABLE_NAME not in db:
item_table = db.define_table(cls.ITEM_TABLE_NAME,
Field("item_id", length=128,
unique=True,
notnull=True),
Field("job_id", length=128),
Field("tablename", length=128),
#Field("record_id", "integer"),
Field("record_uid"),
Field("error", "text"),
Field("data", "text"),
Field("element", "text"),
Field("ritems", "list:string"),
Field("citems", "list:string"),
Field("parent", length=128))
else:
item_table = db[cls.ITEM_TABLE_NAME]
return item_table
# -------------------------------------------------------------------------
def store(self):
"""
Store this job and all its items in the job table
"""
db = current.db
_debug("Storing Job ID=%s" % self.job_id)
self.__define_tables()
jobtable = self.job_table
query = jobtable.job_id == self.job_id
row = db(query).select(jobtable.id, limitby=(0, 1)).first()
if row:
record_id = row.id
else:
record_id = None
record = Storage(job_id=self.job_id)
try:
tablename = self.table._tablename
except:
pass
else:
record.update(tablename=tablename)
for item in self.items.values():
item.store(item_table=self.item_table)
if record_id:
db(jobtable.id == record_id).update(**record)
else:
record_id = jobtable.insert(**record)
_debug("Job record ID=%s" % record_id)
return record_id
# -------------------------------------------------------------------------
def get_tree(self):
"""
Reconstruct the element tree of this job
"""
if self.tree is not None:
            return self.tree
else:
xml = current.xml
root = etree.Element(xml.TAG.root)
for item in self.items.values():
if item.element is not None and not item.parent:
if item.tablename == self.table._tablename or \
item.element.get(xml.UID, None) or \
item.element.get(xml.ATTRIBUTE.tuid, None):
root.append(deepcopy(item.element))
return etree.ElementTree(root)
# -------------------------------------------------------------------------
def delete(self):
"""
Delete this job and all its items from the job table
"""
db = current.db
_debug("Deleting job ID=%s" % self.job_id)
self.__define_tables()
item_table = self.item_table
query = item_table.job_id == self.job_id
db(query).delete()
job_table = self.job_table
query = job_table.job_id == self.job_id
db(query).delete()
# -------------------------------------------------------------------------
def restore_references(self):
"""
Restore the job's reference structure after loading items
from the item table
"""
db = current.db
UID = current.xml.UID
for item in self.items.values():
for citem_id in item.load_components:
if citem_id in self.items:
item.components.append(self.items[citem_id])
item.load_components = []
for ritem in item.load_references:
field = ritem["field"]
if "item_id" in ritem:
item_id = ritem["item_id"]
if item_id in self.items:
_item = self.items[item_id]
entry = Storage(tablename=_item.tablename,
element=_item.element,
uid=_item.uid,
id=_item.id,
item_id=item_id)
item.references.append(Storage(field=field,
entry=entry))
else:
_id = None
uid = ritem.get("uid", None)
tablename = ritem.get("tablename", None)
if tablename and uid:
try:
table = current.s3db[tablename]
except:
continue
if UID not in table.fields:
continue
query = table[UID] == uid
row = db(query).select(table._id,
limitby=(0, 1)).first()
if row:
_id = row[table._id.name]
else:
continue
entry = Storage(tablename = ritem["tablename"],
element=None,
uid = ritem["uid"],
id = _id,
item_id = None)
item.references.append(Storage(field=field,
entry=entry))
item.load_references = []
if item.load_parent is not None:
item.parent = self.items[item.load_parent]
item.load_parent = None
# END =========================================================================
| mit |
brandond/ansible | test/units/module_utils/gcp/test_auth.py | 45 | 6823 | # -*- coding: utf-8 -*-
# (c) 2016, Tom Melendez (@supertom) <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import pytest
from units.compat import mock, unittest
from ansible.module_utils.gcp import (_get_gcp_ansible_credentials, _get_gcp_credentials, _get_gcp_environ_var,
_get_gcp_environment_credentials,
_validate_credentials_file)
# Fake data/function used for testing
fake_env_data = {'GCE_EMAIL': 'gce-email'}
def fake_get_gcp_environ_var(var_name, default_value):
if var_name not in fake_env_data:
return default_value
else:
return fake_env_data[var_name]
# Fake AnsibleModule for use in tests
class FakeModule(object):
class Params():
data = {}
def get(self, key, alt=None):
if key in self.data:
return self.data[key]
else:
return alt
def __init__(self, data=None):
data = {} if data is None else data
self.params = FakeModule.Params()
self.params.data = data
def fail_json(self, **kwargs):
raise ValueError("fail_json")
def deprecate(self, **kwargs):
return None
class GCPAuthTestCase(unittest.TestCase):
"""Tests to verify different Auth mechanisms."""
def setup_method(self, method):
global fake_env_data
fake_env_data = {'GCE_EMAIL': 'gce-email'}
def test_get_gcp_ansible_credentials(self):
input_data = {'service_account_email': 'mysa',
'credentials_file': 'path-to-file.json',
'project_id': 'my-cool-project'}
module = FakeModule(input_data)
actual = _get_gcp_ansible_credentials(module)
expected = tuple(input_data.values())
self.assertEqual(sorted(expected), sorted(actual))
def test_get_gcp_environ_var(self):
# Chose not to mock this so we could really verify that it
# works as expected.
existing_var_name = 'gcp_ansible_auth_test_54321'
non_existing_var_name = 'doesnt_exist_gcp_ansible_auth_test_12345'
os.environ[existing_var_name] = 'foobar'
self.assertEqual('foobar', _get_gcp_environ_var(
existing_var_name, None))
del os.environ[existing_var_name]
self.assertEqual('default_value', _get_gcp_environ_var(
non_existing_var_name, 'default_value'))
def test_validate_credentials_file(self):
# TODO(supertom): Only dealing with p12 here, check the other states
# of this function
module = FakeModule()
with mock.patch("ansible.module_utils.gcp.open",
mock.mock_open(read_data='foobar'), create=True) as m:
# pem condition, warning is suppressed with the return_value
credentials_file = '/foopath/pem.pem'
with self.assertRaises(ValueError):
_validate_credentials_file(module,
credentials_file=credentials_file,
require_valid_json=False,
check_libcloud=False)
@mock.patch('ansible.module_utils.gcp._get_gcp_environ_var',
side_effect=fake_get_gcp_environ_var)
def test_get_gcp_environment_credentials(self, mockobj):
global fake_env_data
actual = _get_gcp_environment_credentials(None, None, None)
expected = tuple(['gce-email', None, None])
self.assertEqual(expected, actual)
fake_env_data = {'GCE_PEM_FILE_PATH': '/path/to/pem.pem'}
expected = tuple([None, '/path/to/pem.pem', None])
actual = _get_gcp_environment_credentials(None, None, None)
self.assertEqual(expected, actual)
# pem and creds are set, expect creds
fake_env_data = {'GCE_PEM_FILE_PATH': '/path/to/pem.pem',
'GCE_CREDENTIALS_FILE_PATH': '/path/to/creds.json'}
expected = tuple([None, '/path/to/creds.json', None])
actual = _get_gcp_environment_credentials(None, None, None)
self.assertEqual(expected, actual)
# expect GOOGLE_APPLICATION_CREDENTIALS over PEM
fake_env_data = {'GCE_PEM_FILE_PATH': '/path/to/pem.pem',
'GOOGLE_APPLICATION_CREDENTIALS': '/path/to/appcreds.json'}
expected = tuple([None, '/path/to/appcreds.json', None])
actual = _get_gcp_environment_credentials(None, None, None)
self.assertEqual(expected, actual)
# project tests
fake_env_data = {'GCE_PROJECT': 'my-project'}
expected = tuple([None, None, 'my-project'])
actual = _get_gcp_environment_credentials(None, None, None)
self.assertEqual(expected, actual)
fake_env_data = {'GOOGLE_CLOUD_PROJECT': 'my-cloud-project'}
expected = tuple([None, None, 'my-cloud-project'])
actual = _get_gcp_environment_credentials(None, None, None)
self.assertEqual(expected, actual)
# data passed in, picking up project id only
fake_env_data = {'GOOGLE_CLOUD_PROJECT': 'my-project'}
expected = tuple(['my-sa-email', '/path/to/creds.json', 'my-project'])
actual = _get_gcp_environment_credentials(
'my-sa-email', '/path/to/creds.json', None)
self.assertEqual(expected, actual)
@mock.patch('ansible.module_utils.gcp._get_gcp_environ_var',
side_effect=fake_get_gcp_environ_var)
def test_get_gcp_credentials(self, mockobj):
global fake_env_data
fake_env_data = {}
module = FakeModule()
module.params.data = {}
# Nothing is set, calls fail_json
with pytest.raises(ValueError):
_get_gcp_credentials(module)
# project_id (only) is set from Ansible params.
module.params.data['project_id'] = 'my-project'
actual = _get_gcp_credentials(
module, require_valid_json=True, check_libcloud=False)
expected = {'service_account_email': '',
'project_id': 'my-project',
'credentials_file': ''}
self.assertEqual(expected, actual)
| gpl-3.0 |
mpasternak/pyglet-fix-issue-552 | contrib/wydget/wydget/widgets/checkbox.py | 29 | 1402 | from pyglet.gl import *
from pyglet import clock
from pyglet.window import key, mouse
from wydget import element, event, util, anim, data
class Checkbox(element.Element):
name='checkbox'
is_focusable = True
def __init__(self, parent, value=False, width=16, height=16, **kw):
self.parent = parent
self.value = value
super(Checkbox, self).__init__(parent, None, None, None, width,
height, **kw)
def intrinsic_width(self):
return self.width or 16
def intrinsic_height(self):
return self.height or 16
def render(self, rect):
# XXX handle rect (and use images...)
glPushAttrib(GL_CURRENT_BIT)
glColor4f(.85, .85, .85, 1)
w, h = 16, 16
glRectf(1, 1, w, h)
if self.value:
glColor4f(0, 0, 0, 1)
w8, h8 = w//8, h//8
glBegin(GL_LINE_STRIP)
glVertex2f(1+w8, 1+4*h8)
glVertex2f(1+3*w8, 1+h8)
glVertex2f(1+7*w8, 1+7*h8)
glEnd()
glPopAttrib()
@event.default('checkbox')
def on_click(widget, *args):
widget.value = not widget.value
return event.EVENT_UNHANDLED
@event.default('checkbox')
def on_text(self, text):
if text in (' \r'):
self.getGUI().dispatch_event(self, 'on_click', 0, 0, mouse.LEFT, 0, 1)
return event.EVENT_HANDLED
return event.EVENT_UNHANDLED
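# Minimal usage sketch (illustrative only; "parent" stands for any existing
# wydget container element):
#
#   cb = Checkbox(parent, value=True)
#   # clicking the widget toggles cb.value via the default on_click handler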
| bsd-3-clause |
FBSLikan/Cetico-TCC | data/libs/reportlab/lib/logger.py | 3 | 1760 | #!/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2016
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/logger.py
__version__='3.3.0'
__doc__="Logging and warning framework, predating Python's logging package"
from sys import stderr
class Logger:
'''
An extended file type thing initially equivalent to sys.stderr
You can add/remove file type things; it has a write method
'''
def __init__(self):
self._fps = [stderr]
self._fns = {}
def add(self,fp):
'''add the file/string fp to the destinations'''
if isinstance(fp,str):
if fp in self._fns: return
            fn = fp
            fp = open(fn,'wb')
            self._fns[fn] = fp
self._fps.append(fp)
def remove(self,fp):
'''remove the file/string fp from the destinations'''
if isinstance(fp,str):
if fp not in self._fns: return
fn = fp
fp = self._fns[fn]
            del self._fns[fn]
if fp in self._fps:
del self._fps[self._fps.index(fp)]
def write(self,text):
'''write text to all the destinations'''
if text[-1]!='\n': text=text+'\n'
for fp in self._fps: fp.write(text)
def __call__(self,text):
self.write(text)
logger=Logger()
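# Minimal usage sketch (illustrative only; the log file path is an assumption):
#
#   logger.add('/tmp/reportlab.log')    # also write to this file
#   logger('something happened')        # goes to stderr and the file
#   logger.remove('/tmp/reportlab.log') # stop writing to the file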
class WarnOnce:
def __init__(self,kind='Warn'):
self.uttered = {}
self.pfx = '%s: '%kind
self.enabled = 1
def once(self,warning):
if warning not in self.uttered:
if self.enabled: logger.write(self.pfx + warning)
self.uttered[warning] = 1
def __call__(self,warning):
self.once(warning)
warnOnce=WarnOnce()
infoOnce=WarnOnce('Info')
| gpl-3.0 |
vmalloc/weber-utils | weber_utils/sorting.py | 1 | 1136 | import functools
from flask import request
from sqlalchemy import desc
from ._compat import httplib
from .request_utils import error_abort
def sort_query(query, allowed_fields=(), default=None):
allowed_fields = set(allowed_fields)
sort_param = request.args.get("sort", None)
if sort_param:
sort_fields = sort_param.split(",")
for sort_field in sort_fields:
descending = sort_field.startswith("-")
if descending:
sort_field = sort_field[1:]
if sort_field not in allowed_fields:
error_abort(httplib.BAD_REQUEST, "Cannot sort by field {0!r}".format(sort_field))
query = query.order_by(desc(sort_field) if descending else sort_field)
elif default is not None:
query = query.order_by(default)
return query
def sorted_view(func=None, **sort_kwargs):
if func is None:
return functools.partial(sorted_view, **sort_kwargs)
@functools.wraps(func)
def new_func(*args, **kwargs):
returned = func(*args, **kwargs)
return sort_query(returned, **sort_kwargs)
return new_func
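# --- Illustrative usage sketch (not part of the original module) ---
# How a Flask view might use the decorator above; `app` and the `Person`
# SQLAlchemy model are hypothetical stand-ins supplied by the caller.
#
#     @app.route('/people')
#     @sorted_view(allowed_fields=('name', 'age'), default='name')
#     def list_people():
#         return Person.query      # e.g. GET /people?sort=-age,name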
| bsd-3-clause |
dongjoon-hyun/tensorflow | tensorflow/python/keras/preprocessing/sequence.py | 12 | 4152 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for preprocessing sequence data.
"""
# pylint: disable=invalid-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras_preprocessing import sequence
from tensorflow.python.keras import utils
from tensorflow.python.util.tf_export import tf_export
pad_sequences = sequence.pad_sequences
make_sampling_table = sequence.make_sampling_table
skipgrams = sequence.skipgrams
# TODO(fchollet): consider making `_remove_long_seq` public.
_remove_long_seq = sequence._remove_long_seq # pylint: disable=protected-access
@tf_export('keras.preprocessing.sequence.TimeseriesGenerator')
class TimeseriesGenerator(sequence.TimeseriesGenerator, utils.Sequence):
"""Utility class for generating batches of temporal data.
This class takes in a sequence of data-points gathered at
equal intervals, along with time series parameters such as
stride, length of history, etc., to produce batches for
training/validation.
# Arguments
data: Indexable generator (such as list or Numpy array)
containing consecutive data points (timesteps).
          The data should be at least 2D, and axis 0 is expected
to be the time dimension.
targets: Targets corresponding to timesteps in `data`.
It should have same length as `data`.
length: Length of the output sequences (in number of timesteps).
sampling_rate: Period between successive individual timesteps
within sequences. For rate `r`, timesteps
`data[i]`, `data[i-r]`, ... `data[i - length]`
          are used to create a sample sequence.
stride: Period between successive output sequences.
For stride `s`, consecutive output samples would
be centered around `data[i]`, `data[i+s]`, `data[i+2*s]`, etc.
start_index: Data points earlier than `start_index` will not be used
in the output sequences. This is useful to reserve part of the
data for test or validation.
end_index: Data points later than `end_index` will not be used
in the output sequences. This is useful to reserve part of the
data for test or validation.
shuffle: Whether to shuffle output samples,
or instead draw them in chronological order.
reverse: Boolean: if `true`, timesteps in each output sample will be
in reverse chronological order.
batch_size: Number of timeseries samples in each batch
(except maybe the last one).
# Returns
A [Sequence](/utils/#sequence) instance.
# Examples
```python
from keras.preprocessing.sequence import TimeseriesGenerator
import numpy as np
data = np.array([[i] for i in range(50)])
targets = np.array([[i] for i in range(50)])
data_gen = TimeseriesGenerator(data, targets,
length=10, sampling_rate=2,
batch_size=2)
assert len(data_gen) == 20
batch_0 = data_gen[0]
x, y = batch_0
assert np.array_equal(x,
np.array([[[0], [2], [4], [6], [8]],
[[1], [3], [5], [7], [9]]]))
assert np.array_equal(y,
np.array([[10], [11]]))
```
"""
pass
tf_export('keras.preprocessing.sequence.pad_sequences')(pad_sequences)
tf_export(
'keras.preprocessing.sequence.make_sampling_table')(make_sampling_table)
tf_export('keras.preprocessing.sequence.skipgrams')(skipgrams)
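# --- Illustrative usage sketch (not part of the original module) ---
# Minimal demo of the re-exported `pad_sequences` helper; values are arbitrary.
# pad_sequences([[1, 2, 3], [4, 5]], maxlen=4)
# # -> array([[0, 1, 2, 3],
# #           [0, 0, 4, 5]], dtype=int32)   (pre-padding is the default)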
| apache-2.0 |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/matplotlib/tri/triplot.py | 8 | 3150 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
from matplotlib.tri.triangulation import Triangulation
def triplot(ax, *args, **kwargs):
"""
    Draw an unstructured triangular grid as lines and/or markers.
The triangulation to plot can be specified in one of two ways;
either::
triplot(triangulation, ...)
where triangulation is a :class:`matplotlib.tri.Triangulation`
object, or
::
triplot(x, y, ...)
triplot(x, y, triangles, ...)
triplot(x, y, triangles=triangles, ...)
triplot(x, y, mask=mask, ...)
triplot(x, y, triangles, mask=mask, ...)
in which case a Triangulation object will be created. See
    :class:`~matplotlib.tri.Triangulation` for an explanation of these
possibilities.
The remaining args and kwargs are the same as for
:meth:`~matplotlib.axes.Axes.plot`.
Return a list of 2 :class:`~matplotlib.lines.Line2D` containing
respectively:
- the lines plotted for triangles edges
- the markers plotted for triangles nodes
**Example:**
.. plot:: mpl_examples/pylab_examples/triplot_demo.py
"""
import matplotlib.axes
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
x, y, edges = (tri.x, tri.y, tri.edges)
# Decode plot format string, e.g., 'ro-'
fmt = ""
if len(args) > 0:
fmt = args[0]
linestyle, marker, color = matplotlib.axes._base._process_plot_format(fmt)
# Insert plot format string into a copy of kwargs (kwargs values prevail).
kw = kwargs.copy()
for key, val in zip(('linestyle', 'marker', 'color'),
(linestyle, marker, color)):
if val is not None:
kw[key] = kwargs.get(key, val)
# Draw lines without markers.
# Note 1: If we drew markers here, most markers would be drawn more than
# once as they belong to several edges.
# Note 2: We insert nan values in the flattened edges arrays rather than
# plotting directly (triang.x[edges].T, triang.y[edges].T)
# as it considerably speeds-up code execution.
linestyle = kw['linestyle']
kw_lines = kw.copy()
kw_lines['marker'] = 'None' # No marker to draw.
kw_lines['zorder'] = kw.get('zorder', 1) # Path default zorder is used.
if (linestyle is not None) and (linestyle not in ['None', '', ' ']):
tri_lines_x = np.insert(x[edges], 2, np.nan, axis=1)
tri_lines_y = np.insert(y[edges], 2, np.nan, axis=1)
tri_lines = ax.plot(tri_lines_x.ravel(), tri_lines_y.ravel(),
**kw_lines)
else:
tri_lines = ax.plot([], [], **kw_lines)
# Draw markers separately.
marker = kw['marker']
kw_markers = kw.copy()
kw_markers['linestyle'] = 'None' # No line to draw.
if (marker is not None) and (marker not in ['None', '', ' ']):
tri_markers = ax.plot(x, y, **kw_markers)
else:
tri_markers = ax.plot([], [], **kw_markers)
return tri_lines + tri_markers
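# --- Illustrative usage sketch (not part of the original module) ---
# Minimal call of the `triplot` helper above; the sample coordinates and the
# triangle list are arbitrary assumptions, not matplotlib test data.
if __name__ == '__main__':
    import numpy as np
    import matplotlib.pyplot as plt
    xs = np.array([0.0, 1.0, 0.0, 1.0])
    ys = np.array([0.0, 0.0, 1.0, 1.0])
    tris = [[0, 1, 2], [1, 3, 2]]
    fig, ax = plt.subplots()
    triplot(ax, xs, ys, tris, 'ko-')   # black edges, circle markers at the nodes
    plt.show()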
| apache-2.0 |
magvugr/AT | EntVirtual/lib/python2.7/site-packages/django/templatetags/tz.py | 277 | 5572 | from datetime import datetime, tzinfo
from django.template import Library, Node, TemplateSyntaxError
from django.utils import six, timezone
try:
import pytz
except ImportError:
pytz = None
register = Library()
# HACK: datetime is an old-style class, create a new-style equivalent
# so we can define additional attributes.
class datetimeobject(datetime, object):
pass
# Template filters
@register.filter
def localtime(value):
"""
Converts a datetime to local time in the active time zone.
This only makes sense within a {% localtime off %} block.
"""
return do_timezone(value, timezone.get_current_timezone())
@register.filter
def utc(value):
"""
Converts a datetime to UTC.
"""
return do_timezone(value, timezone.utc)
@register.filter('timezone')
def do_timezone(value, arg):
"""
Converts a datetime to local time in a given time zone.
The argument must be an instance of a tzinfo subclass or a time zone name.
If it is a time zone name, pytz is required.
Naive datetimes are assumed to be in local time in the default time zone.
"""
if not isinstance(value, datetime):
return ''
# Obtain a timezone-aware datetime
try:
if timezone.is_naive(value):
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
# Filters must never raise exceptions, and pytz' exceptions inherit
# Exception directly, not a specific subclass. So catch everything.
except Exception:
return ''
# Obtain a tzinfo instance
if isinstance(arg, tzinfo):
tz = arg
elif isinstance(arg, six.string_types) and pytz is not None:
try:
tz = pytz.timezone(arg)
except pytz.UnknownTimeZoneError:
return ''
else:
return ''
result = timezone.localtime(value, tz)
# HACK: the convert_to_local_time flag will prevent
# automatic conversion of the value to local time.
result = datetimeobject(result.year, result.month, result.day,
result.hour, result.minute, result.second,
result.microsecond, result.tzinfo)
result.convert_to_local_time = False
return result
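# Illustrative template-side usage of the filters above (arbitrary example,
# not part of the original module):
#   {% load tz %}
#   {{ value|timezone:"Europe/Paris" }}  {# render an aware datetime in Paris time #}
#   {{ value|utc }}                      {# render the same value in UTC #}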
# Template tags
class LocalTimeNode(Node):
"""
Template node class used by ``localtime_tag``.
"""
def __init__(self, nodelist, use_tz):
self.nodelist = nodelist
self.use_tz = use_tz
def render(self, context):
old_setting = context.use_tz
context.use_tz = self.use_tz
output = self.nodelist.render(context)
context.use_tz = old_setting
return output
class TimezoneNode(Node):
"""
Template node class used by ``timezone_tag``.
"""
def __init__(self, nodelist, tz):
self.nodelist = nodelist
self.tz = tz
def render(self, context):
with timezone.override(self.tz.resolve(context)):
output = self.nodelist.render(context)
return output
class GetCurrentTimezoneNode(Node):
"""
Template node class used by ``get_current_timezone_tag``.
"""
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = timezone.get_current_timezone_name()
return ''
@register.tag('localtime')
def localtime_tag(parser, token):
"""
Forces or prevents conversion of datetime objects to local time,
regardless of the value of ``settings.USE_TZ``.
Sample usage::
{% localtime off %}{{ value_in_utc }}{% endlocaltime %}
"""
bits = token.split_contents()
if len(bits) == 1:
use_tz = True
elif len(bits) > 2 or bits[1] not in ('on', 'off'):
raise TemplateSyntaxError("%r argument should be 'on' or 'off'" %
bits[0])
else:
use_tz = bits[1] == 'on'
nodelist = parser.parse(('endlocaltime',))
parser.delete_first_token()
return LocalTimeNode(nodelist, use_tz)
@register.tag('timezone')
def timezone_tag(parser, token):
"""
Enables a given time zone just for this block.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
    time zone name, or ``None``. If it is a time zone name, pytz is required.
If it is ``None``, the default time zone is used within the block.
Sample usage::
{% timezone "Europe/Paris" %}
It is {{ now }} in Paris.
{% endtimezone %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument (timezone)" %
bits[0])
tz = parser.compile_filter(bits[1])
nodelist = parser.parse(('endtimezone',))
parser.delete_first_token()
return TimezoneNode(nodelist, tz)
@register.tag("get_current_timezone")
def get_current_timezone_tag(parser, token):
"""
Stores the name of the current time zone in the context.
Usage::
{% get_current_timezone as TIME_ZONE %}
This will fetch the currently active time zone and put its name
into the ``TIME_ZONE`` context variable.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_timezone' requires "
"'as variable' (got %r)" % args)
return GetCurrentTimezoneNode(args[2])
| gpl-3.0 |
ArnossArnossi/django | tests/generic_inline_admin/tests.py | 154 | 22749 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.contrib.contenttypes.admin import GenericTabularInline
from django.contrib.contenttypes.forms import generic_inlineformset_factory
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.forms.formsets import DEFAULT_MAX_NUM
from django.forms.models import ModelForm
from django.test import (
RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from .admin import MediaInline, MediaPermanentInline, site as admin_site
from .models import Category, Episode, EpisodePermanent, Media, PhoneNumber
class TestDataMixin(object):
@classmethod
def setUpTestData(cls):
# password = "secret"
User.objects.create(
pk=100, username='super', first_name='Super', last_name='User', email='[email protected]',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
is_staff=True, last_login=datetime.datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
# Set DEBUG to True to ensure {% include %} will raise exceptions.
# That is how inlines are rendered and #9498 will bubble up if it is an issue.
@override_settings(
DEBUG=True,
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="generic_inline_admin.urls",
)
class GenericAdminViewTest(TestDataMixin, TestCase):
def setUp(self):
self.client.login(username='super', password='secret')
# Can't load content via a fixture (since the GenericForeignKey
# relies on content type IDs, which will vary depending on what
# other tests have been run), thus we do it here.
e = Episode.objects.create(name='This Week in Django')
self.episode_pk = e.pk
m = Media(content_object=e, url='http://example.com/podcast.mp3')
m.save()
self.mp3_media_pk = m.pk
m = Media(content_object=e, url='http://example.com/logo.png')
m.save()
self.png_media_pk = m.pk
def test_basic_add_GET(self):
"""
A smoke test to ensure GET on the add_view works.
"""
response = self.client.get(reverse('admin:generic_inline_admin_episode_add'))
self.assertEqual(response.status_code, 200)
def test_basic_edit_GET(self):
"""
A smoke test to ensure GET on the change_view works.
"""
response = self.client.get(
reverse('admin:generic_inline_admin_episode_change', args=(self.episode_pk,))
)
self.assertEqual(response.status_code, 200)
def test_basic_add_POST(self):
"""
A smoke test to ensure POST on add_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
}
response = self.client.post(reverse('admin:generic_inline_admin_episode_add'), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_basic_edit_POST(self):
"""
A smoke test to ensure POST on edit_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "3",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "2",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-0-id": "%d" % self.mp3_media_pk,
"generic_inline_admin-media-content_type-object_id-0-url": "http://example.com/podcast.mp3",
"generic_inline_admin-media-content_type-object_id-1-id": "%d" % self.png_media_pk,
"generic_inline_admin-media-content_type-object_id-1-url": "http://example.com/logo.png",
"generic_inline_admin-media-content_type-object_id-2-id": "",
"generic_inline_admin-media-content_type-object_id-2-url": "",
}
url = reverse('admin:generic_inline_admin_episode_change', args=(self.episode_pk,))
response = self.client.post(url, post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_generic_inline_formset(self):
EpisodeMediaFormSet = generic_inlineformset_factory(Media, can_delete=False, exclude=['description', 'keywords'], extra=3)
e = Episode.objects.get(name='This Week in Django')
# Works with no queryset
formset = EpisodeMediaFormSet(instance=e)
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.mp3_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="url" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
# A queryset can be used to alter display ordering
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.order_by('url'))
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.mp3_media_pk)
self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="url" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
# Works with a queryset that omits items
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.filter(url__endswith=".png"))
self.assertEqual(len(formset.forms), 4)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>')
def test_generic_inline_formset_factory(self):
# Regression test for #10522.
inline_formset = generic_inlineformset_factory(Media,
exclude=('url',))
# Regression test for #12340.
e = Episode.objects.get(name='This Week in Django')
formset = inline_formset(instance=e)
self.assertTrue(formset.get_queryset().ordered)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineAdminParametersTest(TestDataMixin, TestCase):
def setUp(self):
self.client.login(username='super', password='secret')
self.factory = RequestFactory()
def _create_object(self, model):
"""
Create a model with an attached Media object via GFK. We can't
load content via a fixture (since the GenericForeignKey relies on
content type IDs, which will vary depending on what other tests
have been run), thus we do it here.
"""
e = model.objects.create(name='This Week in Django')
Media.objects.create(content_object=e, url='http://example.com/podcast.mp3')
return e
def test_no_param(self):
"""
With one initial form, extra (default) at 3, there should be 4 forms.
"""
e = self._create_object(Episode)
response = self.client.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
formset = response.context['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 4)
self.assertEqual(formset.initial_form_count(), 1)
def test_extra_param(self):
"""
With extra=0, there should be one form.
"""
class ExtraInline(GenericTabularInline):
model = Media
extra = 0
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [ExtraInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 1)
self.assertEqual(formset.initial_form_count(), 1)
def testMaxNumParam(self):
"""
With extra=5 and max_num=2, there should be only 2 forms.
"""
class MaxNumInline(GenericTabularInline):
model = Media
extra = 5
max_num = 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [MaxNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 2)
self.assertEqual(formset.initial_form_count(), 1)
def test_min_num_param(self):
"""
With extra=3 and min_num=2, there should be five forms.
"""
class MinNumInline(GenericTabularInline):
model = Media
extra = 3
min_num = 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [MinNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 5)
self.assertEqual(formset.initial_form_count(), 1)
def test_get_extra(self):
class GetExtraInline(GenericTabularInline):
model = Media
extra = 4
def get_extra(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetExtraInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.extra, 2)
def test_get_min_num(self):
class GetMinNumInline(GenericTabularInline):
model = Media
min_num = 5
def get_min_num(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetMinNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.min_num, 2)
def test_get_max_num(self):
class GetMaxNumInline(GenericTabularInline):
model = Media
extra = 5
def get_max_num(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetMaxNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.max_num, 2)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineAdminWithUniqueTogetherTest(TestDataMixin, TestCase):
def setUp(self):
self.client.login(username='super', password='secret')
def test_add(self):
category_id = Category.objects.create(name='male').pk
post_data = {
"name": "John Doe",
# inline data
"generic_inline_admin-phonenumber-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-phonenumber-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-phonenumber-content_type-object_id-MAX_NUM_FORMS": "0",
"generic_inline_admin-phonenumber-content_type-object_id-0-id": "",
"generic_inline_admin-phonenumber-content_type-object_id-0-phone_number": "555-555-5555",
"generic_inline_admin-phonenumber-content_type-object_id-0-category": "%s" % category_id,
}
response = self.client.get(reverse('admin:generic_inline_admin_contact_add'))
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse('admin:generic_inline_admin_contact_add'), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_delete(self):
from .models import Contact
c = Contact.objects.create(name='foo')
PhoneNumber.objects.create(
object_id=c.id,
content_type=ContentType.objects.get_for_model(Contact),
phone_number="555-555-5555",
)
response = self.client.post(reverse('admin:generic_inline_admin_contact_delete', args=[c.pk]))
self.assertContains(response, 'Are you sure you want to delete')
@override_settings(ROOT_URLCONF="generic_inline_admin.urls")
class NoInlineDeletionTest(SimpleTestCase):
def test_no_deletion(self):
inline = MediaPermanentInline(EpisodePermanent, admin_site)
fake_request = object()
formset = inline.get_formset(fake_request)
self.assertFalse(formset.can_delete)
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
@override_settings(ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineModelAdminTest(SimpleTestCase):
def setUp(self):
self.site = AdminSite()
def test_get_formset_kwargs(self):
media_inline = MediaInline(Media, AdminSite())
# Create a formset with default arguments
formset = media_inline.get_formset(request)
self.assertEqual(formset.max_num, DEFAULT_MAX_NUM)
self.assertEqual(formset.can_order, False)
# Create a formset with custom keyword arguments
formset = media_inline.get_formset(request, max_num=100, can_order=True)
self.assertEqual(formset.max_num, 100)
self.assertEqual(formset.can_order, True)
def test_custom_form_meta_exclude_with_readonly(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected when
used in conjunction with `GenericInlineModelAdmin.readonly_fields`
and when no `ModelAdmin.exclude` is defined.
"""
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
readonly_fields = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['keywords', 'id', 'DELETE'])
def test_custom_form_meta_exclude(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected by
`GenericInlineModelAdmin.get_formset`, and overridden if
`ModelAdmin.exclude` or `GenericInlineModelAdmin.exclude` are defined.
Refs #15907.
"""
# First with `GenericInlineModelAdmin` -----------------
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
exclude = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['url', 'keywords', 'id', 'DELETE'])
# Then, only with `ModelForm` -----------------
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['description', 'keywords', 'id', 'DELETE'])
def test_get_fieldsets(self):
# Test that get_fieldsets is called when figuring out form fields.
# Refs #18681.
class MediaForm(ModelForm):
class Meta:
model = Media
fields = '__all__'
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
can_delete = False
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['url', 'description']})]
ma = MediaInline(Media, self.site)
form = ma.get_formset(None).form
self.assertEqual(form._meta.fields, ['url', 'description'])
def test_get_formsets_with_inlines_returns_tuples(self):
"""
Ensure that get_formsets_with_inlines() returns the correct tuples.
"""
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
class AlternateInline(GenericTabularInline):
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
AlternateInline, MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
inlines = ma.get_inline_instances(request)
for (formset, inline), other_inline in zip(ma.get_formsets_with_inlines(request), inlines):
self.assertIsInstance(formset, other_inline.get_formset(request).__class__)
| bsd-3-clause |
azumimuo/family-xbmc-addon | plugin.video.elysium/resources/lib/sources/tvhd.py | 1 | 3687 | import re
import urllib
import requests
import urlparse
import json
import xbmc
import datetime
from BeautifulSoup import BeautifulSoup
from resources.lib.modules import control
from resources.lib.modules import cleantitle
from schism_commons import quality_tag, google_tag, parseDOM, replaceHTMLCodes ,cleantitle_get, cleantitle_get_2, cleantitle_query, get_size, cleantitle_get_full
from schism_net import OPEN_URL
debridstatus = control.setting('debridsources')
class source:
def __init__(self):
self.base_link = 'http://tvshows-hdtv.org'
self.search_link = '/_new.episodes.%s.html'
def tvshow(self, imdb, tvdb, tvshowtitle, year):
try:
if not debridstatus == 'true': raise Exception()
url = {'tvshowtitle': tvshowtitle}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
self.elysium_url = []
try:
if not debridstatus == 'true': raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
data['season'], data['episode'] = season, episode
today = datetime.datetime.today().date()
today = today.strftime('%Y.%m.%d')
title = cleantitle_get(title)
titlecheck = "s%02de%02d" % (int(data['season']), int(data['episode']))
titlecheck = title + titlecheck
premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
year = premiered[0]
days = premiered[-1]
month = premiered[1]
next_day = int(days) + 1
ep_date = "%s.%02d.%02d" % (year,int(month),int(days))
# print ("HDTV PREMIERE", ep_date , today)
if int(re.sub('[^0-9]', '', str(ep_date))) > int(re.sub('[^0-9]', '', str(today))): raise Exception()
ep_next_date = "%s.%02d.%02d" % (year,int(month),int(next_day))
# print ("HDTV PREMIERE", ep_date, ep_next_date)
# print ("HDTV PREMIERE", today, ep_date, ep_next_date)
for day in [ep_date, ep_next_date]:
html = self.search_link % day
html = urlparse.urljoin(self.base_link, html)
# print ("HDTV PREMIERE 2 ", html)
r = OPEN_URL(html).content
for match in re.finditer('<center>\s*<b>\s*(.*?)\s*</b>.*?<tr>(.*?)</tr>', r, re.DOTALL):
release, links = match.groups()
release = re.sub('</?[^>]*>', '', release)
release = cleantitle_get(release)
if titlecheck in release:
# print ("HDTV PREMIERE 3 FOUND", release , links)
self.elysium_url.append([release,links])
return self.elysium_url
except:
pass
def sources(self, url, hostDict, hostprDict):
try:
sources = []
count = 0
for title,url in self.elysium_url:
quality = quality_tag(title)
for match in re.finditer('href="([^"]+)', url):
url = match.group(1)
try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
except: host = "none"
if any(value in url for value in hostprDict):
# print ("HDTV SOURCES", quality, url)
url = url.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'provider': 'tvhd', 'url': url, 'direct': False, 'debridonly': True})
return sources
except:
return sources
def resolve(self, url):
return url | gpl-2.0 |
sudheerchintala/LearnEraPlatForm | common/lib/symmath/symmath/formula.py | 46 | 25839 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Flexible python representation of a symbolic mathematical formula.
Accepts Presentation MathML, Content MathML (and could also do OpenMath).
Provides sympy representation.
"""
#
# File: formula.py
# Date: 04-May-12 (creation)
# Author: I. Chuang <[email protected]>
#
import os
import string # pylint: disable=W0402
import re
import logging
import operator
import requests
import sympy
from sympy.printing.latex import LatexPrinter
from sympy.printing.str import StrPrinter
from sympy import latex, sympify
from sympy.physics.quantum.qubit import Qubit
from sympy.physics.quantum.state import Ket
from xml.sax.saxutils import unescape
import unicodedata
from lxml import etree
#import subprocess
from copy import deepcopy
log = logging.getLogger(__name__)
log.warning("Dark code. Needs review before enabling in prod.")
os.environ['PYTHONIOENCODING'] = 'utf-8'
#-----------------------------------------------------------------------------
class dot(sympy.operations.LatticeOp): # pylint: disable=invalid-name, no-member
"""my dot product"""
zero = sympy.Symbol('dotzero')
identity = sympy.Symbol('dotidentity')
def _print_dot(_self, expr):
"""Print statement used for LatexPrinter"""
return r'{((%s) \cdot (%s))}' % (expr.args[0], expr.args[1])
LatexPrinter._print_dot = _print_dot # pylint: disable=protected-access
#-----------------------------------------------------------------------------
# unit vectors (for 8.02)
def _print_hat(_self, expr):
"""Print statement used for LatexPrinter"""
return '\\hat{%s}' % str(expr.args[0]).lower()
LatexPrinter._print_hat = _print_hat # pylint: disable=protected-access
StrPrinter._print_hat = _print_hat # pylint: disable=protected-access
#-----------------------------------------------------------------------------
# helper routines
def to_latex(expr):
"""
Convert expression to latex mathjax format
"""
if expr is None:
return ''
expr_s = latex(expr)
expr_s = expr_s.replace(r'\XI', 'XI') # workaround for strange greek
# substitute back into latex form for scripts
# literally something of the form
# 'scriptN' becomes '\\mathcal{N}'
# note: can't use something akin to the _print_hat method above because we sometimes get 'script(N)__B' or more complicated terms
expr_s = re.sub(
r'script([a-zA-Z0-9]+)',
'\\mathcal{\\1}',
expr_s
)
#return '<math>%s{}{}</math>' % (xs[1:-1])
if expr_s[0] == '$':
return '[mathjax]%s[/mathjax]<br>' % (expr_s[1:-1]) # for sympy v6
return '[mathjax]%s[/mathjax]<br>' % (expr_s) # for sympy v7
def my_evalf(expr, chop=False):
"""
Enhanced sympy evalf to handle lists of expressions
and catch eval failures without dropping out.
"""
if type(expr) == list:
try:
return [x.evalf(chop=chop) for x in expr]
except:
return expr
try:
return expr.evalf(chop=chop)
except:
return expr
def my_sympify(expr, normphase=False, matrix=False, abcsym=False, do_qubit=False, symtab=None):
"""
Version of sympify to import expression into sympy
"""
# make all lowercase real?
if symtab:
varset = symtab
else:
varset = {'p': sympy.Symbol('p'),
'g': sympy.Symbol('g'),
'e': sympy.E, # for exp
'i': sympy.I, # lowercase i is also sqrt(-1)
'Q': sympy.Symbol('Q'), # otherwise it is a sympy "ask key"
'I': sympy.Symbol('I'), # otherwise it is sqrt(-1)
'N': sympy.Symbol('N'), # or it is some kind of sympy function
'ZZ': sympy.Symbol('ZZ'), # otherwise it is the PythonIntegerRing
'XI': sympy.Symbol('XI'), # otherwise it is the capital \XI
'hat': sympy.Function('hat'), # for unit vectors (8.02)
}
if do_qubit: # turn qubit(...) into Qubit instance
varset.update({'qubit': Qubit,
'Ket': Ket,
'dot': dot,
'bit': sympy.Function('bit'),
})
if abcsym: # consider all lowercase letters as real symbols, in the parsing
for letter in string.lowercase:
if letter in varset: # exclude those already done
continue
varset.update({letter: sympy.Symbol(letter, real=True)})
sexpr = sympify(expr, locals=varset)
if normphase: # remove overall phase if sexpr is a list
if type(sexpr) == list:
if sexpr[0].is_number:
ophase = sympy.sympify('exp(-I*arg(%s))' % sexpr[0])
sexpr = [sympy.Mul(x, ophase) for x in sexpr]
def to_matrix(expr):
"""
Convert a list, or list of lists to a matrix.
"""
# if expr is a list of lists, and is rectangular, then return Matrix(expr)
if not type(expr) == list:
return expr
for row in expr:
if (not type(row) == list):
return expr
rdim = len(expr[0])
for row in expr:
if not len(row) == rdim:
return expr
return sympy.Matrix(expr)
if matrix:
sexpr = to_matrix(sexpr)
return sexpr
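# --- Illustrative usage sketch (not part of the original module) ---
# Combines the helpers above on an arbitrary input expression.
# expr = my_sympify('(1+i)**2')       # lowercase i is treated as sqrt(-1)
# my_evalf(expr, chop=True)           # -> 2.0*I (chop drops tiny numeric residuals)
# to_latex(expr)                      # -> '[mathjax]...[/mathjax]<br>' string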
#-----------------------------------------------------------------------------
# class for symbolic mathematical formulas
class formula(object):
"""
Representation of a mathematical formula object. Accepts mathml math expression
for constructing, and can produce sympy translation. The formula may or may not
include an assignment (=).
"""
def __init__(self, expr, asciimath='', options=None):
self.expr = expr.strip()
self.asciimath = asciimath
self.the_cmathml = None
self.the_sympy = None
self.options = options
def is_presentation_mathml(self):
"""
Check if formula is in mathml presentation format.
"""
return '<mstyle' in self.expr
def is_mathml(self):
"""
Check if formula is in mathml format.
"""
return '<math ' in self.expr
def fix_greek_in_mathml(self, xml):
"""
Recursively fix greek letters in passed in xml.
"""
def gettag(expr):
return re.sub('{http://[^}]+}', '', expr.tag)
for k in xml:
tag = gettag(k)
if tag == 'mi' or tag == 'ci':
usym = unicode(k.text)
try:
udata = unicodedata.name(usym)
except Exception:
udata = None
# print "usym = %s, udata=%s" % (usym,udata)
if udata: # eg "GREEK SMALL LETTER BETA"
if 'GREEK' in udata:
usym = udata.split(' ')[-1]
if 'SMALL' in udata:
usym = usym.lower()
#print "greek: ",usym
k.text = usym
self.fix_greek_in_mathml(k)
return xml
def preprocess_pmathml(self, xml):
r"""
Pre-process presentation MathML from ASCIIMathML to make it more
        acceptable for SnuggleTeX, and also to accommodate some sympy
conventions (eg hat(i) for \hat{i}).
This method would be a good spot to look for an integral and convert
it, if possible...
"""
if type(xml) == str or type(xml) == unicode:
xml = etree.fromstring(xml) # TODO: wrap in try
xml = self.fix_greek_in_mathml(xml) # convert greek utf letters to greek spelled out in ascii
def gettag(expr):
return re.sub('{http://[^}]+}', '', expr.tag)
def fix_pmathml(xml):
"""
f and g are processed as functions by asciimathml, eg "f-2" turns
into "<mrow><mi>f</mi><mo>-</mo></mrow><mn>2</mn>" this is
really terrible for turning into cmathml. undo this here.
"""
for k in xml:
tag = gettag(k)
if tag == 'mrow':
if len(k) == 2:
if gettag(k[0]) == 'mi' and k[0].text in ['f', 'g'] and gettag(k[1]) == 'mo':
idx = xml.index(k)
xml.insert(idx, deepcopy(k[0])) # drop the <mrow> container
xml.insert(idx + 1, deepcopy(k[1]))
xml.remove(k)
fix_pmathml(k)
fix_pmathml(xml)
def fix_hat(xml):
"""
hat i is turned into <mover><mi>i</mi><mo>^</mo></mover> ; mangle
            this into <mi>hat(f)</mi>; hat i is also sometimes turned into
<mover><mrow> <mi>j</mi> </mrow><mo>^</mo></mover>
"""
for k in xml:
tag = gettag(k)
if tag == 'mover':
if len(k) == 2:
if gettag(k[0]) == 'mi' and gettag(k[1]) == 'mo' and str(k[1].text) == '^':
newk = etree.Element('mi')
newk.text = 'hat(%s)' % k[0].text
xml.replace(k, newk)
if gettag(k[0]) == 'mrow' and gettag(k[0][0]) == 'mi' and gettag(k[1]) == 'mo' and str(k[1].text) == '^':
newk = etree.Element('mi')
newk.text = 'hat(%s)' % k[0][0].text
xml.replace(k, newk)
fix_hat(k)
fix_hat(xml)
def flatten_pmathml(xml):
"""
Give the text version of certain PMathML elements
Sometimes MathML will be given with each letter separated (it
doesn't know if its implicit multiplication or what). From an xml
node, find the (text only) variable name it represents. So it takes
<mrow>
<mi>m</mi>
<mi>a</mi>
<mi>x</mi>
</mrow>
and returns 'max', for easier use later on.
"""
tag = gettag(xml)
if tag == 'mn':
return xml.text
elif tag == 'mi':
return xml.text
elif tag == 'mrow':
return ''.join([flatten_pmathml(y) for y in xml])
raise Exception('[flatten_pmathml] unknown tag %s' % tag)
def fix_mathvariant(parent):
"""
Fix certain kinds of math variants
Literally replace <mstyle mathvariant="script"><mi>N</mi></mstyle>
with 'scriptN'. There have been problems using script_N or script(N)
"""
for child in parent:
if (gettag(child) == 'mstyle' and child.get('mathvariant') == 'script'):
newchild = etree.Element('mi')
newchild.text = 'script%s' % flatten_pmathml(child[0])
parent.replace(child, newchild)
fix_mathvariant(child)
fix_mathvariant(xml)
# find "tagged" superscripts
# they have the character \u200b in the superscript
# replace them with a__b so snuggle doesn't get confused
def fix_superscripts(xml):
""" Look for and replace sup elements with 'X__Y' or 'X_Y__Z'
In the javascript, variables with '__X' in them had an invisible
character inserted into the sup (to distinguish from powers)
E.g. normal:
<msubsup>
<mi>a</mi>
<mi>b</mi>
<mi>c</mi>
</msubsup>
to be interpreted '(a_b)^c' (nothing done by this method)
And modified:
<msubsup>
<mi>b</mi>
<mi>x</mi>
<mrow>
<mo>​</mo>
<mi>d</mi>
</mrow>
</msubsup>
to be interpreted 'a_b__c'
also:
<msup>
<mi>x</mi>
<mrow>
<mo>​</mo>
<mi>B</mi>
</mrow>
</msup>
to be 'x__B'
"""
for k in xml:
tag = gettag(k)
# match things like the last example--
# the second item in msub is an mrow with the first
# character equal to \u200b
if (
tag == 'msup' and
len(k) == 2 and gettag(k[1]) == 'mrow' and
gettag(k[1][0]) == 'mo' and k[1][0].text == u'\u200b' # whew
):
# replace the msup with 'X__Y'
k[1].remove(k[1][0])
newk = etree.Element('mi')
newk.text = '%s__%s' % (flatten_pmathml(k[0]), flatten_pmathml(k[1]))
xml.replace(k, newk)
# match things like the middle example-
# the third item in msubsup is an mrow with the first
# character equal to \u200b
if (
tag == 'msubsup' and
len(k) == 3 and gettag(k[2]) == 'mrow' and
gettag(k[2][0]) == 'mo' and k[2][0].text == u'\u200b' # whew
):
# replace the msubsup with 'X_Y__Z'
k[2].remove(k[2][0])
newk = etree.Element('mi')
newk.text = '%s_%s__%s' % (flatten_pmathml(k[0]), flatten_pmathml(k[1]), flatten_pmathml(k[2]))
xml.replace(k, newk)
fix_superscripts(k)
fix_superscripts(xml)
def fix_msubsup(parent):
"""
Snuggle returns an error when it sees an <msubsup> replace such
elements with an <msup>, except the first element is of
the form a_b. I.e. map a_b^c => (a_b)^c
"""
for child in parent:
# fix msubsup
if (gettag(child) == 'msubsup' and len(child) == 3):
newchild = etree.Element('msup')
newbase = etree.Element('mi')
newbase.text = '%s_%s' % (flatten_pmathml(child[0]), flatten_pmathml(child[1]))
newexp = child[2]
newchild.append(newbase)
newchild.append(newexp)
parent.replace(child, newchild)
fix_msubsup(child)
fix_msubsup(xml)
self.xml = xml # pylint: disable=attribute-defined-outside-init
return self.xml
def get_content_mathml(self):
if self.the_cmathml:
return self.the_cmathml
# pre-process the presentation mathml before sending it to snuggletex to convert to content mathml
try:
xml = self.preprocess_pmathml(self.expr)
except Exception, err:
log.warning('Err %s while preprocessing; expr=%s', err, self.expr)
return "<html>Error! Cannot process pmathml</html>"
pmathml = etree.tostring(xml, pretty_print=True)
self.the_pmathml = pmathml # pylint: disable=attribute-defined-outside-init
# convert to cmathml
self.the_cmathml = self.GetContentMathML(self.asciimath, pmathml)
return self.the_cmathml
cmathml = property(get_content_mathml, None, None, 'content MathML representation')
def make_sympy(self, xml=None):
"""
Return sympy expression for the math formula.
The math formula is converted to Content MathML then that is parsed.
This is a recursive function, called on every CMML node. Support for
more functions can be added by modifying opdict, abould halfway down
"""
if self.the_sympy:
return self.the_sympy
if xml is None: # root
if not self.is_mathml():
return my_sympify(self.expr)
if self.is_presentation_mathml():
cmml = None
try:
cmml = self.cmathml
xml = etree.fromstring(str(cmml))
except Exception, err:
if 'conversion from Presentation MathML to Content MathML was not successful' in cmml:
msg = "Illegal math expression"
else:
msg = 'Err %s while converting cmathml to xml; cmml=%s' % (err, cmml)
raise Exception(msg)
xml = self.fix_greek_in_mathml(xml)
self.the_sympy = self.make_sympy(xml[0])
else:
xml = etree.fromstring(self.expr)
xml = self.fix_greek_in_mathml(xml)
self.the_sympy = self.make_sympy(xml[0])
return self.the_sympy
def gettag(expr):
return re.sub('{http://[^}]+}', '', expr.tag)
# simple math
def op_divide(*args):
if not len(args) == 2:
raise Exception('divide given wrong number of arguments!')
# print "divide: arg0=%s, arg1=%s" % (args[0],args[1])
return sympy.Mul(args[0], sympy.Pow(args[1], -1))
def op_plus(*args):
return args[0] if len(args) == 1 else op_plus(*args[:-1]) + args[-1]
def op_times(*args):
return reduce(operator.mul, args)
def op_minus(*args):
if len(args) == 1:
return -args[0]
if not len(args) == 2:
raise Exception('minus given wrong number of arguments!')
#return sympy.Add(args[0],-args[1])
return args[0] - args[1]
opdict = {
'plus': op_plus,
'divide': operator.div, # should this be op_divide?
'times': op_times,
'minus': op_minus,
'root': sympy.sqrt,
'power': sympy.Pow,
'sin': sympy.sin,
'cos': sympy.cos,
'tan': sympy.tan,
'cot': sympy.cot,
'sinh': sympy.sinh,
'cosh': sympy.cosh,
'coth': sympy.coth,
'tanh': sympy.tanh,
'asin': sympy.asin,
'acos': sympy.acos,
'atan': sympy.atan,
'atan2': sympy.atan2,
'acot': sympy.acot,
'asinh': sympy.asinh,
'acosh': sympy.acosh,
'atanh': sympy.atanh,
'acoth': sympy.acoth,
'exp': sympy.exp,
'log': sympy.log,
'ln': sympy.ln,
}
# simple symbols - TODO is this code used?
nums1dict = {
'pi': sympy.pi,
}
def parsePresentationMathMLSymbol(xml):
"""
Parse <msub>, <msup>, <mi>, and <mn>
"""
tag = gettag(xml)
if tag == 'mn':
return xml.text
elif tag == 'mi':
return xml.text
elif tag == 'msub':
return '_'.join([parsePresentationMathMLSymbol(y) for y in xml])
elif tag == 'msup':
return '^'.join([parsePresentationMathMLSymbol(y) for y in xml])
raise Exception('[parsePresentationMathMLSymbol] unknown tag %s' % tag)
# parser tree for Content MathML
tag = gettag(xml)
# first do compound objects
if tag == 'apply': # apply operator
opstr = gettag(xml[0])
if opstr in opdict:
op = opdict[opstr] # pylint: disable=invalid-name
args = [self.make_sympy(expr) for expr in xml[1:]]
try:
res = op(*args)
except Exception, err:
self.args = args # pylint: disable=attribute-defined-outside-init
self.op = op # pylint: disable=attribute-defined-outside-init, invalid-name
raise Exception('[formula] error=%s failed to apply %s to args=%s' % (err, opstr, args))
return res
else:
raise Exception('[formula]: unknown operator tag %s' % (opstr))
elif tag == 'list': # square bracket list
if gettag(xml[0]) == 'matrix':
return self.make_sympy(xml[0])
else:
return [self.make_sympy(expr) for expr in xml]
elif tag == 'matrix':
return sympy.Matrix([self.make_sympy(expr) for expr in xml])
elif tag == 'vector':
return [self.make_sympy(expr) for expr in xml]
# atoms are below
elif tag == 'cn': # number
return sympy.sympify(xml.text)
# return float(xml.text)
elif tag == 'ci': # variable (symbol)
if len(xml) > 0 and (gettag(xml[0]) == 'msub' or gettag(xml[0]) == 'msup'): # subscript or superscript
usym = parsePresentationMathMLSymbol(xml[0])
sym = sympy.Symbol(str(usym))
else:
usym = unicode(xml.text)
if 'hat' in usym:
sym = my_sympify(usym)
else:
if usym == 'i' and self.options is not None and 'imaginary' in self.options: # i = sqrt(-1)
sym = sympy.I
else:
sym = sympy.Symbol(str(usym))
return sym
else: # unknown tag
raise Exception('[formula] unknown tag %s' % tag)
sympy = property(make_sympy, None, None, 'sympy representation')
def GetContentMathML(self, asciimath, mathml):
"""
Handle requests to snuggletex API to convert the Ascii math to MathML
"""
# url = 'http://192.168.1.2:8080/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo'
# url = 'http://127.0.0.1:8080/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo'
url = 'https://math-xserver.mitx.mit.edu/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo'
if 1:
payload = {
'asciiMathInput': asciimath,
'asciiMathML': mathml,
#'asciiMathML':unicode(mathml).encode('utf-8'),
}
headers = {'User-Agent': "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.13) Gecko/20080311 Firefox/2.0.0.13"}
request = requests.post(url, data=payload, headers=headers, verify=False)
request.encoding = 'utf-8'
ret = request.text
# print "encoding: ", request.encoding
mode = 0
cmathml = []
for k in ret.split('\n'):
if 'conversion to Content MathML' in k:
mode = 1
continue
if mode == 1:
if '<h3>Maxima Input Form</h3>' in k:
mode = 0
continue
cmathml.append(k)
cmathml = '\n'.join(cmathml[2:])
cmathml = '<math xmlns="http://www.w3.org/1998/Math/MathML">\n' + unescape(cmathml) + '\n</math>'
# print cmathml
return cmathml
#-----------------------------------------------------------------------------
def test1():
"""Test XML strings - addition"""
xmlstr = """
<math xmlns="http://www.w3.org/1998/Math/MathML">
<apply>
<plus/>
<cn>1</cn>
<cn>2</cn>
</apply>
</math>
"""
return formula(xmlstr)
def test2():
"""Test XML strings - addition, Greek alpha"""
xmlstr = u"""
<math xmlns="http://www.w3.org/1998/Math/MathML">
<apply>
<plus/>
<cn>1</cn>
<apply>
<times/>
<cn>2</cn>
<ci>α</ci>
</apply>
</apply>
</math>
"""
return formula(xmlstr)
def test3():
"""Test XML strings - addition, Greek gamma"""
xmlstr = """
<math xmlns="http://www.w3.org/1998/Math/MathML">
<apply>
<divide/>
<cn>1</cn>
<apply>
<plus/>
<cn>2</cn>
<ci>γ</ci>
</apply>
</apply>
</math>
"""
return formula(xmlstr)
def test4():
"""Test XML strings - addition, Greek alpha, mfrac"""
xmlstr = u"""
<math xmlns="http://www.w3.org/1998/Math/MathML">
<mstyle displaystyle="true">
<mn>1</mn>
<mo>+</mo>
<mfrac>
<mn>2</mn>
<mi>α</mi>
</mfrac>
</mstyle>
</math>
"""
return formula(xmlstr)
def test5():
"""Test XML strings - sum of two matrices"""
xmlstr = u"""
<math xmlns="http://www.w3.org/1998/Math/MathML">
<mstyle displaystyle="true">
<mrow>
<mi>cos</mi>
<mrow>
<mo>(</mo>
<mi>θ</mi>
<mo>)</mo>
</mrow>
</mrow>
<mo>⋅</mo>
<mrow>
<mo>[</mo>
<mtable>
<mtr>
<mtd>
<mn>1</mn>
</mtd>
<mtd>
<mn>0</mn>
</mtd>
</mtr>
<mtr>
<mtd>
<mn>0</mn>
</mtd>
<mtd>
<mn>1</mn>
</mtd>
</mtr>
</mtable>
<mo>]</mo>
</mrow>
<mo>+</mo>
<mrow>
<mo>[</mo>
<mtable>
<mtr>
<mtd>
<mn>0</mn>
</mtd>
<mtd>
<mn>1</mn>
</mtd>
</mtr>
<mtr>
<mtd>
<mn>1</mn>
</mtd>
<mtd>
<mn>0</mn>
</mtd>
</mtr>
</mtable>
<mo>]</mo>
</mrow>
</mstyle>
</math>
"""
return formula(xmlstr)
def test6():
"""Test XML strings - imaginary numbers"""
xmlstr = u"""
<math xmlns="http://www.w3.org/1998/Math/MathML">
<mstyle displaystyle="true">
<mn>1</mn>
<mo>+</mo>
<mi>i</mi>
</mstyle>
</math>
"""
return formula(xmlstr, options='imaginary')
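# --- Illustrative usage sketch (not part of the original module) ---
# The Content MathML fixtures above parse locally; the Presentation MathML ones
# (test4, test5) additionally need the external snuggletex conversion service.
# test2().sympy      # -> 2*alpha + 1
# test3().sympy      # -> 1/(gamma + 2)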
| agpl-3.0 |
yland/coala | coalib/parsing/StringProcessing/Core.py | 9 | 20330 | import re
from coalib.parsing.StringProcessing import InBetweenMatch
from coalib.parsing.StringProcessing.Filters import limit, trim_empty_matches
def search_for(pattern, string, flags=0, max_match=0, use_regex=False):
"""
Searches for a given pattern in a string.
:param pattern: A pattern that defines what to match.
:param string: The string to search in.
:param flags: Additional flags to pass to the regex processor.
:param max_match: Defines the maximum number of matches to perform. If 0 or
less is provided, the number of splits is not limited.
:param use_regex: Specifies whether to treat the pattern as a regex or
simple string.
:return: An iterator returning MatchObject's.
"""
if not use_regex:
pattern = re.escape(pattern)
return limit(re.finditer(pattern, string, flags), max_match)
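# Illustrative usage of `search_for` (arbitrary sample strings):
# [m.start() for m in search_for("an", "banana")]                  # -> [1, 3]
# [m.group() for m in search_for("a.", "banana", use_regex=True)]  # -> ['an', 'an']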
def unescaped_search_for(pattern,
string,
flags=0,
max_match=0,
use_regex=False):
"""
Searches for a given pattern in a string that is not escaped.
:param pattern: A pattern that defines what to match unescaped.
:param string: The string to search in.
:param flags: Additional flags to pass to the regex processor.
:param max_match: Defines the maximum number of matches to perform. If 0 or
less is provided, the number of splits is not limited.
:param use_regex: Specifies whether to treat the pattern as a regex or
simple string.
:return: An iterator returning MatchObject's.
"""
_iter = limit(
filter(lambda match: not position_is_escaped(string, match.start()),
search_for(pattern, string, flags, 0, use_regex)),
max_match)
for elem in _iter:
yield elem
def _split(string,
max_split,
remove_empty_matches,
matching_function,
*args,
**kwargs):
"""
Splits a string using a given matching-function that matches the separator.
This function implements general features needed from the split functions
in this module (the max-split and remove-empty-matches features).
:param string: The string where to split.
:param max_split: Defines the maximum number of splits. If 0 or
less is provided, the number of splits is not
limited.
:param remove_empty_matches: Defines whether empty entries should
be removed from the result.
:param matching_function: The matching function. It must return
MatchObject's containing the matched
split-separator.
:param args: Positional arguments to invoke the
matching_function with.
:param kwargs: Key-value arguments to invoke the
matching_function with.
"""
last_end_pos = 0
for match in matching_function(*args, **kwargs):
split_string = string[last_end_pos: match.start()]
last_end_pos = match.end()
if not remove_empty_matches or len(split_string) != 0:
yield split_string
max_split -= 1
if max_split == 0:
break # only reachable when max_split > 0
# Append the rest of the string.
if not remove_empty_matches or len(string) > last_end_pos:
yield string[last_end_pos:]
def split(pattern,
string,
max_split=0,
remove_empty_matches=False,
use_regex=False):
"""
    Splits the given string by the specified pattern. The newline character
    (\\n) is not a natural split pattern (if you don't specify it yourself).
This function ignores escape sequences.
:param pattern: A pattern that defines where to split.
:param string: The string to split by the defined pattern.
:param max_split: Defines the maximum number of splits. If 0 or
less is provided, the number of splits is not
limited.
:param remove_empty_matches: Defines whether empty entries should
be removed from the result.
:param use_regex: Specifies whether to treat the split pattern
as a regex or simple string.
:return: An iterator returning the split up strings.
"""
return _split(string,
max_split,
remove_empty_matches,
search_for,
pattern,
string,
0,
0,
use_regex)
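# Illustrative usage (added example, not part of the original module): split on
# a literal separator and drop the empty entry.
#
#   >>> list(split(",", "a,,b", remove_empty_matches=True))
#   ['a', 'b']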
def unescaped_split(pattern,
string,
max_split=0,
remove_empty_matches=False,
use_regex=False):
"""
    Splits the given string by the specified pattern. The newline character
    (\\n) is not a natural split pattern (if you don't specify it yourself).
This function handles escaped split-patterns (and so splits only patterns
that are unescaped).
:param pattern: A pattern that defines where to split.
:param string: The string to split by the defined pattern.
:param max_split: Defines the maximum number of splits. If 0 or
less is provided, the number of splits is not
limited.
:param remove_empty_matches: Defines whether empty entries should
be removed from the result.
:param use_regex: Specifies whether to treat the split pattern
as a regex or simple string.
:return: An iterator returning the split up strings.
"""
return _split(string,
max_split,
remove_empty_matches,
unescaped_search_for,
pattern,
string,
0,
0,
use_regex)
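# Illustrative usage (added example, not part of the original module): the
# escaped ";" is kept, only the unescaped one splits.
#
#   >>> list(unescaped_split(";", "a\\;b;c"))
#   ['a\\;b', 'c']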
def search_in_between(begin,
end,
string,
max_matches=0,
remove_empty_matches=False,
use_regex=False):
"""
Searches for a string enclosed between a specified begin- and end-sequence.
    Enclosed newlines (\\n) are also put into the result. Doesn't handle escape sequences.
:param begin: A pattern that defines where to start
matching.
:param end: A pattern that defines where to end matching.
:param string: The string where to search in.
:param max_matches: Defines the maximum number of matches. If 0 or
less is provided, the number of matches is not
limited.
:param remove_empty_matches: Defines whether empty entries should
be removed from the result. An entry is
considered empty if no inner match was
performed (regardless of matched start and
end patterns).
:param use_regex: Specifies whether to treat the begin and end
patterns as regexes or simple strings.
:return: An iterator returning InBetweenMatch objects
that hold information about the matched begin,
inside and end string matched.
"""
if not use_regex:
begin = re.escape(begin)
end = re.escape(end)
# No need to compile the begin sequence, capturing groups get escaped.
begin_pattern_groups = 0
else:
# Compilation of the begin sequence is needed to get the number of
# capturing groups in it.
begin_pattern_groups = re.compile(begin).groups
# Regex explanation:
# 1. (begin) A capturing group that matches the begin sequence.
# 2. (.*?) Match any char unlimited times, as few times as possible. Save
# the match in the second capturing group (`match.group(2)`).
# 3. (end) A capturing group that matches the end sequence.
# Because the previous group is lazy (matches as few times as
# possible) the next occurring end-sequence is matched.
regex = "(" + begin + ")(.*?)(" + end + ")"
matches = re.finditer(regex, string, re.DOTALL)
if remove_empty_matches:
matches = trim_empty_matches(matches,
(begin_pattern_groups + 2,))
matches = limit(matches, max_matches)
for m in matches:
yield InBetweenMatch.from_values(m.group(1),
m.start(1),
m.group(begin_pattern_groups + 2),
m.start(begin_pattern_groups + 2),
m.group(begin_pattern_groups + 3),
m.start(begin_pattern_groups + 3))
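# Illustrative usage (added example, not part of the original module): the
# strings enclosed in parentheses are available via the `inside` attribute.
#
#   >>> [str(m.inside) for m in search_in_between("(", ")", "(1) (2)")]
#   ['1', '2']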
def unescaped_search_in_between(begin,
end,
string,
max_matches=0,
remove_empty_matches=False,
use_regex=False):
"""
Searches for a string enclosed between a specified begin- and end-sequence.
    Enclosed newlines (\\n) are also put into the result.
Handles escaped begin- and end-sequences (and so only patterns that are
unescaped).
.. warning::
Using the escape character '\\' in the begin- or end-sequences
the function can return strange results. The backslash can
interfere with the escaping regex-sequence used internally to
match the enclosed string.
:param begin: A regex pattern that defines where to start
matching.
:param end: A regex pattern that defines where to end
matching.
:param string: The string where to search in.
:param max_matches: Defines the maximum number of matches. If 0 or
less is provided, the number of matches is not
limited.
:param remove_empty_matches: Defines whether empty entries should
be removed from the result. An entry is
considered empty if no inner match was
performed (regardless of matched start and
end patterns).
:param use_regex: Specifies whether to treat the begin and end
patterns as regexes or simple strings.
:return: An iterator returning the matched strings.
"""
if not use_regex:
begin = re.escape(begin)
end = re.escape(end)
# No need to compile the begin sequence, capturing groups get escaped.
begin_pattern_groups = 0
else:
# Compilation of the begin sequence is needed to get the number of
# capturing groups in it.
begin_pattern_groups = re.compile(begin).groups
# Regex explanation:
# 1. (?<!\\)(?:\\\\)* Unescapes the following char. The first part of
# this regex is a look-behind assertion. Only match
# the following if no single backslash is before it.
# The second part matches all double backslashes.
# In fact this sequence matches all escapes that
# occur as a multiple of two, means the following
# statement is not escaped.
# 2. (begin) A capturing group that matches the begin sequence.
# 3. (.*?) Match any char unlimited times, as few times as
# possible. Save the match in the capturing group
# after all capturing groups that can appear in
# 'begin'.
# 4. (?<!\\)((?:\\\\)*) Again the unescaping regex, but now all escape-
# characters get captured.
# 5. (end) A capturing group that matches the end sequence.
# Because the 3. group is lazy (matches as few times
# as possible) the next occurring end-sequence is
# matched.
regex = (r"(?<!\\)(?:\\\\)*(" + begin + r")(.*?)(?<!\\)((?:\\\\)*)(" +
end + ")")
matches = re.finditer(regex, string, re.DOTALL)
if remove_empty_matches:
matches = trim_empty_matches(matches,
(begin_pattern_groups + 2,
begin_pattern_groups + 3))
matches = limit(matches, max_matches)
for m in matches:
yield InBetweenMatch.from_values(m.group(1),
m.start(1),
m.group(begin_pattern_groups + 2) +
m.group(begin_pattern_groups + 3),
m.start(begin_pattern_groups + 2),
m.group(begin_pattern_groups + 4),
m.start(begin_pattern_groups + 4))
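# Illustrative usage (added example, not part of the original module): the
# escaped "(" does not start a match.
#
#   >>> [str(m.inside)
#   ...  for m in unescaped_search_in_between("(", ")", "\\(x) (y)")]
#   ['y']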
def escape(string, escape_chars, escape_with="\\"):
"""
Escapes all chars given inside the given string.
:param string: The string where to escape characters.
:param escape_chars: The string or Iterable that contains the characters
to escape. Each char inside this string will be
escaped in the order given. Duplicate chars are
allowed.
:param escape_with: The string that should be used as escape sequence.
:return: The escaped string.
"""
for chr in escape_chars:
string = string.replace(chr, escape_with + chr)
return string
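# Illustrative usage (added example, not part of the original module):
#
#   >>> escape("a;b;c", ";")
#   'a\\;b\\;c'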
def unescape(string):
"""
    Trims off all escape characters from the given string.
:param string: The string to unescape.
"""
regex = r"\\(.)|\\$"
return re.sub(regex, lambda m: m.group(1), string, 0, re.DOTALL)
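# Illustrative usage (added example, not part of the original module): the
# inverse of the escape() call shown above.
#
#   >>> unescape("a\\;b\\;c")
#   'a;b;c'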
def position_is_escaped(string, position=None):
"""
Checks whether a char at a specific position of the string is preceded by
an odd number of backslashes.
:param string: Arbitrary string
:param position: Position of character in string that should be checked
:return: True if the character is escaped, False otherwise
"""
escapes_uneven = False
# iterate backwards, starting one left of position.
# Slicing provides a sane default behaviour and prevents IndexErrors
for i in range(len(string[:position]) - 1, -1, -1):
if string[i] == '\\':
escapes_uneven = not escapes_uneven
else:
break
return escapes_uneven
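# Illustrative usage (added example, not part of the original module): one
# preceding backslash escapes the character, two do not.
#
#   >>> position_is_escaped("a\\;b", 2)
#   True
#   >>> position_is_escaped("a\\\\;b", 3)
#   False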
def unescaped_rstrip(string):
"""
    Strips whitespaces from the right side of the given string that are not
escaped.
:param string: The string where to strip whitespaces from.
:return: The right-stripped string.
"""
stripped = string.rstrip()
if (len(string) > len(stripped) and
position_is_escaped(stripped, len(string))):
stripped += string[len(stripped)]
return stripped
def unescaped_strip(string):
"""
Strips whitespaces of the given string taking escape characters into
account.
:param string: The string where to strip whitespaces from.
:return: The stripped string.
"""
return unescaped_rstrip(string).lstrip()
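# Illustrative usage (added example, not part of the original module): an
# escaped trailing space survives the strip.
#
#   >>> unescaped_strip("  hello\\ ")
#   'hello\\ '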
def _nested_search_in_between(begin, end, string):
"""
Searches for a string enclosed between a specified begin- and end-sequence.
Matches infinite times.
This is a function specifically designed to be invoked from
``nested_search_in_between()``.
:param begin: A regex pattern that defines where to start matching.
:param end: A regex pattern that defines where to end matching.
:param string: The string where to search in.
:return: An iterator returning the matched strings.
"""
# Regex explanation:
# 1. (begin) A capturing group that matches the begin sequence.
# 2. (end) A capturing group that matches the end sequence. Because the
# 1st group is lazy (matches as few times as possible) the next
# occurring end-sequence is matched.
# The '|' in the regex matches either the first or the second part.
regex = "(" + begin + ")|(" + end + ")"
left_match = None
nesting_level = 0
for match in re.finditer(regex, string, re.DOTALL):
if match.group(1) is not None:
if nesting_level == 0:
# Store the match of the first nesting level to be able to
# return the string until the next fitting end sequence.
left_match = match
nesting_level += 1
else:
# The second group matched. This is the only alternative if group 1
# didn't, otherwise no match would be performed. No need to compile
# the begin and end sequences to get the number of capturing groups
# in them.
if nesting_level > 0:
nesting_level -= 1
                if nesting_level == 0 and left_match is not None:
yield InBetweenMatch.from_values(
left_match.group(),
left_match.start(),
string[left_match.end(): match.start()],
left_match.end(),
match.group(),
match.start())
left_match = None
def nested_search_in_between(begin,
end,
string,
max_matches=0,
remove_empty_matches=False,
use_regex=False):
"""
Searches for a string enclosed between a specified begin- and end-sequence.
    Enclosed newlines (\\n) are also put into the result. Doesn't handle escape sequences,
but supports nesting.
    Nested sequences are ignored during the match. This means you get only the first
nesting level returned. If you want to acquire more levels, just reinvoke
this function again on the return value.
Using the same begin- and end-sequence won't match anything.
:param begin: A pattern that defines where to start
matching.
:param end: A pattern that defines where to end matching.
:param string: The string where to search in.
:param max_matches: Defines the maximum number of matches. If 0 or
                                 less is provided, the number of matches is not
limited.
:param remove_empty_matches: Defines whether empty entries should
be removed from the result. An entry is
considered empty if no inner match was
performed (regardless of matched start and
end patterns).
:param use_regex: Specifies whether to treat the begin and end
patterns as regexes or simple strings.
:return: An iterator returning the matched strings.
"""
if not use_regex:
begin = re.escape(begin)
end = re.escape(end)
strings = _nested_search_in_between(begin, end, string)
if remove_empty_matches:
strings = filter(lambda x: str(x.inside) != "", strings)
return limit(strings, max_matches)
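# Illustrative usage (added example, not part of the original module): only the
# outermost nesting level is returned.
#
#   >>> [str(m.inside)
#   ...  for m in nested_search_in_between("(", ")", "(a(b)c) (d)")]
#   ['a(b)c', 'd']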
| agpl-3.0 |
rkokkelk/CouchPotatoServer | libs/rtorrent/err.py | 182 | 1638 | # Copyright (c) 2013 Chris Lucas, <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from rtorrent.common import convert_version_tuple_to_str
class RTorrentVersionError(Exception):
def __init__(self, min_version, cur_version):
self.min_version = min_version
self.cur_version = cur_version
self.msg = "Minimum version required: {0}".format(
convert_version_tuple_to_str(min_version))
def __str__(self):
return(self.msg)
class MethodError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return(self.msg)
| gpl-3.0 |
kg-bot/SupyBot | plugins/Nag/plugin.py | 1 | 4578 | ###
# Copyright (c) 2006, Jeremy Kelley
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'djangobot.settings'
import time
import supybot.utils as utils
from supybot.commands import *
import supybot.ircmsgs as ircmsgs
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.schedule as schedule
import supybot.callbacks as callbacks
from nag import NagEvent, NagParser, naglist, listnags, getnag
seconds = 20
class Nag(callbacks.Plugin):
"""This plugin provides the ability for the bot to nag a user on
whatever the user needs nagging on.
"""
threaded = True
def __init__(self, irc):
self.__parent = super(Nag, self)
self.__parent.__init__(irc)
f = self._makeCommandFunction(irc)
id = schedule.addEvent(f, time.time() + seconds)
def die(self):
self.__parent.die()
schedule.removeEvent(self.name())
def _makeCommandFunction(self, irc):
"""Makes a function suitable for scheduling from command."""
def f():
id = schedule.addEvent(f, time.time() + seconds)
for y in naglist():
for x in y.audience.split(','):
target = x
s = "NAGGING YOU " + y.action
irc.queueMsg(ircmsgs.privmsg(target, s))
y.delete()
return f
def nag(self, irc, msg, args, cmdstr):
"""<command statement>
        See http://33ad.org/wiki/NagBot for help on the syntax.
"""
np = NagParser(msg.nick)
ne = np.parse("nag " + cmdstr)
if ne.is_complete():
if ne.event.audience[:-1] == msg.nick or msg.nick == "nod":
ne.save()
dt = ne.event.time.strftime("for %a %b%d at %R")
s = "#%d %s - NAG %s" % (ne.event.id, dt, ne.event.action)
irc.reply("Added %s" % s)
else:
irc.reply("can't create nags for others")
else:
irc.reply(
'something was wrong with your command:'
+ ne.error
)
nag = wrap(nag, ['text'])
remind = wrap(nag, ['text'])
def nags(self, irc, msg, args):
"""no arguments
lists nags going to you
"""
target = msg.nick
n = listnags(msg.nick)
for e in n:
dt = e.time.strftime("on %a %b%d at %R")
s = "#%d %s - NAG %s" % (e.id, dt, e.action)
irc.queueMsg(ircmsgs.privmsg(target, s))
if not n:
irc.queueMsg(ircmsgs.privmsg(target, "no nags"))
nags = wrap(nags)
def cancel(self, irc, msg, args, id):
"""<id of nag>
cancels a nag
"""
n = getnag(id)
if n and msg.nick == n.audience[:-1] or msg.nick == "nod":
n.delete()
irc.reply("done")
else:
irc.reply("you don't have permission")
cancel = wrap(cancel, ['int'])
Class = Nag
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| gpl-3.0 |
maartenq/ansible | lib/ansible/modules/network/f5/bigip_remote_role.py | 6 | 18384 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_remote_role
short_description: Manage remote roles on a BIG-IP
description:
- Manages remote roles on a BIG-IP. Remote roles are used in situations where
user authentication is handled off-box. Local access control to the BIG-IP
      is controlled by the defined remote role, whereas authentication (and by
extension, assignment to the role) is handled off-box.
version_added: 2.7
options:
name:
description:
- Specifies the name of the remote role.
required: True
line_order:
description:
- Specifies the order of the line in the file C(/config/bigip/auth/remoterole).
- The LDAP and Active Directory servers read this file line by line.
- The order of the information is important; therefore, F5 recommends that
you set the first line at 1000. This allows you, in the future, to insert
lines before the first line.
- When creating a new remote role, this parameter is required.
attribute_string:
description:
- Specifies the user account attributes saved in the group, in the format
C(cn=, ou=, dc=).
- When creating a new remote role, this parameter is required.
remote_access:
description:
- Enables or disables remote access for the specified group of remotely
authenticated users.
- When creating a new remote role, if this parameter is not specified, the default
is C(yes).
type: bool
assigned_role:
description:
- Specifies the authorization (level of access) for the account.
- When creating a new remote role, if this parameter is not provided, the
default is C(none).
- The C(partition_access) parameter controls which partitions the account can
access.
- The chosen role may affect the partitions that one is allowed to specify.
Specifically, roles such as C(administrator), C(auditor) and C(resource-administrator)
        require a C(partition_access) of C(all).
- A set of pre-existing roles ship with the system. They are C(none), C(guest),
C(operator), C(application-editor), C(manager), C(certificate-manager),
C(irule-manager), C(user-manager), C(resource-administrator), C(auditor),
C(administrator), C(firewall-manager).
partition_access:
description:
- Specifies the accessible partitions for the account.
- This parameter supports the reserved names C(all) and C(Common), as well as
specific partitions a user may access.
- Users who have access to a partition can operate on objects in that partition,
as determined by the permissions conferred by the user's C(assigned_role).
- When creating a new remote role, if this parameter is not specified, the default
is C(all).
terminal_access:
description:
- Specifies terminal-based accessibility for remote accounts not already
explicitly assigned a user role.
- Common values for this include C(tmsh) and C(none), however custom values
may also be specified.
- When creating a new remote role, if this parameter is not specified, the default
is C(none).
state:
description:
- When C(present), guarantees that the remote role exists.
- When C(absent), removes the remote role from the system.
default: present
choices:
- absent
- present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a remote role
bigip_remote_role:
name: foo
group_name: ldap_group
line_order: 1
attribute_string: memberOf=cn=ldap_group,cn=ldap.group,ou=ldap
remote_access: enabled
assigned_role: administrator
partition_access: all
terminal_access: none
state: present
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
attribute_string:
description: The new attribute string of the resource.
returned: changed
type: string
sample: "memberOf=cn=ldap_group,cn=ldap.group,ou=ldap"
terminal_access:
description: The terminal setting of the remote role.
returned: changed
type: string
sample: tmsh
line_order:
description: Order of the remote role for LDAP and Active Directory servers.
returned: changed
type: int
sample: 1000
assigned_role:
description: System role that this remote role is associated with.
returned: changed
type: string
sample: administrator
partition_access:
description: Partition that the role has access to.
returned: changed
type: string
sample: all
remote_access:
description: Whether remote access is allowed or not.
returned: changed
type: bool
sample: no
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
api_map = {
'attribute': 'attribute_string',
'console': 'terminal_access',
'lineOrder': 'line_order',
'role': 'assigned_role',
'userPartition': 'partition_access',
'deny': 'remote_access'
}
api_attributes = [
'attribute',
'console',
'lineOrder',
'role',
'deny',
'userPartition',
]
returnables = [
'attribute_string',
'terminal_access',
'line_order',
'assigned_role',
'partition_access',
'remote_access',
]
updatables = [
'attribute_string',
'terminal_access',
'line_order',
'assigned_role',
'partition_access',
'remote_access',
]
role_map = {
'application-editor': 'applicationeditor',
'none': 'noaccess',
'certificate-manager': 'certificatemanager',
'irule-manager': 'irulemanager',
'user-manager': 'usermanager',
'resource-administrator': 'resourceadmin',
'firewall-manager': 'firewallmanager'
}
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
@property
def partition(self):
return 'Common'
@property
def assigned_role(self):
if self._values['assigned_role'] is None:
return None
return self.role_map.get(self._values['assigned_role'], self._values['assigned_role'])
@property
def terminal_access(self):
if self._values['terminal_access'] in [None, 'tmsh']:
return self._values['terminal_access']
elif self._values['terminal_access'] == 'none':
return 'disable'
return self._values['terminal_access']
@property
def partition_access(self):
if self._values['partition_access'] is None:
return None
if self._values['partition_access'] == 'all':
return 'All'
return self._values['partition_access']
@property
def remote_access(self):
result = flatten_boolean(self._values['remote_access'])
if result == 'yes':
return 'disabled'
elif result == 'no':
return 'enabled'
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
@property
def assigned_role(self):
if self._values['assigned_role'] is None:
return None
rmap = dict((v, k) for k, v in iteritems(self.role_map))
return rmap.get(self._values['assigned_role'], self._values['assigned_role'])
@property
def terminal_access(self):
if self._values['terminal_access'] in [None, 'tmsh']:
return self._values['terminal_access']
elif self._values['terminal_access'] == 'disabled':
return 'none'
return self._values['terminal_access']
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/auth/remote-role/role-info/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
if self.want.partition_access is None:
self.want.update({'partition_access': 'all'})
if self.want.remote_access is None:
self.want.update({'remote_access': True})
if self.want.assigned_role is None:
self.want.update({'assigned_role': 'none'})
if self.want.terminal_access is None:
self.want.update({'terminal_access': 'none'})
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
uri = "https://{0}:{1}/mgmt/tm/auth/remote-role/role-info/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/auth/remote-role/role-info/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
if 'Once configured [All] partition, remote user group cannot' in response['message']:
raise F5ModuleError(
"The specified 'attribute_string' is already used in the 'all' partition."
)
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/auth/remote-role/role-info/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(response.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/auth/remote-role/role-info/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
line_order=dict(type='int'),
attribute_string=dict(),
remote_access=dict(type='bool'),
assigned_role=dict(),
partition_access=dict(),
terminal_access=dict(),
state=dict(
default='present',
choices=['present', 'absent']
),
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
client = F5RestClient(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
| gpl-3.0 |
ziel980/website | app/mod_streams/views.py | 1 | 2619 | from flask import *
from app.mod_streams.models import Stream
from app.mod_streams import stream_api
from .forms import ConfigForm
from app.mod_adminpanel.views import register_adminpanel
from flask_login import login_required
mod_streams = Blueprint('streams', __name__, url_prefix='/streams', template_folder='templates',
static_folder='static')
@mod_streams.route('/', methods=['GET', 'POST'])
@login_required
def index():
all_streams = Stream.query.order_by(Stream.is_online.desc(),
Stream.viewers.desc()).all()
active_stream = None
if request.method == 'POST':
active_stream = request.form['submit']
session['chat_enabled'] = request.form.getlist('enable_chat')
if active_stream == 'Refresh':
active_stream = None
stream_api.update_stream_info()
flash("Stream info refreshed!")
return render_template('streams.html',
title='Streams',
streams=all_streams,
active_stream=active_stream)
@register_adminpanel(mod_streams.name)
def do_adminpanel_logic():
config_form = ConfigForm()
# Drop down list shows all channels.
all_streams = Stream.query.all()
config_form.all_channels.choices = [(s.channel, s.channel) for s in all_streams]
if config_form.validate_on_submit():
channels = []
selected_channels = config_form.all_channels.data
if config_form.channel.data:
channels = config_form.channel.data.split(',')
if config_form.add.data:
for channel in channels:
Stream.create('Channel {} added'.format(channel),
'Failed: Channel {} already exists'.format(channel),
channel=channel)
elif config_form.remove.data:
for channel in channels:
Stream.delete('Channel {} deleted'.format(channel),
'Failed: Channel {} doesn\'t exist'.format(channel),
channel=channel)
elif config_form.load.data:
if selected_channels:
load_stream(selected_channels, config_form)
return render_template('streams_config.html', config_form=config_form, title='Admin Panel - Streams')
def load_stream(channels, form):
streams = Stream.query.filter(Stream.channel.in_(channels)).all()
form.channel.data = ','.join([s.channel for s in streams])
for stream in streams:
flash('Stream {} loaded'.format(stream.channel))
| mit |
ahmetyazar/adj-demo | dbinit/pymysql/err.py | 20 | 3612 | import struct
from .constants import ER
class MySQLError(Exception):
"""Exception related to operation with MySQL."""
class Warning(Warning, MySQLError):
"""Exception raised for important warnings like data truncations
while inserting, etc."""
class Error(MySQLError):
"""Exception that is the base class of all other error exceptions
(not Warning)."""
class InterfaceError(Error):
"""Exception raised for errors that are related to the database
interface rather than the database itself."""
class DatabaseError(Error):
"""Exception raised for errors that are related to the
database."""
class DataError(DatabaseError):
"""Exception raised for errors that are due to problems with the
processed data like division by zero, numeric value out of range,
etc."""
class OperationalError(DatabaseError):
"""Exception raised for errors that are related to the database's
operation and not necessarily under the control of the programmer,
e.g. an unexpected disconnect occurs, the data source name is not
found, a transaction could not be processed, a memory allocation
error occurred during processing, etc."""
class IntegrityError(DatabaseError):
"""Exception raised when the relational integrity of the database
is affected, e.g. a foreign key check fails, duplicate key,
etc."""
class InternalError(DatabaseError):
"""Exception raised when the database encounters an internal
error, e.g. the cursor is not valid anymore, the transaction is
out of sync, etc."""
class ProgrammingError(DatabaseError):
"""Exception raised for programming errors, e.g. table not found
or already exists, syntax error in the SQL statement, wrong number
of parameters specified, etc."""
class NotSupportedError(DatabaseError):
"""Exception raised in case a method or database API was used
which is not supported by the database, e.g. requesting a
.rollback() on a connection that does not support transaction or
has transactions turned off."""
error_map = {}
def _map_error(exc, *errors):
for error in errors:
error_map[error] = exc
_map_error(ProgrammingError, ER.DB_CREATE_EXISTS, ER.SYNTAX_ERROR,
ER.PARSE_ERROR, ER.NO_SUCH_TABLE, ER.WRONG_DB_NAME,
ER.WRONG_TABLE_NAME, ER.FIELD_SPECIFIED_TWICE,
ER.INVALID_GROUP_FUNC_USE, ER.UNSUPPORTED_EXTENSION,
ER.TABLE_MUST_HAVE_COLUMNS, ER.CANT_DO_THIS_DURING_AN_TRANSACTION)
_map_error(DataError, ER.WARN_DATA_TRUNCATED, ER.WARN_NULL_TO_NOTNULL,
ER.WARN_DATA_OUT_OF_RANGE, ER.NO_DEFAULT, ER.PRIMARY_CANT_HAVE_NULL,
ER.DATA_TOO_LONG, ER.DATETIME_FUNCTION_OVERFLOW)
_map_error(IntegrityError, ER.DUP_ENTRY, ER.NO_REFERENCED_ROW,
ER.NO_REFERENCED_ROW_2, ER.ROW_IS_REFERENCED, ER.ROW_IS_REFERENCED_2,
ER.CANNOT_ADD_FOREIGN, ER.BAD_NULL_ERROR)
_map_error(NotSupportedError, ER.WARNING_NOT_COMPLETE_ROLLBACK,
ER.NOT_SUPPORTED_YET, ER.FEATURE_DISABLED, ER.UNKNOWN_STORAGE_ENGINE)
_map_error(OperationalError, ER.DBACCESS_DENIED_ERROR, ER.ACCESS_DENIED_ERROR,
ER.CON_COUNT_ERROR, ER.TABLEACCESS_DENIED_ERROR,
ER.COLUMNACCESS_DENIED_ERROR)
del _map_error, ER
def raise_mysql_exception(data):
errno = struct.unpack('<h', data[1:3])[0]
is_41 = data[3:4] == b"#"
if is_41:
# client protocol 4.1
errval = data[9:].decode('utf-8', 'replace')
else:
errval = data[3:].decode('utf-8', 'replace')
errorclass = error_map.get(errno, InternalError)
raise errorclass(errno, errval)
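# Illustrative sketch (added comment, not part of the original file): a
# protocol-4.1 error packet such as
#
#     b"\xff\x7a\x04#42S02Table 'db.t' doesn't exist"
#
# carries errno 1146 (ER.NO_SUCH_TABLE) in data[1:3], the "#" marks the 4.1
# format, and raise_mysql_exception() would raise
# ProgrammingError(1146, "Table 'db.t' doesn't exist"). The table name here is
# hypothetical.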
| mit |
mchristopher/PokemonGo-DesktopMap | app/pywin/Lib/fileinput.py | 21 | 13746 | """Helper class to quickly write a loop over all standard input files.
Typical use is:
import fileinput
for line in fileinput.input():
process(line)
This iterates over the lines of all files listed in sys.argv[1:],
defaulting to sys.stdin if the list is empty. If a filename is '-' it
is also replaced by sys.stdin. To specify an alternative list of
filenames, pass it as the argument to input(). A single file name is
also allowed.
Functions filename(), lineno() return the filename and cumulative line
number of the line that has just been read; filelineno() returns its
line number in the current file; isfirstline() returns true iff the
line just read is the first line of its file; isstdin() returns true
iff the line was read from sys.stdin. Function nextfile() closes the
current file so that the next iteration will read the first line from
the next file (if any); lines not read from the file will not count
towards the cumulative line count; the filename is not changed until
after the first line of the next file has been read. Function close()
closes the sequence.
Before any lines have been read, filename() returns None and both line
numbers are zero; nextfile() has no effect. After all lines have been
read, filename() and the line number functions return the values
pertaining to the last line read; nextfile() has no effect.
All files are opened in text mode by default, you can override this by
setting the mode parameter to input() or FileInput.__init__().
If an I/O error occurs during opening or reading a file, the IOError
exception is raised.
If sys.stdin is used more than once, the second and further use will
return no lines, except perhaps for interactive use, or if it has been
explicitly reset (e.g. using sys.stdin.seek(0)).
Empty files are opened and immediately closed; the only time their
presence in the list of filenames is noticeable at all is when the
last file opened is empty.
It is possible that the last line of a file doesn't end in a newline
character; otherwise lines are returned including the trailing
newline.
Class FileInput is the implementation; its methods filename(),
lineno(), fileline(), isfirstline(), isstdin(), nextfile() and close()
correspond to the functions in the module. In addition it has a
readline() method which returns the next input line, and a
__getitem__() method which implements the sequence behavior. The
sequence must be accessed in strictly sequential order; sequence
access and readline() cannot be mixed.
Optional in-place filtering: if the keyword argument inplace=1 is
passed to input() or to the FileInput constructor, the file is moved
to a backup file and standard output is directed to the input file.
This makes it possible to write a filter that rewrites its input file
in place. If the keyword argument backup=".<some extension>" is also
given, it specifies the extension for the backup file, and the backup
file remains around; by default, the extension is ".bak" and it is
deleted when the output file is closed. In-place filtering is
disabled when standard input is read. XXX The current implementation
does not work for MS-DOS 8+3 filesystems.
XXX Possible additions:
- optional getopt argument processing
- isatty()
- read(), read(size), even readlines()
"""
import sys, os
__all__ = ["input","close","nextfile","filename","lineno","filelineno",
"isfirstline","isstdin","FileInput"]
_state = None
# No longer used
DEFAULT_BUFSIZE = 8*1024
def input(files=None, inplace=0, backup="", bufsize=0,
mode="r", openhook=None):
"""Return an instance of the FileInput class, which can be iterated.
The parameters are passed to the constructor of the FileInput class.
The returned instance, in addition to being an iterator,
    keeps global state for the functions of this module.
"""
global _state
if _state and _state._file:
raise RuntimeError, "input() already active"
_state = FileInput(files, inplace, backup, bufsize, mode, openhook)
return _state
def close():
"""Close the sequence."""
global _state
state = _state
_state = None
if state:
state.close()
def nextfile():
"""
Close the current file so that the next iteration will read the first
line from the next file (if any); lines not read from the file will
not count towards the cumulative line count. The filename is not
changed until after the first line of the next file has been read.
Before the first line has been read, this function has no effect;
it cannot be used to skip the first file. After the last line of the
last file has been read, this function has no effect.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.nextfile()
def filename():
"""
Return the name of the file currently being read.
Before the first line has been read, returns None.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.filename()
def lineno():
"""
Return the cumulative line number of the line that has just been read.
Before the first line has been read, returns 0. After the last line
of the last file has been read, returns the line number of that line.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.lineno()
def filelineno():
"""
Return the line number in the current file. Before the first line
has been read, returns 0. After the last line of the last file has
been read, returns the line number of that line within the file.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.filelineno()
def fileno():
"""
Return the file number of the current file. When no file is currently
opened, returns -1.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.fileno()
def isfirstline():
"""
    Returns true if the line just read is the first line of its file,
otherwise returns false.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.isfirstline()
def isstdin():
"""
Returns true if the last line was read from sys.stdin,
otherwise returns false.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.isstdin()
class FileInput:
"""FileInput([files[, inplace[, backup[, bufsize[, mode[, openhook]]]]]])
Class FileInput is the implementation of the module; its methods
filename(), lineno(), fileline(), isfirstline(), isstdin(), fileno(),
nextfile() and close() correspond to the functions of the same name
in the module.
In addition it has a readline() method which returns the next
input line, and a __getitem__() method which implements the
sequence behavior. The sequence must be accessed in strictly
sequential order; random access and readline() cannot be mixed.
"""
def __init__(self, files=None, inplace=0, backup="", bufsize=0,
mode="r", openhook=None):
if isinstance(files, basestring):
files = (files,)
else:
if files is None:
files = sys.argv[1:]
if not files:
files = ('-',)
else:
files = tuple(files)
self._files = files
self._inplace = inplace
self._backup = backup
self._savestdout = None
self._output = None
self._filename = None
self._startlineno = 0
self._filelineno = 0
self._file = None
self._isstdin = False
self._backupfilename = None
# restrict mode argument to reading modes
if mode not in ('r', 'rU', 'U', 'rb'):
raise ValueError("FileInput opening mode must be one of "
"'r', 'rU', 'U' and 'rb'")
self._mode = mode
if inplace and openhook:
raise ValueError("FileInput cannot use an opening hook in inplace mode")
elif openhook and not hasattr(openhook, '__call__'):
raise ValueError("FileInput openhook must be callable")
self._openhook = openhook
def __del__(self):
self.close()
def close(self):
try:
self.nextfile()
finally:
self._files = ()
def __iter__(self):
return self
def next(self):
while 1:
line = self._readline()
if line:
self._filelineno += 1
return line
if not self._file:
raise StopIteration
self.nextfile()
# repeat with next file
def __getitem__(self, i):
if i != self.lineno():
raise RuntimeError, "accessing lines out of order"
try:
return self.next()
except StopIteration:
raise IndexError, "end of input reached"
def nextfile(self):
savestdout = self._savestdout
self._savestdout = 0
if savestdout:
sys.stdout = savestdout
output = self._output
self._output = 0
try:
if output:
output.close()
finally:
file = self._file
self._file = None
try:
del self._readline # restore FileInput._readline
except AttributeError:
pass
try:
if file and not self._isstdin:
file.close()
finally:
backupfilename = self._backupfilename
self._backupfilename = 0
if backupfilename and not self._backup:
try: os.unlink(backupfilename)
except OSError: pass
self._isstdin = False
def readline(self):
while 1:
line = self._readline()
if line:
self._filelineno += 1
return line
if not self._file:
return line
self.nextfile()
# repeat with next file
def _readline(self):
if not self._files:
return ""
self._filename = self._files[0]
self._files = self._files[1:]
self._startlineno = self.lineno()
self._filelineno = 0
self._file = None
self._isstdin = False
self._backupfilename = 0
if self._filename == '-':
self._filename = '<stdin>'
self._file = sys.stdin
self._isstdin = True
else:
if self._inplace:
self._backupfilename = (
self._filename + (self._backup or os.extsep+"bak"))
try: os.unlink(self._backupfilename)
except os.error: pass
# The next few lines may raise IOError
os.rename(self._filename, self._backupfilename)
self._file = open(self._backupfilename, self._mode)
try:
perm = os.fstat(self._file.fileno()).st_mode
except OSError:
self._output = open(self._filename, "w")
else:
fd = os.open(self._filename,
os.O_CREAT | os.O_WRONLY | os.O_TRUNC,
perm)
self._output = os.fdopen(fd, "w")
try:
if hasattr(os, 'chmod'):
os.chmod(self._filename, perm)
except OSError:
pass
self._savestdout = sys.stdout
sys.stdout = self._output
else:
# This may raise IOError
if self._openhook:
self._file = self._openhook(self._filename, self._mode)
else:
self._file = open(self._filename, self._mode)
self._readline = self._file.readline # hide FileInput._readline
return self._readline()
def filename(self):
return self._filename
def lineno(self):
return self._startlineno + self._filelineno
def filelineno(self):
return self._filelineno
def fileno(self):
if self._file:
try:
return self._file.fileno()
except ValueError:
return -1
else:
return -1
def isfirstline(self):
return self._filelineno == 1
def isstdin(self):
return self._isstdin
def hook_compressed(filename, mode):
ext = os.path.splitext(filename)[1]
if ext == '.gz':
import gzip
return gzip.open(filename, mode)
elif ext == '.bz2':
import bz2
return bz2.BZ2File(filename, mode)
else:
return open(filename, mode)
def hook_encoded(encoding):
import io
def openhook(filename, mode):
mode = mode.replace('U', '').replace('b', '') or 'r'
return io.open(filename, mode, encoding=encoding, newline='')
return openhook
def _test():
import getopt
inplace = 0
backup = 0
opts, args = getopt.getopt(sys.argv[1:], "ib:")
for o, a in opts:
if o == '-i': inplace = 1
if o == '-b': backup = a
for line in input(args, inplace=inplace, backup=backup):
if line[-1:] == '\n': line = line[:-1]
if line[-1:] == '\r': line = line[:-1]
print "%d: %s[%d]%s %s" % (lineno(), filename(), filelineno(),
isfirstline() and "*" or "", line)
print "%d: %s[%d]" % (lineno(), filename(), filelineno())
if __name__ == '__main__':
_test()
| mit |
stamhe/namecoin | client/DNS/__init__.py | 40 | 1895 | # -*- encoding: utf-8 -*-
# $Id: __init__.py,v 1.8.2.7 2009/06/09 18:05:29 customdesigned Exp $
#
# This file is part of the pydns project.
# Homepage: http://pydns.sourceforge.net
#
# This code is covered by the standard Python License.
#
# __init__.py for DNS class.
__version__ = '2.3.4'
import Type,Opcode,Status,Class
from Base import DnsRequest, DNSError
from Lib import DnsResult
from Base import *
from Lib import *
Error=DNSError
from lazy import *
Request = DnsRequest
Result = DnsResult
#
# $Log: __init__.py,v $
# Revision 1.8.2.7 2009/06/09 18:05:29 customdesigned
# Release 2.3.4
#
# Revision 1.8.2.6 2008/08/01 04:01:25 customdesigned
# Release 2.3.3
#
# Revision 1.8.2.5 2008/07/28 02:11:07 customdesigned
# Bump version.
#
# Revision 1.8.2.4 2008/07/28 00:17:10 customdesigned
# Randomize source ports.
#
# Revision 1.8.2.3 2008/07/24 20:10:55 customdesigned
# Randomize tid in requests, and check in response.
#
# Revision 1.8.2.2 2007/05/22 21:06:52 customdesigned
# utf-8 in __init__.py
#
# Revision 1.8.2.1 2007/05/22 20:39:20 customdesigned
# Release 2.3.1
#
# Revision 1.8 2002/05/06 06:17:49 anthonybaxter
# found that the old README file called itself release 2.2. So make
# this one 2.3...
#
# Revision 1.7 2002/05/06 06:16:15 anthonybaxter
# make some sort of reasonable version string. releasewards ho!
#
# Revision 1.6 2002/03/19 13:05:02 anthonybaxter
# converted to class based exceptions (there goes the python1.4 compatibility :)
#
# removed a quite gross use of 'eval()'.
#
# Revision 1.5 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.4 2001/11/26 17:57:51 stroeder
# Added __version__
#
# Revision 1.3 2001/08/09 09:08:55 anthonybaxter
# added identifying header to top of each file
#
# Revision 1.2 2001/07/19 06:57:07 anthony
# cvs keywords added
#
#
| mit |
chirilo/phantomjs | src/breakpad/src/tools/gyp/test/subdirectory/gyptest-SYMROOT-all.py | 399 | 1269 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a target and a subsidiary dependent target from a
.gyp file in a subdirectory, without specifying an explicit output build
directory, and using the generated solution or project file at the top
of the tree as the entry point.
The configuration sets the Xcode SYMROOT variable and uses --depth=
to make Xcode behave like the other build tools--that is, put all
built targets in a single output build directory at the top of the tree.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('prog1.gyp', '-Dset_symroot=1', '--depth=.', chdir='src')
test.relocate('src', 'relocate/src')
# Suppress the test infrastructure's setting SYMROOT on the command line.
test.build('prog1.gyp', test.ALL, SYMROOT=None, chdir='relocate/src')
test.run_built_executable('prog1',
stdout="Hello from prog1.c\n",
chdir='relocate/src')
test.run_built_executable('prog2',
stdout="Hello from prog2.c\n",
chdir='relocate/src')
test.pass_test()
| bsd-3-clause |
spadae22/odoo | openerp/tools/graph.py | 441 | 26118 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import operator
import math
class graph(object):
def __init__(self, nodes, transitions, no_ancester=None):
"""Initialize graph's object
@param nodes list of ids of nodes in the graph
@param transitions list of edges in the graph in the form (source_node, destination_node)
@param no_ancester list of nodes with no incoming edges
"""
self.nodes = nodes or []
self.edges = transitions or []
self.no_ancester = no_ancester or {}
trans = {}
for t in transitions:
trans.setdefault(t[0], [])
trans[t[0]].append(t[1])
self.transitions = trans
self.result = {}
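    # Illustrative usage (added example, not part of the original file): a small
    # diamond-shaped graph; node 1 is the only node without incoming edges.
    #
    #   g = graph([1, 2, 3, 4], [(1, 2), (1, 3), (2, 4), (3, 4)])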
def init_rank(self):
"""Computes rank of the nodes of the graph by finding initial feasible tree
"""
self.edge_wt = {}
for link in self.links:
self.edge_wt[link] = self.result[link[1]]['x'] - self.result[link[0]]['x']
tot_node = len(self.partial_order)
#do until all the nodes in the component are searched
while self.tight_tree()<tot_node:
list_node = []
list_edge = []
for node in self.nodes:
if node not in self.reachable_nodes:
list_node.append(node)
for edge in self.edge_wt:
if edge not in self.tree_edges:
list_edge.append(edge)
slack = 100
for edge in list_edge:
if ((edge[0] in self.reachable_nodes and edge[1] not in self.reachable_nodes) or
(edge[1] in self.reachable_nodes and edge[0] not in self.reachable_nodes)):
if slack > self.edge_wt[edge]-1:
slack = self.edge_wt[edge]-1
new_edge = edge
if new_edge[0] not in self.reachable_nodes:
delta = -(self.edge_wt[new_edge]-1)
else:
delta = self.edge_wt[new_edge]-1
for node in self.result:
if node in self.reachable_nodes:
self.result[node]['x'] += delta
for edge in self.edge_wt:
self.edge_wt[edge] = self.result[edge[1]]['x'] - self.result[edge[0]]['x']
self.init_cutvalues()
def tight_tree(self):
self.reachable_nodes = []
self.tree_edges = []
self.reachable_node(self.start)
return len(self.reachable_nodes)
def reachable_node(self, node):
"""Find the nodes of the graph which are only 1 rank apart from each other
"""
if node not in self.reachable_nodes:
self.reachable_nodes.append(node)
for edge in self.edge_wt:
if edge[0]==node:
if self.edge_wt[edge]==1:
self.tree_edges.append(edge)
if edge[1] not in self.reachable_nodes:
self.reachable_nodes.append(edge[1])
self.reachable_node(edge[1])
def init_cutvalues(self):
"""Initailize cut values of edges of the feasible tree.
Edges with negative cut-values are removed from the tree to optimize rank assignment
"""
self.cut_edges = {}
self.head_nodes = []
i=0
for edge in self.tree_edges:
self.head_nodes = []
rest_edges = []
rest_edges += self.tree_edges
del rest_edges[i]
self.head_component(self.start, rest_edges)
i+=1
positive = 0
negative = 0
for source_node in self.transitions:
if source_node in self.head_nodes:
for dest_node in self.transitions[source_node]:
if dest_node not in self.head_nodes:
negative+=1
else:
for dest_node in self.transitions[source_node]:
if dest_node in self.head_nodes:
positive+=1
self.cut_edges[edge] = positive - negative
def head_component(self, node, rest_edges):
"""Find nodes which are reachable from the starting node, after removing an edge
"""
if node not in self.head_nodes:
self.head_nodes.append(node)
for edge in rest_edges:
if edge[0]==node:
self.head_component(edge[1],rest_edges)
def process_ranking(self, node, level=0):
"""Computes initial feasible ranking after making graph acyclic with depth-first search
"""
if node not in self.result:
self.result[node] = {'y': None, 'x':level, 'mark':0}
else:
if level > self.result[node]['x']:
self.result[node]['x'] = level
if self.result[node]['mark']==0:
self.result[node]['mark'] = 1
for sec_end in self.transitions.get(node, []):
self.process_ranking(sec_end, level+1)
def make_acyclic(self, parent, node, level, tree):
"""Computes Partial-order of the nodes with depth-first search
"""
if node not in self.partial_order:
self.partial_order[node] = {'level':level, 'mark':0}
if parent:
tree.append((parent, node))
if self.partial_order[node]['mark']==0:
self.partial_order[node]['mark'] = 1
for sec_end in self.transitions.get(node, []):
self.links.append((node, sec_end))
self.make_acyclic(node, sec_end, level+1, tree)
return tree
def rev_edges(self, tree):
"""reverse the direction of the edges whose source-node-partail_order> destination-node-partail_order
to make the graph acyclic
"""
Is_Cyclic = False
i=0
for link in self.links:
src = link[0]
des = link[1]
edge_len = self.partial_order[des]['level'] - self.partial_order[src]['level']
if edge_len < 0:
del self.links[i]
self.links.insert(i, (des, src))
self.transitions[src].remove(des)
self.transitions.setdefault(des, []).append(src)
Is_Cyclic = True
elif math.fabs(edge_len) > 1:
Is_Cyclic = True
i += 1
return Is_Cyclic
def exchange(self, e, f):
"""Exchange edges to make feasible-tree optimized
:param e: edge with negative cut-value
:param f: new edge with minimum slack-value
"""
del self.tree_edges[self.tree_edges.index(e)]
self.tree_edges.append(f)
self.init_cutvalues()
def enter_edge(self, edge):
"""Finds a new_edge with minimum slack value to replace an edge with negative cut-value
@param edge edge with negative cut-value
"""
self.head_nodes = []
rest_edges = []
rest_edges += self.tree_edges
del rest_edges[rest_edges.index(edge)]
self.head_component(self.start, rest_edges)
if edge[1] in self.head_nodes:
l = []
for node in self.result:
if node not in self.head_nodes:
l.append(node)
self.head_nodes = l
slack = 100
new_edge = edge
for source_node in self.transitions:
if source_node in self.head_nodes:
for dest_node in self.transitions[source_node]:
if dest_node not in self.head_nodes:
if slack>(self.edge_wt[edge]-1):
slack = self.edge_wt[edge]-1
new_edge = (source_node, dest_node)
return new_edge
def leave_edge(self):
"""Returns the edge with negative cut_value(if exists)
"""
if self.critical_edges:
for edge in self.critical_edges:
self.cut_edges[edge] = 0
for edge in self.cut_edges:
if self.cut_edges[edge]<0:
return edge
return None
def finalize_rank(self, node, level):
self.result[node]['x'] = level
for destination in self.optimal_edges.get(node, []):
self.finalize_rank(destination, level+1)
def normalize(self):
"""The ranks are normalized by setting the least rank to zero.
"""
least_rank = min(map(lambda x: x['x'], self.result.values()))
if least_rank!=0:
for node in self.result:
self.result[node]['x']-=least_rank
def make_chain(self):
"""Edges between nodes more than one rank apart are replaced by chains of unit
length edges between temporary nodes.
"""
for edge in self.edge_wt:
if self.edge_wt[edge]>1:
self.transitions[edge[0]].remove(edge[1])
start = self.result[edge[0]]['x']
end = self.result[edge[1]]['x']
for rank in range(start+1, end):
if not self.result.get((rank, 'temp'), False):
self.result[(rank, 'temp')] = {'y': None, 'x': rank, 'mark': 0}
for rank in range(start, end):
if start==rank:
self.transitions[edge[0]].append((rank+1, 'temp'))
elif rank==end-1:
self.transitions.setdefault((rank, 'temp'), []).append(edge[1])
else:
self.transitions.setdefault((rank, 'temp'), []).append((rank+1, 'temp'))
def init_order(self, node, level):
"""Initialize orders the nodes in each rank with depth-first search
"""
if not self.result[node]['y']:
self.result[node]['y'] = self.order[level]
self.order[level] += 1
for sec_end in self.transitions.get(node, []):
if node!=sec_end:
self.init_order(sec_end, self.result[sec_end]['x'])
def order_heuristic(self):
for i in range(12):
self.wmedian()
def wmedian(self):
"""Applies median heuristic to find optimzed order of the nodes with in their ranks
"""
for level in self.levels:
node_median = []
nodes = self.levels[level]
for node in nodes:
node_median.append((node, self.median_value(node, level-1)))
sort_list = sorted(node_median, key=operator.itemgetter(1))
new_list = [tuple[0] for tuple in sort_list]
self.levels[level] = new_list
order = 0
for node in nodes:
self.result[node]['y'] = order
order +=1
def median_value(self, node, adj_rank):
"""Returns median value of a vertex , defined as the median position of the adjacent vertices
@param node node to process
@param adj_rank rank 1 less than the node's rank
"""
adj_nodes = self.adj_position(node, adj_rank)
l = len(adj_nodes)
m = l/2
if l==0:
return -1.0
elif l%2 == 1:
            return adj_nodes[m]  # median is the middle element
elif l==2:
return (adj_nodes[0]+adj_nodes[1])/2
else:
left = adj_nodes[m-1] - adj_nodes[0]
right = adj_nodes[l-1] - adj_nodes[m]
return ((adj_nodes[m-1]*right) + (adj_nodes[m]*left))/(left+right)
def adj_position(self, node, adj_rank):
"""Returns list of the present positions of the nodes adjacent to node in the given adjacent rank.
@param node node to process
@param adj_rank rank 1 less than the node's rank
"""
pre_level_nodes = self.levels.get(adj_rank, [])
adj_nodes = []
if pre_level_nodes:
for src in pre_level_nodes:
if self.transitions.get(src) and node in self.transitions[src]:
adj_nodes.append(self.result[src]['y'])
return adj_nodes
def preprocess_order(self):
levels = {}
for r in self.partial_order:
l = self.result[r]['x']
levels.setdefault(l,[])
levels[l].append(r)
self.levels = levels
def graph_order(self):
"""Finds actual-order of the nodes with respect to maximum number of nodes in a rank in component
"""
mid_pos = 0.0
max_level = max(map(lambda x: len(x), self.levels.values()))
for level in self.levels:
if level:
no = len(self.levels[level])
factor = (max_level - no) * 0.10
list = self.levels[level]
list.reverse()
if no%2==0:
first_half = list[no/2:]
factor = -factor
else:
first_half = list[no/2+1:]
                if max_level==1:  # special case: one node per rank (flat horizontal graph)
self.result[list[no/2]]['y'] = mid_pos + (self.result[list[no/2]]['x']%2 * 0.5)
else:
self.result[list[no/2]]['y'] = mid_pos + factor
last_half = list[:no/2]
i=1
for node in first_half:
self.result[node]['y'] = mid_pos - (i + factor)
i += 1
i=1
for node in last_half:
self.result[node]['y'] = mid_pos + (i + factor)
i += 1
else:
self.max_order += max_level+1
mid_pos = self.result[self.start]['y']
def tree_order(self, node, last=0):
mid_pos = self.result[node]['y']
l = self.transitions.get(node, [])
l.reverse()
no = len(l)
rest = no%2
first_half = l[no/2+rest:]
last_half = l[:no/2]
for i, child in enumerate(first_half):
self.result[child]['y'] = mid_pos - (i+1 - (0 if rest else 0.5))
if self.transitions.get(child, False):
if last:
self.result[child]['y'] = last + len(self.transitions[child])/2 + 1
last = self.tree_order(child, last)
if rest:
mid_node = l[no/2]
self.result[mid_node]['y'] = mid_pos
if self.transitions.get(mid_node, False):
if last:
self.result[mid_node]['y'] = last + len(self.transitions[mid_node])/2 + 1
if node!=mid_node:
last = self.tree_order(mid_node)
else:
if last:
self.result[mid_node]['y'] = last + 1
self.result[node]['y'] = self.result[mid_node]['y']
mid_pos = self.result[node]['y']
i=1
last_child = None
for child in last_half:
self.result[child]['y'] = mid_pos + (i - (0 if rest else 0.5))
last_child = child
i += 1
if self.transitions.get(child, False):
if last:
self.result[child]['y'] = last + len(self.transitions[child])/2 + 1
if node!=child:
last = self.tree_order(child, last)
if last_child:
last = self.result[last_child]['y']
return last
def process_order(self):
"""Finds actual-order of the nodes with respect to maximum number of nodes in a rank in component
"""
if self.Is_Cyclic:
max_level = max(map(lambda x: len(x), self.levels.values()))
if max_level%2:
self.result[self.start]['y'] = (max_level+1)/2 + self.max_order + (self.max_order and 1)
else:
self.result[self.start]['y'] = max_level /2 + self.max_order + (self.max_order and 1)
self.graph_order()
else:
self.result[self.start]['y'] = 0
self.tree_order(self.start, 0)
min_order = math.fabs(min(map(lambda x: x['y'], self.result.values())))
index = self.start_nodes.index(self.start)
same = False
roots = []
if index>0:
for start in self.start_nodes[:index]:
same = True
for edge in self.tree_list[start][1:]:
if edge in self.tree_list[self.start]:
continue
else:
same = False
break
if same:
roots.append(start)
if roots:
min_order += self.max_order
else:
min_order += self.max_order + 1
for level in self.levels:
for node in self.levels[level]:
self.result[node]['y'] += min_order
if roots:
roots.append(self.start)
one_level_el = self.tree_list[self.start][0][1]
base = self.result[one_level_el]['y']# * 2 / (index + 2)
no = len(roots)
first_half = roots[:no/2]
if no%2==0:
last_half = roots[no/2:]
else:
last_half = roots[no/2+1:]
factor = -math.floor(no/2)
for start in first_half:
self.result[start]['y'] = base + factor
factor += 1
if no%2:
self.result[roots[no/2]]['y'] = base + factor
factor +=1
for start in last_half:
self.result[start]['y'] = base + factor
factor += 1
self.max_order = max(map(lambda x: x['y'], self.result.values()))
def find_starts(self):
"""Finds other start nodes of the graph in the case when graph is disconneted
"""
rem_nodes = []
for node in self.nodes:
if not self.partial_order.get(node):
rem_nodes.append(node)
cnt = 0
while True:
if len(rem_nodes)==1:
self.start_nodes.append(rem_nodes[0])
break
else:
count = 0
new_start = rem_nodes[0]
largest_tree = []
for node in rem_nodes:
self.partial_order = {}
tree = self.make_acyclic(None, node, 0, [])
if len(tree)+1 > count:
count = len(tree) + 1
new_start = node
largest_tree = tree
else:
if not largest_tree:
new_start = rem_nodes[0]
rem_nodes.remove(new_start)
self.start_nodes.append(new_start)
for edge in largest_tree:
if edge[0] in rem_nodes:
rem_nodes.remove(edge[0])
if edge[1] in rem_nodes:
rem_nodes.remove(edge[1])
if not rem_nodes:
break
def rank(self):
"""Finds the optimized rank of the nodes using Network-simplex algorithm
"""
self.levels = {}
self.critical_edges = []
self.partial_order = {}
self.links = []
self.Is_Cyclic = False
self.tree_list[self.start] = self.make_acyclic(None, self.start, 0, [])
self.Is_Cyclic = self.rev_edges(self.tree_list[self.start])
self.process_ranking(self.start)
self.init_rank()
        # exchange tree edges with negative cut values to optimize the feasible tree
e = self.leave_edge()
while e :
f = self.enter_edge(e)
if e==f:
self.critical_edges.append(e)
else:
self.exchange(e,f)
e = self.leave_edge()
#finalize rank using optimum feasible tree
# self.optimal_edges = {}
# for edge in self.tree_edges:
# source = self.optimal_edges.setdefault(edge[0], [])
# source.append(edge[1])
# self.finalize_rank(self.start, 0)
#normalization
self.normalize()
for edge in self.edge_wt:
self.edge_wt[edge] = self.result[edge[1]]['x'] - self.result[edge[0]]['x']
def order_in_rank(self):
"""Finds optimized order of the nodes within their ranks using median heuristic
"""
self.make_chain()
self.preprocess_order()
self.order = {}
max_rank = max(map(lambda x: x, self.levels.keys()))
for i in range(max_rank+1):
self.order[i] = 0
self.init_order(self.start, self.result[self.start]['x'])
for level in self.levels:
self.levels[level].sort(lambda x, y: cmp(self.result[x]['y'], self.result[y]['y']))
self.order_heuristic()
self.process_order()
def process(self, starting_node):
"""Process the graph to find ranks and order of the nodes
@param starting_node node from where to start the graph search
"""
self.start_nodes = starting_node or []
self.partial_order = {}
self.links = []
self.tree_list = {}
if self.nodes:
if self.start_nodes:
                # add dummy edges to the nodes which do not have any incoming edges
tree = self.make_acyclic(None, self.start_nodes[0], 0, [])
for node in self.no_ancester:
for sec_node in self.transitions.get(node, []):
if sec_node in self.partial_order.keys():
self.transitions[self.start_nodes[0]].append(node)
break
self.partial_order = {}
tree = self.make_acyclic(None, self.start_nodes[0], 0, [])
        # if the graph is disconnected or no start node is given,
        # then find a starting node for each component of the graph
if len(self.nodes) > len(self.partial_order):
self.find_starts()
self.max_order = 0
#for each component of the graph find ranks and order of the nodes
for s in self.start_nodes:
self.start = s
            self.rank() # First step: network simplex algorithm
self.order_in_rank() #Second step: ordering nodes within ranks
def __str__(self):
result = ''
for l in self.levels:
result += 'PosY: ' + str(l) + '\n'
for node in self.levels[l]:
result += '\tPosX: '+ str(self.result[node]['y']) + ' - Node:' + str(node) + "\n"
return result
def scale(self, maxx, maxy, nwidth=0, nheight=0, margin=20):
"""Computes actual co-ordiantes of the nodes
"""
        # for flat edges, i.e. source and destination nodes are on the same rank
for src in self.transitions:
for des in self.transitions[src]:
if self.result[des]['x'] - self.result[src]['x'] == 0:
self.result[src]['x'] += 0.08
self.result[des]['x'] -= 0.08
factorX = maxx + nheight
factorY = maxy + nwidth
for node in self.result:
self.result[node]['y'] = (self.result[node]['y']) * factorX + margin
self.result[node]['x'] = (self.result[node]['x']) * factorY + margin
def result_get(self):
return self.result
if __name__=='__main__':
starting_node = ['profile'] # put here nodes with flow_start=True
nodes = ['project','account','hr','base','product','mrp','test','profile']
transitions = [
('profile','mrp'),
('mrp','project'),
('project','product'),
('mrp','hr'),
('mrp','test'),
('project','account'),
('project','hr'),
('product','base'),
('account','product'),
('account','test'),
('account','base'),
('hr','base'),
('test','base')
]
radius = 20
g = graph(nodes, transitions)
g.process(starting_node)
g.scale(radius*3,radius*3, radius, radius)
from PIL import Image
from PIL import ImageDraw
img = Image.new("RGB", (800, 600), "#ffffff")
draw = ImageDraw.Draw(img)
result = g.result_get()
node_res = {}
for node in nodes:
node_res[node] = result[node]
for name,node in node_res.items():
draw.arc( (int(node['y']-radius), int(node['x']-radius),int(node['y']+radius), int(node['x']+radius) ), 0, 360, (128,128,128))
draw.text( (int(node['y']), int(node['x'])), str(name), (128,128,128))
for t in transitions:
draw.line( (int(node_res[t[0]]['y']), int(node_res[t[0]]['x']),int(node_res[t[1]]['y']),int(node_res[t[1]]['x'])),(128,128,128) )
img.save("graph.png", "PNG")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tsabi/Odoo-tsabi-fixes | addons/account/tests/test_tax.py | 449 | 1740 | from openerp.tests.common import TransactionCase
class TestTax(TransactionCase):
"""Tests for taxes (account.tax)
We don't really need at this point to link taxes to tax codes
(account.tax.code) nor to companies (base.company) to check computation
results.
"""
def setUp(self):
super(TestTax, self).setUp()
self.tax_model = self.registry('account.tax')
def test_programmatic_tax(self):
cr, uid = self.cr, self.uid
tax_id = self.tax_model.create(cr, uid, dict(
name="Programmatic tax",
type='code',
python_compute='result = 12.0',
python_compute_inv='result = 11.0',
))
tax_records = self.tax_model.browse(cr, uid, [tax_id])
res = self.tax_model.compute_all(cr, uid, tax_records, 50.0, 2)
tax_detail = res['taxes'][0]
self.assertEquals(tax_detail['amount'], 24.0)
self.assertEquals(res['total_included'], 124.0)
def test_percent_tax(self):
"""Test computations done by a 10 percent tax."""
cr, uid = self.cr, self.uid
tax_id = self.tax_model.create(cr, uid, dict(
name="Percent tax",
type='percent',
amount='0.1',
))
tax_records = self.tax_model.browse(cr, uid, [tax_id])
res = self.tax_model.compute_all(cr, uid, tax_records, 50.0, 2)
tax_detail = res['taxes'][0]
self.assertEquals(tax_detail['amount'], 10.0)
self.assertEquals(res['total_included'], 110.0)
# now the inverse computation
res = self.tax_model.compute_inv(cr, uid, tax_records, 55.0, 2)
self.assertEquals(res[0]['amount'], 10.0)
| agpl-3.0 |
magul/pywikibot-core | generate_family_file.py | 1 | 6872 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""This script generates a family file from a given URL."""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
#
# (C) Merlijn van Deen, 2010-2013
# (C) Pywikibot team, 2010-2018
#
# Distributed under the terms of the MIT license
#
# system imports
import codecs
import os
import sys
# creating & retrieving urls
if sys.version_info[0] > 2:
from urllib.parse import urlparse
raw_input = input
else:
from urlparse import urlparse
# Disable user-config checks so the family can be created first,
# and then used when generating the user-config
_orig_no_user_config = os.environ.get('PYWIKIBOT2_NO_USER_CONFIG')
os.environ['PYWIKIBOT2_NO_USER_CONFIG'] = '2'
from pywikibot.site_detect import MWSite as Wiki
# Reset this flag in case another script is run by pwb after this script
if not _orig_no_user_config:
del os.environ['PYWIKIBOT2_NO_USER_CONFIG']
else:
os.environ['PYWIKIBOT2_NO_USER_CONFIG'] = _orig_no_user_config
class FamilyFileGenerator(object):
"""Family file creator."""
def __init__(self, url=None, name=None, dointerwiki=None):
"""Constructor."""
if url is None:
url = raw_input("Please insert URL to wiki: ")
if name is None:
name = raw_input("Please insert a short name (eg: freeciv): ")
self.dointerwiki = dointerwiki
self.base_url = url
self.name = name
self.wikis = {} # {'https://wiki/$1': Wiki('https://wiki/$1'), ...}
self.langs = [] # [Wiki('https://wiki/$1'), ...]
def run(self):
"""Main method, generate family file."""
print("Generating family file from %s" % self.base_url)
w = Wiki(self.base_url)
self.wikis[w.lang] = w
print()
print("==================================")
print("api url: %s" % w.api)
print("MediaWiki version: %s" % w.version)
print("==================================")
print()
self.getlangs(w)
self.getapis()
self.writefile()
def getlangs(self, w):
"""Determine language of a site."""
print("Determining other languages...", end="")
try:
self.langs = w.langs
print(' '.join(sorted(wiki['prefix'] for wiki in self.langs)))
except Exception as e:
self.langs = []
print(e, "; continuing...")
if len([lang for lang in self.langs if lang['url'] == w.iwpath]) == 0:
if w.private_wiki:
w.lang = self.name
self.langs.append({u'language': w.lang,
u'local': u'',
u'prefix': w.lang,
u'url': w.iwpath})
if len(self.langs) > 1:
if self.dointerwiki is None:
makeiw = raw_input(
"\nThere are %i languages available."
"\nDo you want to generate interwiki links?"
"This might take a long time. ([y]es/[N]o/[e]dit)"
% len(self.langs)).lower()
else:
makeiw = self.dointerwiki
if makeiw == "y":
pass
elif makeiw == "e":
for wiki in self.langs:
print(wiki['prefix'], wiki['url'])
do_langs = raw_input("Which languages do you want: ")
self.langs = [wiki for wiki in self.langs
if wiki['prefix'] in do_langs or
wiki['url'] == w.iwpath]
else:
self.langs = [wiki for wiki in self.langs
if wiki[u'url'] == w.iwpath]
def getapis(self):
"""Load other language pages."""
print("Loading wikis... ")
for lang in self.langs:
print(" * %s... " % (lang[u'prefix']), end="")
if lang['prefix'] not in self.wikis:
try:
self.wikis[lang['prefix']] = Wiki(lang['url'])
print("downloaded")
except Exception as e:
print(e)
else:
print("in cache")
def writefile(self):
"""Write the family file."""
fn = "pywikibot/families/%s_family.py" % self.name
print("Writing %s... " % fn)
try:
open(fn)
if raw_input("%s already exists. Overwrite? (y/n)"
% fn).lower() == 'n':
print("Terminating.")
sys.exit(1)
except IOError: # file not found
pass
f = codecs.open(fn, 'w', 'utf-8')
f.write("""
# -*- coding: utf-8 -*-
\"\"\"
This family file was auto-generated by $Id$
Configuration parameters:
url = %(url)s
name = %(name)s
Please do not commit this to the Git repository!
\"\"\"
from pywikibot import family
from pywikibot.tools import deprecated
class Family(family.Family):
def __init__(self):
family.Family.__init__(self)
self.name = '%(name)s'
self.langs = {
""".lstrip() % {'url': self.base_url, 'name': self.name})
for k, w in self.wikis.items():
f.write(" '%(lang)s': '%(hostname)s',\n"
% {'lang': k, 'hostname': urlparse(w.server).netloc})
f.write(" }\n\n")
f.write(" def scriptpath(self, code):\n")
f.write(" return {\n")
for k, w in self.wikis.items():
f.write(" '%(lang)s': '%(path)s',\n"
% {'lang': k, 'path': w.scriptpath})
f.write(" }[code]\n")
f.write("\n")
f.write(" @deprecated('APISite.version()')\n")
f.write(" def version(self, code):\n")
f.write(" return {\n")
for k, w in self.wikis.items():
if w.version is None:
f.write(" '%(lang)s': None,\n" % {'lang': k})
else:
f.write(" '%(lang)s': u'%(ver)s',\n"
% {'lang': k, 'ver': w.version})
f.write(" }[code]\n")
f.write("\n")
f.write(" def protocol(self, code):\n")
f.write(" return {\n")
for k, w in self.wikis.items():
f.write(" '%(lang)s': u'%(protocol)s',\n"
% {'lang': k, 'protocol': urlparse(w.server).scheme})
f.write(" }[code]\n")
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: %s <url> <short name>" % sys.argv[0])
print("Example: %s https://www.mywiki.bogus/wiki/Main_Page mywiki"
% sys.argv[0])
print("This will create the file families/mywiki_family.py")
FamilyFileGenerator(*sys.argv[1:]).run()
| mit |
ohio813/pyflag | src/plugins_old/DiskForensics/FileHandlers/Extractor.py | 5 | 2138 | """ This is a scanner which utilises libextractor to collect metadata
about some files.
"""
from pyflag.Scanner import *
import pyflag.Reports as Reports
from pyflag.ColumnTypes import StringType, TimestampType
active = False
try:
import extractor
E = extractor.Extractor()
class ExtractorScan(GenScanFactory):
""" A Scanner to collect metadata about files """
order = 90
default = True
depends = 'TypeScan'
class Scan(StoreAndScanType):
types = (
            # This forces all images to be cached; do we really want this?
# 'image/.*',
'application/msword',
'application/x-executable'
)
def external_process(self,fd):
dbh=DB.DBO(self.case)
meta=E.extractFromFile(fd.name)
dbh = DB.DBO(self.case)
for pair in meta:
dbh.insert("xattr",
inode_id = self.fd.lookup_id(),
property = pair[0],
value = pair[1],
)
class BrowseMetadata(Reports.report):
"""
Browse Metadata
---------------
PyFlag can use the libextractor scanner to gather many
interesting facts about files being scanned. The specifics of
this metadata depends on libextractor, but generally metadata
reveals intimate details relating to the files - such as
authors, creation times etc.
This report simply presents the data in a tabular format so it
can be searched simply.
"""
name = "Browse Metadata"
family = "Disk Forensics"
def display(self, query, result):
result.table(
elements = [ InodeIDType(case=query['case']),
StringType('Property','property'),
StringType('Value','value')],
table = 'xattr',
case = query['case'],
)
except ImportError:
pass
| gpl-2.0 |
jumoconnect/openjumo | jumodjango/miner/web/feed_items_to_hdfs.py | 1 | 3768 | #!/usr/bin/env python
from crawler import WebCrawler, log
import logging
import os
import hashlib
from miner.text.util import strip_control_characters
import MySQLdb
from MySQLdb import cursors
class FeedItemWebCrawler(WebCrawler):
temp_outfile_path = '/tmp/feed_items'
outfile_base_path = '/tmp/scraped'
hdfs_path = '/miner/classifiers/training_data/feed_items'
def fetch_url(self, line):
issue_id, url, data = line.split('\t')
url = url.strip('"')
outfile = os.sep.join([self.outfile_base_path, hashlib.md5(''.join([issue_id, url or data])).hexdigest()]) + '.out'
if url and not os.path.exists(outfile):
new_data = super(FeedItemWebCrawler, self).fetch_url(url)
if new_data:
data = new_data
if not os.path.exists(outfile):
with open(outfile, 'w') as f:
f.write('\t'.join([issue_id, strip_control_characters(data)]))
return 'Wrote data'
else:
return 'Nada'
@classmethod
def write_to_temp(cls, temp_file, host, user, password, db):
conn = MySQLdb.connect(host=host,
user=user,
passwd=password,
db=db)
feed_item_query = """
select target_id as issue_id, url, concat_ws(' ', replace(replace(replace(title, '\t', ' '), '\n', ' '), '\r', ' '), replace(replace(replace(description, '\t', ' '), '\n', ' '), '\r', ' ') ) as text
into outfile '%s'
fields terminated by '\t' optionally enclosed by '"'
lines terminated by '\n'
from feed_item fi
join staff_users su
on fi.origin_type='user'
and fi.origin_id = su.user_id
where fi.target_type='issue'
and fi.item_type='user_story'
""" % temp_file
cur = conn.cursor()
cur.execute(feed_item_query)
if __name__ == '__main__':
from fabric.api import local
import time
from optparse import OptionParser
log.setLevel(logging.DEBUG)
parser = OptionParser()
parser.add_option('-d', '--hadoop-path', dest='hadoop_path')
parser.add_option('-f', '--hdfs-path', dest='hdfs_path')
parser.add_option('-t', '--temp-file-path', dest='tempfile_path', default=FeedItemWebCrawler.temp_outfile_path)
parser.add_option('-m', '--mysql-host', dest='mysql_host', default='localhost')
parser.add_option('-p', '--mysql-user', dest='mysql_user', default='root')
parser.add_option('-w', '--mysql-password', dest='mysql_password', default='')
parser.add_option('-b', '--mysql-database', dest='mysql_database', default='jumofeed')
options, args = parser.parse_args()
crawler = FeedItemWebCrawler()
local('rm -f %s' % options.tempfile_path)
crawler.write_to_temp(options.tempfile_path, options.mysql_host, options.mysql_user, options.mysql_password, options.mysql_database)
log.info('Green pool time!')
t1 = time.time()
for result in crawler.crawl(open(options.tempfile_path, 'r')):
log.info(result)
t2 = time.time()
log.info('DONE in %s seconds' % (t2 - t1))
local('rm -f %s' % options.tempfile_path)
local('for f in %(outfiles)s/*.out ; do cat $f >> %(final)s ; echo "" >> %(final)s ; done' % {'final': options.tempfile_path,
'outfiles': FeedItemWebCrawler.outfile_base_path} )
#local('rm -rf %s' % outfile_base_path)
local('%s dfs -copyFromLocal %s %s' % (options.hadoop_path, options.tempfile_path, options.hdfs_path))
| mit |
omprakasha/odoo | addons/mail/mail_followers.py | 168 | 12482 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import threading
from openerp.osv import osv, fields
from openerp import tools, SUPERUSER_ID
from openerp.tools.translate import _
from openerp.tools.mail import plaintext2html
class mail_followers(osv.Model):
""" mail_followers holds the data related to the follow mechanism inside
OpenERP. Partners can choose to follow documents (records) of any kind
that inherits from mail.thread. Following documents allow to receive
notifications for new messages.
A subscription is characterized by:
:param: res_model: model of the followed objects
:param: res_id: ID of resource (may be 0 for every objects)
"""
_name = 'mail.followers'
_rec_name = 'partner_id'
_log_access = False
_description = 'Document Followers'
_columns = {
'res_model': fields.char('Related Document Model',
required=True, select=1,
help='Model of the followed resource'),
'res_id': fields.integer('Related Document ID', select=1,
help='Id of the followed resource'),
'partner_id': fields.many2one('res.partner', string='Related Partner',
ondelete='cascade', required=True, select=1),
'subtype_ids': fields.many2many('mail.message.subtype', string='Subtype',
help="Message subtypes followed, meaning subtypes that will be pushed onto the user's Wall."),
}
#
# Modifying followers change access rights to individual documents. As the
# cache may contain accessible/inaccessible data, one has to refresh it.
#
def create(self, cr, uid, vals, context=None):
res = super(mail_followers, self).create(cr, uid, vals, context=context)
self.invalidate_cache(cr, uid, context=context)
return res
def write(self, cr, uid, ids, vals, context=None):
res = super(mail_followers, self).write(cr, uid, ids, vals, context=context)
self.invalidate_cache(cr, uid, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(mail_followers, self).unlink(cr, uid, ids, context=context)
self.invalidate_cache(cr, uid, context=context)
return res
_sql_constraints = [('mail_followers_res_partner_res_model_id_uniq','unique(res_model,res_id,partner_id)','Error, a partner cannot follow twice the same object.')]
class mail_notification(osv.Model):
""" Class holding notifications pushed to partners. Followers and partners
added in 'contacts to notify' receive notifications. """
_name = 'mail.notification'
_rec_name = 'partner_id'
_log_access = False
_description = 'Notifications'
_columns = {
'partner_id': fields.many2one('res.partner', string='Contact',
ondelete='cascade', required=True, select=1),
'is_read': fields.boolean('Read', select=1, oldname='read'),
'starred': fields.boolean('Starred', select=1,
help='Starred message that goes into the todo mailbox'),
'message_id': fields.many2one('mail.message', string='Message',
ondelete='cascade', required=True, select=1),
}
_defaults = {
'is_read': False,
'starred': False,
}
def init(self, cr):
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('mail_notification_partner_id_read_starred_message_id',))
if not cr.fetchone():
cr.execute('CREATE INDEX mail_notification_partner_id_read_starred_message_id ON mail_notification (partner_id, is_read, starred, message_id)')
def get_partners_to_email(self, cr, uid, ids, message, context=None):
""" Return the list of partners to notify, based on their preferences.
:param browse_record message: mail.message to notify
:param list partners_to_notify: optional list of partner ids restricting
the notifications to process
"""
notify_pids = []
for notification in self.browse(cr, uid, ids, context=context):
if notification.is_read:
continue
partner = notification.partner_id
# Do not send to partners without email address defined
if not partner.email:
continue
# Do not send to partners having same email address than the author (can cause loops or bounce effect due to messy database)
if message.author_id and message.author_id.email == partner.email:
continue
# Partner does not want to receive any emails or is opt-out
if partner.notify_email == 'none':
continue
notify_pids.append(partner.id)
return notify_pids
def get_signature_footer(self, cr, uid, user_id, res_model=None, res_id=None, context=None, user_signature=True):
""" Format a standard footer for notification emails (such as pushed messages
notification or invite emails).
Format:
<p>--<br />
Administrator
</p>
<div>
<small>Sent from <a ...>Your Company</a> using <a ...>OpenERP</a>.</small>
</div>
"""
footer = ""
if not user_id:
return footer
# add user signature
user = self.pool.get("res.users").browse(cr, SUPERUSER_ID, [user_id], context=context)[0]
if user_signature:
if user.signature:
signature = user.signature
else:
signature = "--<br />%s" % user.name
footer = tools.append_content_to_html(footer, signature, plaintext=False)
# add company signature
if user.company_id.website:
website_url = ('http://%s' % user.company_id.website) if not user.company_id.website.lower().startswith(('http:', 'https:')) \
else user.company_id.website
company = "<a style='color:inherit' href='%s'>%s</a>" % (website_url, user.company_id.name)
else:
company = user.company_id.name
sent_by = _('Sent by %(company)s using %(odoo)s')
signature_company = '<br /><small>%s</small>' % (sent_by % {
'company': company,
'odoo': "<a style='color:inherit' href='https://www.odoo.com/'>Odoo</a>"
})
footer = tools.append_content_to_html(footer, signature_company, plaintext=False, container_tag='div')
return footer
def update_message_notification(self, cr, uid, ids, message_id, partner_ids, context=None):
existing_pids = set()
new_pids = set()
new_notif_ids = []
for notification in self.browse(cr, uid, ids, context=context):
existing_pids.add(notification.partner_id.id)
# update existing notifications
self.write(cr, uid, ids, {'is_read': False}, context=context)
# create new notifications
new_pids = set(partner_ids) - existing_pids
for new_pid in new_pids:
new_notif_ids.append(self.create(cr, uid, {'message_id': message_id, 'partner_id': new_pid, 'is_read': False}, context=context))
return new_notif_ids
def _notify_email(self, cr, uid, ids, message_id, force_send=False, user_signature=True, context=None):
message = self.pool['mail.message'].browse(cr, SUPERUSER_ID, message_id, context=context)
# compute partners
email_pids = self.get_partners_to_email(cr, uid, ids, message, context=None)
if not email_pids:
return True
# compute email body (signature, company data)
body_html = message.body
# add user signature except for mail groups, where users are usually adding their own signatures already
user_id = message.author_id and message.author_id.user_ids and message.author_id.user_ids[0] and message.author_id.user_ids[0].id or None
signature_company = self.get_signature_footer(cr, uid, user_id, res_model=message.model, res_id=message.res_id, context=context, user_signature=(user_signature and message.model != 'mail.group'))
if signature_company:
body_html = tools.append_content_to_html(body_html, signature_company, plaintext=False, container_tag='div')
# compute email references
references = message.parent_id.message_id if message.parent_id else False
# custom values
custom_values = dict()
if message.model and message.res_id and self.pool.get(message.model) and hasattr(self.pool[message.model], 'message_get_email_values'):
custom_values = self.pool[message.model].message_get_email_values(cr, uid, message.res_id, message, context=context)
# create email values
max_recipients = 50
chunks = [email_pids[x:x + max_recipients] for x in xrange(0, len(email_pids), max_recipients)]
email_ids = []
for chunk in chunks:
mail_values = {
'mail_message_id': message.id,
'auto_delete': (context or {}).get('mail_auto_delete', True),
'mail_server_id': (context or {}).get('mail_server_id', False),
'body_html': body_html,
'recipient_ids': [(4, id) for id in chunk],
'references': references,
}
mail_values.update(custom_values)
email_ids.append(self.pool.get('mail.mail').create(cr, uid, mail_values, context=context))
# NOTE:
# 1. for more than 50 followers, use the queue system
# 2. do not send emails immediately if the registry is not loaded,
# to prevent sending email during a simple update of the database
# using the command-line.
if force_send and len(chunks) < 2 and \
(not self.pool._init or
getattr(threading.currentThread(), 'testing', False)):
self.pool.get('mail.mail').send(cr, uid, email_ids, context=context)
return True
def _notify(self, cr, uid, message_id, partners_to_notify=None, context=None,
force_send=False, user_signature=True):
""" Send by email the notification depending on the user preferences
:param list partners_to_notify: optional list of partner ids restricting
the notifications to process
:param bool force_send: if True, the generated mail.mail is
immediately sent after being created, as if the scheduler
was executed for this message only.
:param bool user_signature: if True, the generated mail.mail body is
the body of the related mail.message with the author's signature
"""
notif_ids = self.search(cr, SUPERUSER_ID, [('message_id', '=', message_id), ('partner_id', 'in', partners_to_notify)], context=context)
# update or create notifications
new_notif_ids = self.update_message_notification(cr, SUPERUSER_ID, notif_ids, message_id, partners_to_notify, context=context)
# mail_notify_noemail (do not send email) or no partner_ids: do not send, return
if context and context.get('mail_notify_noemail'):
return True
# browse as SUPERUSER_ID because of access to res_partner not necessarily allowed
self._notify_email(cr, SUPERUSER_ID, new_notif_ids, message_id, force_send, user_signature, context=context)
| agpl-3.0 |
joshgabriel/dft-crossfilter | CompleteApp/crossfilter_app/old_mains/old_main.py | 3 | 10263 | # main.py that controls the whole app
# to run: just run bokeh serve --show crossfilter_app in the benchmark-view repo
from random import random
import os
from bokeh.layouts import column
from bokeh.models import Button
from bokeh.models.widgets import Select, MultiSelect, Slider
from bokeh.palettes import RdYlBu3
from bokeh.plotting import figure, curdoc
#### CROSSFILTER PART ##### >>> Module load errors throwing up how to do a relative import ?
from crossview.crossfilter.models import CrossFilter
#from benchmark.loader import load
#### DATA INPUT FROM REST API ######
#from benchmark.loader import load
#### DATA INPUT STRAIGHT FROM PANDAS for test purposes ####
import pandas as pd
import numpy as np
##### PLOTTING PART -- GLOBAL FIGURE CREATION ########
# create a plot and style its properties
## gloabl data interface to come from REST API
vasp_data = pd.read_csv('../benchmark/data/francesca_data_head.csv')
p = figure(x_range=(0, 100), y_range=(0, 100), toolbar_location='below')
#p.border_fill_color = 'black'
#p.background_fill_color = 'black'
p.outline_line_color = None
p.grid.grid_line_color = None
#### FORMAT OF DATA SENT TO WIDGET #######
# add a text renderer to out plot (no data yet)
r = p.text(x=[], y=[], text=[], text_color=[], text_font_size="20pt",
text_baseline="middle", text_align="center")
r2 = p.circle(x=[], y=[])
i = 0
ds = r.data_source
ds2 = r2.data_source
##### WIDGET RESPONSES IN THE FORM OF CALLBACKS ######
# create a callback that will add a number in a random location
def callback():
global i
# BEST PRACTICE --- update .data in one step with a new dict
new_data = dict()
new_data['x'] = ds.data['x'] + [random()*70 + 15]
new_data['y'] = ds.data['y'] + [random()*70 + 15]
new_data['text_color'] = ds.data['text_color'] + [RdYlBu3[i%3]]
new_data['text'] = ds.data['text'] + [str(i)]
ds.data = new_data
i = i + 1
#### The make crossfilter callback
#### make data loading as easy as possible for now straight from
#### the benchmark data csv file not from the API with the decorators
#### TO DO after we see that the crossfilter and new bokeh play nicely
##########: integrate with API and uncomment the decorators and data loader
#@bokeh_app.route("/bokeh/benchmark/")
#@object_page("benchmark")
#### RENDERERS OF WIDGETS #####
def make_bokeh_crossfilter(axis='k-point'):
"""The root crossfilter controller"""
# Loading the dft data head as a
# pandas dataframe
new_data = dict()
# new_data = load("./benchmark/data/francesca_data_head")
# use a straight pandas dataframe for now instead and follow the
# BEST PRACTICE described above basically clean up the data object on each callback.
# data that will be given back on the callback
new_data = vasp_data # our data that will be replaced by the API
global p
p = CrossFilter.create(df=new_data)
print (type(p))
    # don't know what the CrossFilter class really returns in terms of data, but for testing purposes let's
# return something that is compatible with the new_data dictionary return in the
# vanilla example through the global object ds.data
# for example the x - y coordinates on the plots correspond to mins on the data set in k-point and value fields
# new_data['x'] = ds2.data['x'] + list(data[axis])
# new_data['y'] = ds2.data['y'] + list(data['value'])
# other stuff default as in vanilla callback()
# for test purposes to see actually what coordinate is getting plotted
    # it is always going to be the same because only one min exists in the dataset
    # it's at x = 6, y = -12,
    # SUCCESS: learnt how to create a custom callback that loads a CSV file and does something with it
# print ("New data from crossfilter", new_data)
# finally assign to ds.data
# ds2.data = new_data
def make_wflow_crossfilter(tags={'element_widget':['Cu', 'Pd', 'Mo'], 'code_widget':['VASP'], 'ExchCorr':['PBE']}):
"""
demo crossfilter based on pure pandas dataframes that serves a data processing
workflow that selects inputs from widgets
args:
        tags: dict of selections by up to 3 widgets
returns:
dictionary of crossfiltered dataframes that can further be processed down the workflow
"""
## Actual widget controlled inputs ##
# elements = tags['element']
# exchanges = tags['ExchCorr']
# propys = tags['code_widget']
## Demo user inputs for testing selects everything in the test csv : max data load ##
elements = np.unique(vasp_data['element'])
exchanges = np.unique(vasp_data['exchange'])
propys = ['B','dB','a0']
# final dictionary of crossfiltered dataframes
crossfilts = {}
# crossfiltering part - playing the role of the "Crossfilter class in bokeh.models"
for pr in propys:
for el in elements:
for ex in exchanges:
# crossfilter down to exchange and element
elems = vasp_data[vasp_data['element']==el]
exchs = elems[elems['exchange']==ex]
# separate into properties, energy, kpoints
p = exchs[exchs['property']==pr]
e = exchs[exchs['property']=='e0']
##### *** Accuracy calculation based on default standards *** #####
# choose reference from dict
ref_e = expt_ref_prb[el][pr]
ref_w = wien_ref[el][pr]
# calculate percent errors on property - ACCURACY CALCULATION based on default standards
props = [v for v in p['value'] ]
percs_wien = [ (v - ref_w) / ref_w * 100 for v in p['value']]
percs_prb = [ (v - ref_e) / ref_e * 100 for v in p['value']]
kpts = [ k for k in p['k-point']]
kpts_atom = [ k**3 for k in p['k-point'] ]
##### *** Accuracy calculation based on default standards *** #####
##### *** Calculate prec_sigma of energy *** #####
energy = [ v for v in e['value']]
end= len(energy) - 1
prec_sigma = [ v - energy[end] for v in energy]
# make data frame of kpoints, energy, percent errors on property
if kpts and energy and props:
NAME = '_'.join([el,ex,pr])
Rdata =\
pd.DataFrame({'Kpoints_size':kpts, 'Kpoints_atom_density':kpts_atom, 'Energy':energy, 'Prec_Sigma':prec_sigma , pr:props, 'percent_error_wien':percs_wien, 'percent_error_expt':percs_prb })
crossfilts[NAME] = Rdata
def calculate_prec(cross_df, automate= False):
"""
function that calculates the prec_inf using R
    and returns a fully constructed plottable dataframe
Args:
cross_df: pandas dataframe containing the data
automate: bool, a To do feature to automatically calculate the best fit
Returns:
        dataframe containing the R-added precision values to be
received most always by the plotting commander.
"""
import rpy2.robjects as ro
from rpy2.robjects import pandas2ri
from rpy2.robjects.packages import importr
import rpy2.robjects.numpy2ri
import rpy2.rinterface as rin
stats = importr('stats')
base = importr('base')
    # activate R environment in python
rpy2.robjects.numpy2ri.activate()
pandas2ri.activate()
    # read in necessary elements of the crossfiltered dataframe
df = pd.DataFrame({'x': cross_df['Kpoints_atom_density'],
'y': cross_df['Energy']})
ro.globalenv['dataframe']=df
### *** R used to obtain the fit on the data to calculate prec_inf *** ###
# perform regression - bokeh widgets can be used here to provide the inputs to the nls regression
# some python to R translation of object names via the pandas - R dataframes
y = df['y']
x = df['x']
l = len(y) - 1 # needed because R indexes list from 1 to len(list)
# ***WIDGET inputs*** # OR AUTOMATE
# the slider inputs on starting point or can be automated also
l1 = 3
l2 = 0
fitover = rin.SexpVector(list(range(l1,l-l2)), rin.INTSXP)
# numeric entry widget for 'b' is plausible for user to choose best starting guess
start_guess = {'a': y[l], 'b': 5}
start=pandas2ri.py2ri(pd.DataFrame(start_guess,index=start_guess))
# drop down list selection of model
model = 'y~a*x/(b+x)'
# Minimize function with weights and selection
m = \
stats.nls(model, start = start, algorithm = "port", subset = fitover, weights = x^2, data=base.as_symbol('dataframe'))
# Estimation of goodness of fit
g = stats.cor(y[l1:l-l2],stats.predict(m))
# Report summary of fit, values and error bars
print( base.summary(m).rx2('coefficients') )
# Extrapolation value is given by a
a = stats.coef(m)[1]
# Calculation of precision
prec = abs(y-a)
# test print outs of the data ? how to render onto html like Shiny if necesary ?
print("We learn that the converged value is: {0} and best precision achieved in the measurement is {1}".format(a, min(abs(prec))))
cross_df['Energy_Prec_Inf'] = prec
# close the R environments
rpy2.robjects.numpy2ri.deactivate()
pandas2ri.deactivate()
return (cross_df)
def make_widgets():
"""
main function that will control the rendering of UI widgets
"""
pass
#### WIDGET CREATIONS ####
# OLD VANILLA
# add a button widget and configure with the call back
# button_basic = Button(label="Press Me")
# button_basic.on_click(callback)
#make_bokeh_crossfilter()
# create a button for Select button for input
#menu = [("Bulk Modulus", "B"), ("B'", "dB"), ("Lattice Constant", "a0")]
#select_property = Select(name="Selection", options=menu, value="B")
#select_property.on_click(make_bokeh_crossfilter(axis=value))
# create a button for make crossfilter app
button_crossfilter = Button(label="Make Crossfilter")
button_crossfilter.on_click(make_bokeh_crossfilter)
#create a button for crossfilter_workflwo
button_w_crossfilter = Button(label="Make Crossfilter Workflow")
button_w_crossfilter.on_click(make_wflow_crossfilter)
# put the button and plot in a layout and add to the document
curdoc().add_root(column(button_crossfilter, button_w_crossfilter, p))
| mit |
cardforcoin/chain-bitcoin-python | chain_bitcoin/models.py | 2 | 7168 | from __future__ import absolute_import
__all__ = (
'Address', 'Transaction', 'Output', 'OpReturn', 'Block', 'Webhook',
'WebhookEvent', 'EchoVerificationEvent', 'AddressTransactionEvent',
'SendTransactionResult'
)
import dateutil.parser
from .func_util import compose, if_not_none
from .map_list import *
from .namedtuple import *
class Address(namedtuple('Address', (
'hash', 'balance', 'received', 'sent', 'unconfirmed_received',
'unconfirmed_sent', 'unconfirmed_balance'
))):
"""
https://chain.com/docs/v1/curl/#object-bitcoin-address
The Address Object contains basic balance details for a Bitcoin address.
"""
class Transaction(namedtuple('Transaction', (
'hash', 'block_hash', 'block_height', 'block_time', 'inputs', 'outputs',
'amount', 'fees', 'confirmations', 'chain_received_at', 'propagation_level',
'double_spend'
), alter_dict=compose(
transform_item(
'inputs', lambda v: map_list(Transaction.Input.from_dict, v)),
transform_item(
'outputs', lambda v: map_list(Transaction.Output.from_dict, v)),
transform_item('block_time', if_not_none(dateutil.parser.parse)),
))):
"""
https://chain.com/docs/v1/curl/#object-bitcoin-transaction
The Transaction Object contains details about a Bitcoin transaction,
including inputs and outputs.
"""
class Input(namedtuple('Transaction_Input', (
'transaction_hash', 'output_hash', 'output_index', 'value', 'addresses',
'script_signature', 'coinbase'
))):
"""
https://chain.com/docs/v1/curl/#object-bitcoin-transaction
Values of ``Transaction.inputs``.
"""
class Output(namedtuple('Transaction_Output', (
'transaction_hash', 'output_index', 'value', 'addresses', 'script',
'script_hex', 'script_type', 'required_signatures', 'spent',
))):
"""
https://chain.com/docs/v1/curl/#object-bitcoin-transaction
Values of ``Transaction.outputs``.
"""
class SendTransactionResult(namedtuple('SendTransactionResult', ('hash',))):
"""
https://chain.com/docs/v1/curl/#bitcoin-transaction-send
The value returned by the ``send_transaction`` endpoint.
Arguments:
hash: string - The newly created transaction hash.
"""
class Output(namedtuple('TransactionOutput', (
'transaction_hash', 'output_index', 'value', 'addresses', 'script',
'script_hex', 'script_type', 'required_signatures', 'spent',
'confirmations'
))):
"""
https://chain.com/docs/v1/curl/#bitcoin-address-unspents
The Output Object is a pseudo-object that is extracted from the
Transaction Object for use when the entire Transaction Object is not
needed.
"""
class OpReturn(namedtuple('OpReturn', (
'transaction_hash', 'hex', 'text', 'sender_addresses', 'receiver_addresses'
))):
"""
https://chain.com/docs/v1/curl/#bitcoin-address-op-returns
The OP_RETURN Object is a pseudo-object that is extracted from the
Transaction Object. It is an interpretation of the OP_RETURN script
within a zero-value output in a Bitcoin transaction. The OP_RETURN can
be used to include 40 bytes of metadata in a Bitcoin transaction.
"""
class Block(namedtuple('Block', (
'hash', 'previous_block_hash', 'height', 'confirmations', 'merkle_root',
'time', 'nonce', 'difficulty', 'transaction_hashes'
), alter_dict=transform_item('time', if_not_none(dateutil.parser.parse)))):
"""
https://chain.com/docs/v1/curl/#object-bitcoin-block
The Block Object contains details about a Bitcoin block, including all
transaction hashes.
"""
class Webhook(namedtuple('Webhook', ('id', 'url'))):
"""
https://chain.com/docs/v1/curl/#object-webhooks
The Webhook Object contains a server URL that the Chain API uses to
communicate with your application. You can create one or more Webhook
Objects, which may each have one or more associated Event Objects.
"""
class WebhookEvent:
"""
https://chain.com/docs/v1/curl/#object-webhooks-event
"""
event = None
"""
https://chain.com/docs/v1/curl/#object-webhooks-event
The event that will trigger the Webhook's POST request.
"""
class Message(object):
"""
https://chain.com/docs/v1/curl/#webhooks-receiving
The class for data sent to the webhook URL.
"""
@classmethod
def from_dict(cls, x):
x = dict(x)
event_type = next(t for t in webhook_event_types
if t.event == x['event'])
return event_type.Message.from_dict(x)
@classmethod
def from_dict(cls, x):
x = dict(x)
event_type = next(t for t in webhook_event_types
if t.event == x['event'])
return event_type.from_dict(x)
echo_verification_event = 'echo-verification'
address_transaction_event = 'address-transaction'
class EchoVerificationEvent(
namedtuple('EchoVerificationEvent', alter_dict=remove_item('event'))
):
"""
https://chain.com/docs/v1/curl/#webhooks-setup
Each time you create a new Webhook, before completing the request, the
Chain API will make a test request to the Webhook in order to verify
ownership.
To pass the verification test, your web server must respond with an exact
copy of the request body.
"""
event = echo_verification_event
class Message(namedtuple(
'EchoVerificationEvent_Message',
alter_dict=remove_item('event')
)):
pass
class AddressTransactionEvent(
namedtuple('AddressTransactionEvent', (
'id', 'webhook_id', 'block_chain', 'address', 'confirmations'
), alter_dict=remove_item('event')),
WebhookEvent,
):
"""
https://chain.com/docs/v1/curl/#object-webhooks-event
This event triggers when a new transaction occurs on a specified address.
The first POST will notify your application of the new, unconfirmed
transaction. Additional POSTs will notify your application of subsequent
confirmations for that transction.
Arguments:
id: string
webhook_id: string - The unique id of the associated Webhook.
block_chain: string - The name of the block chain that the Webhook
Event is associated with.
address: string - The address that will be used to match Webhook
Events.
confirmations: number - The number of confirmations that will be
POSTed to the Webhook for each new transaction.
"""
event = address_transaction_event
class Message(namedtuple('AddressTransactionEvent_Message', (
'transaction', 'block_chain', 'address'
), alter_dict=compose(
transform_item('transaction', Transaction.from_dict),
remove_item('event'),
))):
"""
https://chain.com/docs/v1/curl/#webhooks-receiving
The data structure that will be POSTed to your server from an
address-transaction event.
"""
event = address_transaction_event
webhook_event_types = (
EchoVerificationEvent,
AddressTransactionEvent,
)
| mit |
Agana/MyBlogAgain | django/utils/_os.py | 153 | 2736 | import os
import stat
from os.path import join, normcase, normpath, abspath, isabs, sep
from django.utils.encoding import force_unicode
# Define our own abspath function that can handle joining
# unicode paths to a current working directory that has non-ASCII
# characters in it. This isn't necessary on Windows since the
# Windows version of abspath handles this correctly. The Windows
# abspath also handles drive letters differently than the pure
# Python implementation, so it's best not to replace it.
if os.name == 'nt':
abspathu = abspath
else:
def abspathu(path):
"""
Version of os.path.abspath that uses the unicode representation
of the current working directory, thus avoiding a UnicodeDecodeError
in join when the cwd has non-ASCII characters.
"""
if not isabs(path):
path = join(os.getcwdu(), path)
return normpath(path)
def safe_join(base, *paths):
"""
Joins one or more path components to the base path component intelligently.
Returns a normalized, absolute version of the final path.
The final path must be located inside of the base path component (otherwise
a ValueError is raised).
"""
# We need to use normcase to ensure we don't false-negative on case
# insensitive operating systems (like Windows).
base = force_unicode(base)
paths = [force_unicode(p) for p in paths]
final_path = normcase(abspathu(join(base, *paths)))
base_path = normcase(abspathu(base))
base_path_len = len(base_path)
# Ensure final_path starts with base_path and that the next character after
# the final path is os.sep (or nothing, in which case final_path must be
# equal to base_path).
if not final_path.startswith(base_path) \
or final_path[base_path_len:base_path_len+1] not in ('', sep):
raise ValueError('The joined path (%s) is located outside of the base '
'path component (%s)' % (final_path, base_path))
return final_path
def rmtree_errorhandler(func, path, exc_info):
"""
    On Windows, some files are read-only (e.g. in .svn dirs), so when
rmtree() tries to remove them, an exception is thrown.
We catch that here, remove the read-only attribute, and hopefully
continue without problems.
"""
exctype, value = exc_info[:2]
    # looking for a windows error
if exctype is not WindowsError or 'Access is denied' not in str(value):
raise
# file type should currently be read only
if ((os.stat(path).st_mode & stat.S_IREAD) != stat.S_IREAD):
raise
# convert to read/write
os.chmod(path, stat.S_IWRITE)
# use the original function to repeat the operation
func(path)
| bsd-3-clause |
moritzpein/airflow | airflow/hooks/__init__.py | 7 | 1089 | # Imports the hooks dynamically while keeping the package API clean,
# abstracting the underlying modules
from airflow.utils import import_module_attrs as _import_module_attrs
from airflow.hooks.base_hook import BaseHook as _BaseHook
_hooks = {
'hive_hooks': [
'HiveCliHook',
'HiveMetastoreHook',
'HiveServer2Hook',
],
'hdfs_hook': ['HDFSHook'],
'webhdfs_hook': ['WebHDFSHook'],
'mysql_hook': ['MySqlHook'],
'postgres_hook': ['PostgresHook'],
'presto_hook': ['PrestoHook'],
'samba_hook': ['SambaHook'],
'sqlite_hook': ['SqliteHook'],
'S3_hook': ['S3Hook'],
'http_hook': ['HttpHook'],
'druid_hook': ['DruidHook'],
'jdbc_hook': ['JdbcHook'],
'dbapi_hook': ['DbApiHook'],
'mssql_hook': ['MsSqlHook'],
'oracle_hook': ['OracleHook'],
}
_import_module_attrs(globals(), _hooks)
from airflow.hooks.base_hook import BaseHook
def integrate_plugins():
"""Integrate plugins to the context"""
from airflow.plugins_manager import hooks as _hooks
for _h in _hooks:
globals()[_h.__name__] = _h
| apache-2.0 |
zootsuitbrian/zxing | cpp/scons/scons-local-2.0.0.final.0/SCons/Node/FS.py | 34 | 110895 | """scons.Node.FS
File system nodes.
These Nodes represent the canonical external objects that people think
of when they think of building software: files and directories.
This holds a "default_fs" variable that should be initialized with an FS
that can be used by scripts or modules looking for the canonical default.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Node/FS.py 5023 2010/06/14 22:05:46 scons"
import fnmatch
import os
import re
import shutil
import stat
import sys
import time
import codecs
import SCons.Action
from SCons.Debug import logInstanceCreation
import SCons.Errors
import SCons.Memoize
import SCons.Node
import SCons.Node.Alias
import SCons.Subst
import SCons.Util
import SCons.Warnings
from SCons.Debug import Trace
do_store_info = True
class EntryProxyAttributeError(AttributeError):
"""
An AttributeError subclass for recording and displaying the name
of the underlying Entry involved in an AttributeError exception.
"""
def __init__(self, entry_proxy, attribute):
AttributeError.__init__(self)
self.entry_proxy = entry_proxy
self.attribute = attribute
def __str__(self):
entry = self.entry_proxy.get()
fmt = "%s instance %s has no attribute %s"
return fmt % (entry.__class__.__name__,
repr(entry.name),
repr(self.attribute))
# The max_drift value: by default, use a cached signature value for
# any file that's been untouched for more than two days.
default_max_drift = 2*24*60*60
#
# We stringify these file system Nodes a lot. Turning a file system Node
# into a string is non-trivial, because the final string representation
# can depend on a lot of factors: whether it's a derived target or not,
# whether it's linked to a repository or source directory, and whether
# there's duplication going on. The normal technique for optimizing
# calculations like this is to memoize (cache) the string value, so you
# only have to do the calculation once.
#
# A number of the above factors, however, can be set after we've already
# been asked to return a string for a Node, because a Repository() or
# VariantDir() call or the like may not occur until later in SConscript
# files. So this variable controls whether we bother trying to save
# string values for Nodes. The wrapper interface can set this whenever
# they're done mucking with Repository and VariantDir and the other stuff,
# to let this module know it can start returning saved string values
# for Nodes.
#
Save_Strings = None
def save_strings(val):
global Save_Strings
Save_Strings = val
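# A minimal usage sketch: the calling interface flips this on once SConscript
# reading has finished, after which Node string values may be cached.
#
#     save_strings(1)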
#
# Avoid unnecessary function calls by recording a Boolean value that
# tells us whether or not os.path.splitdrive() actually does anything
# on this system, and therefore whether we need to bother calling it
# when looking up path names in various methods below.
#
do_splitdrive = None
def initialize_do_splitdrive():
global do_splitdrive
drive, path = os.path.splitdrive('X:/foo')
do_splitdrive = not not drive
initialize_do_splitdrive()
#
needs_normpath_check = None
def initialize_normpath_check():
"""
Initialize the normpath_check regular expression.
This function is used by the unit tests to re-initialize the pattern
when testing for behavior with different values of os.sep.
"""
global needs_normpath_check
if os.sep == '/':
pattern = r'.*/|\.$|\.\.$'
else:
pattern = r'.*[/%s]|\.$|\.\.$' % re.escape(os.sep)
needs_normpath_check = re.compile(pattern)
initialize_normpath_check()
#
# SCons.Action objects for interacting with the outside world.
#
# The Node.FS methods in this module should use these actions to
# create and/or remove files and directories; they should *not* use
# os.{link,symlink,unlink,mkdir}(), etc., directly.
#
# Using these SCons.Action objects ensures that descriptions of these
# external activities are properly displayed, that the displays are
# suppressed when the -s (silent) option is used, and (most importantly)
# the actions are disabled when the -n option is used, in which case
# there should be *no* changes to the external file system(s)...
#
if hasattr(os, 'link'):
def _hardlink_func(fs, src, dst):
# If the source is a symlink, we can't just hard-link to it
# because a relative symlink may point somewhere completely
# different. We must disambiguate the symlink and then
# hard-link the final destination file.
while fs.islink(src):
link = fs.readlink(src)
if not os.path.isabs(link):
src = link
else:
src = os.path.join(os.path.dirname(src), link)
fs.link(src, dst)
else:
_hardlink_func = None
if hasattr(os, 'symlink'):
def _softlink_func(fs, src, dst):
fs.symlink(src, dst)
else:
_softlink_func = None
def _copy_func(fs, src, dest):
shutil.copy2(src, dest)
st = fs.stat(src)
fs.chmod(dest, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
Valid_Duplicates = ['hard-soft-copy', 'soft-hard-copy',
'hard-copy', 'soft-copy', 'copy']
Link_Funcs = [] # contains the callables of the specified duplication style
def set_duplicate(duplicate):
# Fill in the Link_Funcs list according to the argument
# (discarding those not available on the platform).
# Set up the dictionary that maps the argument names to the
# underlying implementations. We do this inside this function,
# not in the top-level module code, so that we can remap os.link
# and os.symlink for testing purposes.
link_dict = {
'hard' : _hardlink_func,
'soft' : _softlink_func,
'copy' : _copy_func
}
if not duplicate in Valid_Duplicates:
raise SCons.Errors.InternalError("The argument of set_duplicate "
"should be in Valid_Duplicates")
global Link_Funcs
Link_Funcs = []
for func in duplicate.split('-'):
if link_dict[func]:
Link_Funcs.append(link_dict[func])
def LinkFunc(target, source, env):
# Relative paths cause problems with symbolic links, so
# we use absolute paths, which may be a problem for people
# who want to move their soft-linked src-trees around. Those
# people should use the 'hard-copy' mode; softlinks cannot be
# used for that (at least I have no idea how).
src = source[0].abspath
dest = target[0].abspath
dir, file = os.path.split(dest)
if dir and not target[0].fs.isdir(dir):
os.makedirs(dir)
if not Link_Funcs:
# Set a default order of link functions.
set_duplicate('hard-soft-copy')
fs = source[0].fs
# Now link the files with the previously specified order.
for func in Link_Funcs:
try:
func(fs, src, dest)
break
except (IOError, OSError):
# An OSError indicates something happened like a permissions
# problem or an attempt to symlink across file-system
# boundaries. An IOError indicates something like the file
# not existing. In either case, keep trying additional
# functions in the list and only raise an error if the last
# one failed.
if func == Link_Funcs[-1]:
# exceptions from the last link method (copy) are fatal
raise
return 0
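# A minimal usage sketch: the duplication order is configurable up front; for
# example, prefer symlinks and fall back to copying (the value must be one of
# Valid_Duplicates above).
#
#     set_duplicate('soft-copy')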
Link = SCons.Action.Action(LinkFunc, None)
def LocalString(target, source, env):
return 'Local copy of %s from %s' % (target[0], source[0])
LocalCopy = SCons.Action.Action(LinkFunc, LocalString)
def UnlinkFunc(target, source, env):
t = target[0]
t.fs.unlink(t.abspath)
return 0
Unlink = SCons.Action.Action(UnlinkFunc, None)
def MkdirFunc(target, source, env):
t = target[0]
if not t.exists():
t.fs.mkdir(t.abspath)
return 0
Mkdir = SCons.Action.Action(MkdirFunc, None, presub=None)
MkdirBuilder = None
def get_MkdirBuilder():
global MkdirBuilder
if MkdirBuilder is None:
import SCons.Builder
import SCons.Defaults
# "env" will get filled in by Executor.get_build_env()
# calling SCons.Defaults.DefaultEnvironment() when necessary.
MkdirBuilder = SCons.Builder.Builder(action = Mkdir,
env = None,
explain = None,
is_explicit = None,
target_scanner = SCons.Defaults.DirEntryScanner,
name = "MkdirBuilder")
return MkdirBuilder
class _Null(object):
pass
_null = _Null()
DefaultSCCSBuilder = None
DefaultRCSBuilder = None
def get_DefaultSCCSBuilder():
global DefaultSCCSBuilder
if DefaultSCCSBuilder is None:
import SCons.Builder
# "env" will get filled in by Executor.get_build_env()
# calling SCons.Defaults.DefaultEnvironment() when necessary.
act = SCons.Action.Action('$SCCSCOM', '$SCCSCOMSTR')
DefaultSCCSBuilder = SCons.Builder.Builder(action = act,
env = None,
name = "DefaultSCCSBuilder")
return DefaultSCCSBuilder
def get_DefaultRCSBuilder():
global DefaultRCSBuilder
if DefaultRCSBuilder is None:
import SCons.Builder
# "env" will get filled in by Executor.get_build_env()
# calling SCons.Defaults.DefaultEnvironment() when necessary.
act = SCons.Action.Action('$RCS_COCOM', '$RCS_COCOMSTR')
DefaultRCSBuilder = SCons.Builder.Builder(action = act,
env = None,
name = "DefaultRCSBuilder")
return DefaultRCSBuilder
# Cygwin's os.path.normcase pretends it's on a case-sensitive filesystem.
_is_cygwin = sys.platform == "cygwin"
if os.path.normcase("TeSt") == os.path.normpath("TeSt") and not _is_cygwin:
def _my_normcase(x):
return x
else:
def _my_normcase(x):
return x.upper()
class DiskChecker(object):
def __init__(self, type, do, ignore):
self.type = type
self.do = do
self.ignore = ignore
self.func = do
def __call__(self, *args, **kw):
return self.func(*args, **kw)
def set(self, list):
if self.type in list:
self.func = self.do
else:
self.func = self.ignore
def do_diskcheck_match(node, predicate, errorfmt):
result = predicate()
try:
# If calling the predicate() cached a None value from stat(),
# remove it so it doesn't interfere with later attempts to
# build this Node as we walk the DAG. (This isn't a great way
# to do this, we're reaching into an interface that doesn't
# really belong to us, but it's all about performance, so
# for now we'll just document the dependency...)
if node._memo['stat'] is None:
del node._memo['stat']
except (AttributeError, KeyError):
pass
if result:
raise TypeError(errorfmt % node.abspath)
def ignore_diskcheck_match(node, predicate, errorfmt):
pass
def do_diskcheck_rcs(node, name):
try:
rcs_dir = node.rcs_dir
except AttributeError:
if node.entry_exists_on_disk('RCS'):
rcs_dir = node.Dir('RCS')
else:
rcs_dir = None
node.rcs_dir = rcs_dir
if rcs_dir:
return rcs_dir.entry_exists_on_disk(name+',v')
return None
def ignore_diskcheck_rcs(node, name):
return None
def do_diskcheck_sccs(node, name):
try:
sccs_dir = node.sccs_dir
except AttributeError:
if node.entry_exists_on_disk('SCCS'):
sccs_dir = node.Dir('SCCS')
else:
sccs_dir = None
node.sccs_dir = sccs_dir
if sccs_dir:
return sccs_dir.entry_exists_on_disk('s.'+name)
return None
def ignore_diskcheck_sccs(node, name):
return None
diskcheck_match = DiskChecker('match', do_diskcheck_match, ignore_diskcheck_match)
diskcheck_rcs = DiskChecker('rcs', do_diskcheck_rcs, ignore_diskcheck_rcs)
diskcheck_sccs = DiskChecker('sccs', do_diskcheck_sccs, ignore_diskcheck_sccs)
diskcheckers = [
diskcheck_match,
diskcheck_rcs,
diskcheck_sccs,
]
def set_diskcheck(list):
for dc in diskcheckers:
dc.set(list)
def diskcheck_types():
return [dc.type for dc in diskcheckers]
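# A minimal usage sketch: option handling typically enables only the desired
# checks; valid names are those reported by diskcheck_types(), i.e.
# 'match', 'rcs' and 'sccs'.
#
#     set_diskcheck(['match'])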
class EntryProxy(SCons.Util.Proxy):
__str__ = SCons.Util.Delegate('__str__')
def __get_abspath(self):
entry = self.get()
return SCons.Subst.SpecialAttrWrapper(entry.get_abspath(),
entry.name + "_abspath")
def __get_filebase(self):
name = self.get().name
return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(name)[0],
name + "_filebase")
def __get_suffix(self):
name = self.get().name
return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(name)[1],
name + "_suffix")
def __get_file(self):
name = self.get().name
return SCons.Subst.SpecialAttrWrapper(name, name + "_file")
def __get_base_path(self):
"""Return the file's directory and file name, with the
suffix stripped."""
entry = self.get()
return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(entry.get_path())[0],
entry.name + "_base")
def __get_posix_path(self):
"""Return the path with / as the path separator,
regardless of platform."""
if os.sep == '/':
return self
else:
entry = self.get()
r = entry.get_path().replace(os.sep, '/')
return SCons.Subst.SpecialAttrWrapper(r, entry.name + "_posix")
def __get_windows_path(self):
"""Return the path with \ as the path separator,
regardless of platform."""
if os.sep == '\\':
return self
else:
entry = self.get()
r = entry.get_path().replace(os.sep, '\\')
return SCons.Subst.SpecialAttrWrapper(r, entry.name + "_windows")
def __get_srcnode(self):
return EntryProxy(self.get().srcnode())
def __get_srcdir(self):
"""Returns the directory containing the source node linked to this
node via VariantDir(), or the directory of this node if not linked."""
return EntryProxy(self.get().srcnode().dir)
def __get_rsrcnode(self):
return EntryProxy(self.get().srcnode().rfile())
def __get_rsrcdir(self):
"""Returns the directory containing the source node linked to this
node via VariantDir(), or the directory of this node if not linked."""
return EntryProxy(self.get().srcnode().rfile().dir)
def __get_dir(self):
return EntryProxy(self.get().dir)
dictSpecialAttrs = { "base" : __get_base_path,
"posix" : __get_posix_path,
"windows" : __get_windows_path,
"win32" : __get_windows_path,
"srcpath" : __get_srcnode,
"srcdir" : __get_srcdir,
"dir" : __get_dir,
"abspath" : __get_abspath,
"filebase" : __get_filebase,
"suffix" : __get_suffix,
"file" : __get_file,
"rsrcpath" : __get_rsrcnode,
"rsrcdir" : __get_rsrcdir,
}
def __getattr__(self, name):
# This is how we implement the "special" attributes
# such as base, posix, srcdir, etc.
try:
attr_function = self.dictSpecialAttrs[name]
except KeyError:
try:
attr = SCons.Util.Proxy.__getattr__(self, name)
except AttributeError, e:
# Raise our own AttributeError subclass with an
# overridden __str__() method that identifies the
# name of the entry that caused the exception.
raise EntryProxyAttributeError(self, name)
return attr
else:
return attr_function(self)
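# A minimal usage sketch: these special attributes back substitutions such as
# the following in command strings (illustrative targets/sources, not part of
# this module):
#
#     env.Command('out.txt', 'in.txt', 'cat ${SOURCE.posix} > ${TARGET.abspath}')
#
# where ${SOURCE.posix} resolves through __get_posix_path() above.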
class Base(SCons.Node.Node):
"""A generic class for file system entries. This class is for
when we don't know yet whether the entry being looked up is a file
or a directory. Instances of this class can morph into either
Dir or File objects by a later, more precise lookup.
Note: this class does not define __cmp__ and __hash__ for
efficiency reasons. SCons does a lot of comparing of
Node.FS.{Base,Entry,File,Dir} objects, so those operations must be
as fast as possible, which means we want to use Python's built-in
object identity comparisons.
"""
memoizer_counters = []
def __init__(self, name, directory, fs):
"""Initialize a generic Node.FS.Base object.
Call the superclass initialization, take care of setting up
our relative and absolute paths, identify our parent
directory, and indicate that this node should use
signatures."""
if __debug__: logInstanceCreation(self, 'Node.FS.Base')
SCons.Node.Node.__init__(self)
# Filenames and paths are probably reused and are intern'ed to
# save some memory.
self.name = SCons.Util.silent_intern(name)
self.suffix = SCons.Util.silent_intern(SCons.Util.splitext(name)[1])
self.fs = fs
assert directory, "A directory must be provided"
self.abspath = SCons.Util.silent_intern(directory.entry_abspath(name))
self.labspath = SCons.Util.silent_intern(directory.entry_labspath(name))
if directory.path == '.':
self.path = SCons.Util.silent_intern(name)
else:
self.path = SCons.Util.silent_intern(directory.entry_path(name))
if directory.tpath == '.':
self.tpath = SCons.Util.silent_intern(name)
else:
self.tpath = SCons.Util.silent_intern(directory.entry_tpath(name))
self.path_elements = directory.path_elements + [self]
self.dir = directory
self.cwd = None # will hold the SConscript directory for target nodes
self.duplicate = directory.duplicate
def str_for_display(self):
return '"' + self.__str__() + '"'
def must_be_same(self, klass):
"""
This node, which already existed, is being looked up as the
specified klass. Raise an exception if it isn't.
"""
if isinstance(self, klass) or klass is Entry:
return
raise TypeError("Tried to lookup %s '%s' as a %s." %\
(self.__class__.__name__, self.path, klass.__name__))
def get_dir(self):
return self.dir
def get_suffix(self):
return self.suffix
def rfile(self):
return self
def __str__(self):
"""A Node.FS.Base object's string representation is its path
name."""
global Save_Strings
if Save_Strings:
return self._save_str()
return self._get_str()
memoizer_counters.append(SCons.Memoize.CountValue('_save_str'))
def _save_str(self):
try:
return self._memo['_save_str']
except KeyError:
pass
result = sys.intern(self._get_str())
self._memo['_save_str'] = result
return result
def _get_str(self):
global Save_Strings
if self.duplicate or self.is_derived():
return self.get_path()
srcnode = self.srcnode()
if srcnode.stat() is None and self.stat() is not None:
result = self.get_path()
else:
result = srcnode.get_path()
if not Save_Strings:
# We're not at the point where we're saving the string
# representations of FS Nodes (because we haven't finished
# reading the SConscript files and need to have str() return
# things relative to them). That also means we can't yet
# cache values returned (or not returned) by stat(), since
# Python code in the SConscript files might still create
# or otherwise affect the on-disk file. So get rid of the
# values that the underlying stat() method saved.
try: del self._memo['stat']
except KeyError: pass
if self is not srcnode:
try: del srcnode._memo['stat']
except KeyError: pass
return result
rstr = __str__
memoizer_counters.append(SCons.Memoize.CountValue('stat'))
def stat(self):
try: return self._memo['stat']
except KeyError: pass
try: result = self.fs.stat(self.abspath)
except os.error: result = None
self._memo['stat'] = result
return result
def exists(self):
return self.stat() is not None
def rexists(self):
return self.rfile().exists()
def getmtime(self):
st = self.stat()
if st: return st[stat.ST_MTIME]
else: return None
def getsize(self):
st = self.stat()
if st: return st[stat.ST_SIZE]
else: return None
def isdir(self):
st = self.stat()
return st is not None and stat.S_ISDIR(st[stat.ST_MODE])
def isfile(self):
st = self.stat()
return st is not None and stat.S_ISREG(st[stat.ST_MODE])
if hasattr(os, 'symlink'):
def islink(self):
try: st = self.fs.lstat(self.abspath)
except os.error: return 0
return stat.S_ISLNK(st[stat.ST_MODE])
else:
def islink(self):
return 0 # no symlinks
def is_under(self, dir):
if self is dir:
return 1
else:
return self.dir.is_under(dir)
def set_local(self):
self._local = 1
def srcnode(self):
"""If this node is in a build path, return the node
corresponding to its source file. Otherwise, return
ourself.
"""
srcdir_list = self.dir.srcdir_list()
if srcdir_list:
srcnode = srcdir_list[0].Entry(self.name)
srcnode.must_be_same(self.__class__)
return srcnode
return self
def get_path(self, dir=None):
"""Return path relative to the current working directory of the
Node.FS.Base object that owns us."""
if not dir:
dir = self.fs.getcwd()
if self == dir:
return '.'
path_elems = self.path_elements
try: i = path_elems.index(dir)
except ValueError: pass
else: path_elems = path_elems[i+1:]
path_elems = [n.name for n in path_elems]
return os.sep.join(path_elems)
def set_src_builder(self, builder):
"""Set the source code builder for this node."""
self.sbuilder = builder
if not self.has_builder():
self.builder_set(builder)
def src_builder(self):
"""Fetch the source code builder for this node.
If there isn't one, we cache the source code builder specified
for the directory (which in turn will cache the value from its
parent directory, and so on up to the file system root).
"""
try:
scb = self.sbuilder
except AttributeError:
scb = self.dir.src_builder()
self.sbuilder = scb
return scb
def get_abspath(self):
"""Get the absolute path of the file."""
return self.abspath
def for_signature(self):
# Return just our name. Even an absolute path would not work,
# because that can change thanks to symlinks or remapped network
# paths.
return self.name
def get_subst_proxy(self):
try:
return self._proxy
except AttributeError:
ret = EntryProxy(self)
self._proxy = ret
return ret
def target_from_source(self, prefix, suffix, splitext=SCons.Util.splitext):
"""
Generates a target entry that corresponds to this entry (usually
a source file) with the specified prefix and suffix.
Note that this method can be overridden dynamically for generated
files that need different behavior. See Tool/swig.py for
an example.
"""
return self.dir.Entry(prefix + splitext(self.name)[0] + suffix)
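# A minimal usage sketch (the prefix/suffix values are illustrative): for a
# source node representing 'src/foo.c',
#
#     node.target_from_source('lib', '.o')
#
# returns the Entry for 'src/libfoo.o' in the same directory.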
def _Rfindalldirs_key(self, pathlist):
return pathlist
memoizer_counters.append(SCons.Memoize.CountDict('Rfindalldirs', _Rfindalldirs_key))
def Rfindalldirs(self, pathlist):
"""
Return all of the directories for a given path list, including
corresponding "backing" directories in any repositories.
The Node lookups are relative to this Node (typically a
directory), so memoizing the result saves cycles from looking
up the same path for each target in a given directory.
"""
try:
memo_dict = self._memo['Rfindalldirs']
except KeyError:
memo_dict = {}
self._memo['Rfindalldirs'] = memo_dict
else:
try:
return memo_dict[pathlist]
except KeyError:
pass
create_dir_relative_to_self = self.Dir
result = []
for path in pathlist:
if isinstance(path, SCons.Node.Node):
result.append(path)
else:
dir = create_dir_relative_to_self(path)
result.extend(dir.get_all_rdirs())
memo_dict[pathlist] = result
return result
def RDirs(self, pathlist):
"""Search for a list of directories in the Repository list."""
cwd = self.cwd or self.fs._cwd
return cwd.Rfindalldirs(pathlist)
memoizer_counters.append(SCons.Memoize.CountValue('rentry'))
def rentry(self):
try:
return self._memo['rentry']
except KeyError:
pass
result = self
if not self.exists():
norm_name = _my_normcase(self.name)
for dir in self.dir.get_all_rdirs():
try:
node = dir.entries[norm_name]
except KeyError:
if dir.entry_exists_on_disk(self.name):
result = dir.Entry(self.name)
break
self._memo['rentry'] = result
return result
def _glob1(self, pattern, ondisk=True, source=False, strings=False):
return []
class Entry(Base):
"""This is the class for generic Node.FS entries--that is, things
that could be a File or a Dir, but we're just not sure yet.
Consequently, the methods in this class really exist just to
transform their associated object into the right class when the
time comes, and then call the same-named method in the transformed
class."""
def diskcheck_match(self):
pass
def disambiguate(self, must_exist=None):
"""
"""
if self.isdir():
self.__class__ = Dir
self._morph()
elif self.isfile():
self.__class__ = File
self._morph()
self.clear()
else:
# There was nothing on-disk at this location, so look in
# the src directory.
#
# We can't just use self.srcnode() straight away because
# that would create an actual Node for this file in the src
# directory, and there might not be one. Instead, use the
# dir_on_disk() method to see if there's something on-disk
# with that name, in which case we can go ahead and call
# self.srcnode() to create the right type of entry.
srcdir = self.dir.srcnode()
if srcdir != self.dir and \
srcdir.entry_exists_on_disk(self.name) and \
self.srcnode().isdir():
self.__class__ = Dir
self._morph()
elif must_exist:
msg = "No such file or directory: '%s'" % self.abspath
raise SCons.Errors.UserError(msg)
else:
self.__class__ = File
self._morph()
self.clear()
return self
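# A minimal usage sketch ('fs' is an FS instance; the path is illustrative):
#
#     node = fs.Entry('build/output').disambiguate()
#     if node.isdir():
#         pass    # the Entry morphed into a Dir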
def rfile(self):
"""We're a generic Entry, but the caller is actually looking for
a File at this point, so morph into one."""
self.__class__ = File
self._morph()
self.clear()
return File.rfile(self)
def scanner_key(self):
return self.get_suffix()
def get_contents(self):
"""Fetch the contents of the entry. Returns the exact binary
contents of the file."""
try:
self = self.disambiguate(must_exist=1)
except SCons.Errors.UserError:
# There was nothing on disk with which to disambiguate
# this entry. Leave it as an Entry, but return a null
# string so calls to get_contents() in emitters and the
# like (e.g. in qt.py) don't have to disambiguate by hand
# or catch the exception.
return ''
else:
return self.get_contents()
def get_text_contents(self):
"""Fetch the decoded text contents of a Unicode encoded Entry.
Since this should return the text contents from the file
system, we check to see into what sort of subclass we should
morph this Entry."""
try:
self = self.disambiguate(must_exist=1)
except SCons.Errors.UserError:
# There was nothing on disk with which to disambiguate
# this entry. Leave it as an Entry, but return a null
# string so calls to get_text_contents() in emitters and
# the like (e.g. in qt.py) don't have to disambiguate by
# hand or catch the exception.
return ''
else:
return self.get_text_contents()
def must_be_same(self, klass):
"""Called to make sure a Node is a Dir. Since we're an
Entry, we can morph into one."""
if self.__class__ is not klass:
self.__class__ = klass
self._morph()
self.clear()
# The following methods can get called before the Taskmaster has
# had a chance to call disambiguate() directly to see if this Entry
# should really be a Dir or a File. We therefore use these to call
# disambiguate() transparently (from our caller's point of view).
#
# Right now, this minimal set of methods has been derived by just
# looking at some of the methods that will obviously be called early
# in any of the various Taskmasters' calling sequences, and then
# empirically figuring out which additional methods are necessary
# to make various tests pass.
def exists(self):
"""Return if the Entry exists. Check the file system to see
what we should turn into first. Assume a file if there's no
directory."""
return self.disambiguate().exists()
def rel_path(self, other):
d = self.disambiguate()
if d.__class__ is Entry:
raise Exception("rel_path() could not disambiguate File/Dir")
return d.rel_path(other)
def new_ninfo(self):
return self.disambiguate().new_ninfo()
def changed_since_last_build(self, target, prev_ni):
return self.disambiguate().changed_since_last_build(target, prev_ni)
def _glob1(self, pattern, ondisk=True, source=False, strings=False):
return self.disambiguate()._glob1(pattern, ondisk, source, strings)
def get_subst_proxy(self):
return self.disambiguate().get_subst_proxy()
# This is for later so we can differentiate between Entry the class and Entry
# the method of the FS class.
_classEntry = Entry
class LocalFS(object):
if SCons.Memoize.use_memoizer:
__metaclass__ = SCons.Memoize.Memoized_Metaclass
# This class implements an abstraction layer for operations involving
# a local file system. Essentially, this wraps any function in
# the os, os.path or shutil modules that we use to actually go do
# anything with or to the local file system.
#
# Note that there's a very good chance we'll refactor this part of
# the architecture in some way as we really implement the interface(s)
# for remote file system Nodes. For example, the right architecture
# might be to have this be a subclass instead of a base class.
# Nevertheless, we're using this as a first step in that direction.
#
# We're not using chdir() yet because the calling subclass method
# needs to use os.chdir() directly to avoid recursion. Will we
# really need this one?
#def chdir(self, path):
# return os.chdir(path)
def chmod(self, path, mode):
return os.chmod(path, mode)
def copy(self, src, dst):
return shutil.copy(src, dst)
def copy2(self, src, dst):
return shutil.copy2(src, dst)
def exists(self, path):
return os.path.exists(path)
def getmtime(self, path):
return os.path.getmtime(path)
def getsize(self, path):
return os.path.getsize(path)
def isdir(self, path):
return os.path.isdir(path)
def isfile(self, path):
return os.path.isfile(path)
def link(self, src, dst):
return os.link(src, dst)
def lstat(self, path):
return os.lstat(path)
def listdir(self, path):
return os.listdir(path)
def makedirs(self, path):
return os.makedirs(path)
def mkdir(self, path):
return os.mkdir(path)
def rename(self, old, new):
return os.rename(old, new)
def stat(self, path):
return os.stat(path)
def symlink(self, src, dst):
return os.symlink(src, dst)
def open(self, path):
return open(path)
def unlink(self, path):
return os.unlink(path)
if hasattr(os, 'symlink'):
def islink(self, path):
return os.path.islink(path)
else:
def islink(self, path):
return 0 # no symlinks
if hasattr(os, 'readlink'):
def readlink(self, file):
return os.readlink(file)
else:
def readlink(self, file):
return ''
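# A minimal sketch of why this indirection helps: tests (or a future remote
# implementation) can substitute the disk layer wholesale.  The subclass
# below is hypothetical and not part of SCons.
#
#     class _NoDiskFS(LocalFS):
#         def stat(self, path):
#             raise OSError(path)     # behave as if nothing exists on disk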
#class RemoteFS:
# # Skeleton for the obvious methods we might need from the
# # abstraction layer for a remote filesystem.
# def upload(self, local_src, remote_dst):
# pass
# def download(self, remote_src, local_dst):
# pass
class FS(LocalFS):
memoizer_counters = []
def __init__(self, path = None):
"""Initialize the Node.FS subsystem.
The supplied path is the top of the source tree, where we
expect to find the top-level build file. If no path is
supplied, the current directory is the default.
The path argument must be a valid absolute path.
"""
if __debug__: logInstanceCreation(self, 'Node.FS')
self._memo = {}
self.Root = {}
self.SConstruct_dir = None
self.max_drift = default_max_drift
self.Top = None
if path is None:
self.pathTop = os.getcwd()
else:
self.pathTop = path
self.defaultDrive = _my_normcase(os.path.splitdrive(self.pathTop)[0])
self.Top = self.Dir(self.pathTop)
self.Top.path = '.'
self.Top.tpath = '.'
self._cwd = self.Top
DirNodeInfo.fs = self
FileNodeInfo.fs = self
def set_SConstruct_dir(self, dir):
self.SConstruct_dir = dir
def get_max_drift(self):
return self.max_drift
def set_max_drift(self, max_drift):
self.max_drift = max_drift
def getcwd(self):
return self._cwd
def chdir(self, dir, change_os_dir=0):
"""Change the current working directory for lookups.
If change_os_dir is true, we will also change the "real" cwd
to match.
"""
curr=self._cwd
try:
if dir is not None:
self._cwd = dir
if change_os_dir:
os.chdir(dir.abspath)
except OSError:
self._cwd = curr
raise
def get_root(self, drive):
"""
Returns the root directory for the specified drive, creating
it if necessary.
"""
drive = _my_normcase(drive)
try:
return self.Root[drive]
except KeyError:
root = RootDir(drive, self)
self.Root[drive] = root
if not drive:
self.Root[self.defaultDrive] = root
elif drive == self.defaultDrive:
self.Root[''] = root
return root
def _lookup(self, p, directory, fsclass, create=1):
"""
The generic entry point for Node lookup with user-supplied data.
This translates arbitrary input into a canonical Node.FS object
of the specified fsclass. The general approach for strings is
to turn it into a fully normalized absolute path and then call
the root directory's lookup_abs() method for the heavy lifting.
If the path name begins with '#', it is unconditionally
interpreted relative to the top-level directory of this FS. '#'
is treated as a synonym for the top-level SConstruct directory,
much like '~' is treated as a synonym for the user's home
directory in a UNIX shell. So both '#foo' and '#/foo' refer
to the 'foo' subdirectory underneath the top-level SConstruct
directory.
If the path name is relative, then the path is looked up relative
to the specified directory, or the current directory (self._cwd,
typically the SConscript directory) if the specified directory
is None.
"""
if isinstance(p, Base):
# It's already a Node.FS object. Make sure it's the right
# class and return.
p.must_be_same(fsclass)
return p
# str(p) in case it's something like a proxy object
p = str(p)
initial_hash = (p[0:1] == '#')
if initial_hash:
# There was an initial '#', so we strip it and override
# whatever directory they may have specified with the
# top-level SConstruct directory.
p = p[1:]
directory = self.Top
if directory and not isinstance(directory, Dir):
directory = self.Dir(directory)
if do_splitdrive:
drive, p = os.path.splitdrive(p)
else:
drive = ''
if drive and not p:
# This causes a naked drive letter to be treated as a synonym
# for the root directory on that drive.
p = os.sep
absolute = os.path.isabs(p)
needs_normpath = needs_normpath_check.match(p)
if initial_hash or not absolute:
# This is a relative lookup, either to the top-level
# SConstruct directory (because of the initial '#') or to
# the current directory (the path name is not absolute).
# Add the string to the appropriate directory lookup path,
# after which the whole thing gets normalized.
if not directory:
directory = self._cwd
if p:
p = directory.labspath + '/' + p
else:
p = directory.labspath
if needs_normpath:
p = os.path.normpath(p)
if drive or absolute:
root = self.get_root(drive)
else:
if not directory:
directory = self._cwd
root = directory.root
if os.sep != '/':
p = p.replace(os.sep, '/')
return root._lookup_abs(p, fsclass, create)
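# A minimal usage sketch of the '#' convention documented above ('fs' is an
# FS instance; paths are illustrative):
#
#     fs.Entry('#src/foo.c')     # relative to the top-level SConstruct dir
#     fs.Entry('src/foo.c')      # relative to the current (SConscript) dir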
def Entry(self, name, directory = None, create = 1):
"""Look up or create a generic Entry node with the specified name.
If the name is a relative path (begins with ./, ../, or a file
name), then it is looked up relative to the supplied directory
node, or to the top level directory of the FS (supplied at
construction time) if no directory is supplied.
"""
return self._lookup(name, directory, Entry, create)
def File(self, name, directory = None, create = 1):
"""Look up or create a File node with the specified name. If
the name is a relative path (begins with ./, ../, or a file name),
then it is looked up relative to the supplied directory node,
or to the top level directory of the FS (supplied at construction
time) if no directory is supplied.
This method will raise TypeError if a directory is found at the
specified path.
"""
return self._lookup(name, directory, File, create)
def Dir(self, name, directory = None, create = True):
"""Look up or create a Dir node with the specified name. If
the name is a relative path (begins with ./, ../, or a file name),
then it is looked up relative to the supplied directory node,
or to the top level directory of the FS (supplied at construction
time) if no directory is supplied.
This method will raise TypeError if a normal file is found at the
specified path.
"""
return self._lookup(name, directory, Dir, create)
def VariantDir(self, variant_dir, src_dir, duplicate=1):
"""Link the supplied variant directory to the source directory
for purposes of building files."""
if not isinstance(src_dir, SCons.Node.Node):
src_dir = self.Dir(src_dir)
if not isinstance(variant_dir, SCons.Node.Node):
variant_dir = self.Dir(variant_dir)
if src_dir.is_under(variant_dir):
raise SCons.Errors.UserError("Source directory cannot be under variant directory.")
if variant_dir.srcdir:
if variant_dir.srcdir == src_dir:
return # We already did this.
raise SCons.Errors.UserError("'%s' already has a source directory: '%s'."%(variant_dir, variant_dir.srcdir))
variant_dir.link(src_dir, duplicate)
def Repository(self, *dirs):
"""Specify Repository directories to search."""
for d in dirs:
if not isinstance(d, SCons.Node.Node):
d = self.Dir(d)
self.Top.addRepository(d)
def variant_dir_target_climb(self, orig, dir, tail):
"""Create targets in corresponding variant directories
Climb the directory tree, and look up path names
relative to any linked variant directories we find.
Even though this loops and walks up the tree, we don't memoize
the return value because this is really only used to process
the command-line targets.
"""
targets = []
message = None
fmt = "building associated VariantDir targets: %s"
start_dir = dir
while dir:
for bd in dir.variant_dirs:
if start_dir.is_under(bd):
# If already in the build-dir location, don't reflect
return [orig], fmt % str(orig)
p = os.path.join(bd.path, *tail)
targets.append(self.Entry(p))
tail = [dir.name] + tail
dir = dir.up()
if targets:
message = fmt % ' '.join(map(str, targets))
return targets, message
def Glob(self, pathname, ondisk=True, source=True, strings=False, cwd=None):
"""
Globs for entries matching 'pathname', relative to 'cwd' (this FS's
current directory by default).  This is mainly a shim layer over Dir.glob().
"""
if cwd is None:
cwd = self.getcwd()
return cwd.glob(pathname, ondisk, source, strings)
class DirNodeInfo(SCons.Node.NodeInfoBase):
# This should get reset by the FS initialization.
current_version_id = 1
fs = None
def str_to_node(self, s):
top = self.fs.Top
root = top.root
if do_splitdrive:
drive, s = os.path.splitdrive(s)
if drive:
root = self.fs.get_root(drive)
if not os.path.isabs(s):
s = top.labspath + '/' + s
return root._lookup_abs(s, Entry)
class DirBuildInfo(SCons.Node.BuildInfoBase):
current_version_id = 1
glob_magic_check = re.compile('[*?[]')
def has_glob_magic(s):
return glob_magic_check.search(s) is not None
class Dir(Base):
"""A class for directories in a file system.
"""
memoizer_counters = []
NodeInfo = DirNodeInfo
BuildInfo = DirBuildInfo
def __init__(self, name, directory, fs):
if __debug__: logInstanceCreation(self, 'Node.FS.Dir')
Base.__init__(self, name, directory, fs)
self._morph()
def _morph(self):
"""Turn a file system Node (either a freshly initialized directory
object or a separate Entry object) into a proper directory object.
Set up this directory's entries and hook it into the file
system tree. Specify that directories (this Node) don't use
signatures for calculating whether they're current.
"""
self.repositories = []
self.srcdir = None
self.entries = {}
self.entries['.'] = self
self.entries['..'] = self.dir
self.cwd = self
self.searched = 0
self._sconsign = None
self.variant_dirs = []
self.root = self.dir.root
# Don't just reset the executor, replace its action list,
# because it might have some pre-or post-actions that need to
# be preserved.
self.builder = get_MkdirBuilder()
self.get_executor().set_action_list(self.builder.action)
def diskcheck_match(self):
diskcheck_match(self, self.isfile,
"File %s found where directory expected.")
def __clearRepositoryCache(self, duplicate=None):
"""Called when we change the repository(ies) for a directory.
This clears any cached information that is invalidated by changing
the repository."""
for node in self.entries.values():
if node != self.dir:
if node != self and isinstance(node, Dir):
node.__clearRepositoryCache(duplicate)
else:
node.clear()
try:
del node._srcreps
except AttributeError:
pass
if duplicate is not None:
node.duplicate=duplicate
def __resetDuplicate(self, node):
if node != self:
node.duplicate = node.get_dir().duplicate
def Entry(self, name):
"""
Looks up or creates an entry node named 'name' relative to
this directory.
"""
return self.fs.Entry(name, self)
def Dir(self, name, create=True):
"""
Looks up or creates a directory node named 'name' relative to
this directory.
"""
return self.fs.Dir(name, self, create)
def File(self, name):
"""
Looks up or creates a file node named 'name' relative to
this directory.
"""
return self.fs.File(name, self)
def _lookup_rel(self, name, klass, create=1):
"""
Looks up a *normalized* relative path name, relative to this
directory.
This method is intended for use by internal lookups with
already-normalized path data. For general-purpose lookups,
use the Entry(), Dir() and File() methods above.
This method does *no* input checking and will die or give
incorrect results if it's passed a non-normalized path name (e.g.,
a path containing '..'), an absolute path name, a top-relative
('#foo') path name, or any kind of object.
"""
name = self.entry_labspath(name)
return self.root._lookup_abs(name, klass, create)
def link(self, srcdir, duplicate):
"""Set this directory as the variant directory for the
supplied source directory."""
self.srcdir = srcdir
self.duplicate = duplicate
self.__clearRepositoryCache(duplicate)
srcdir.variant_dirs.append(self)
def getRepositories(self):
"""Returns a list of repositories for this directory.
"""
if self.srcdir and not self.duplicate:
return self.srcdir.get_all_rdirs() + self.repositories
return self.repositories
memoizer_counters.append(SCons.Memoize.CountValue('get_all_rdirs'))
def get_all_rdirs(self):
try:
return list(self._memo['get_all_rdirs'])
except KeyError:
pass
result = [self]
fname = '.'
dir = self
while dir:
for rep in dir.getRepositories():
result.append(rep.Dir(fname))
if fname == '.':
fname = dir.name
else:
fname = dir.name + os.sep + fname
dir = dir.up()
self._memo['get_all_rdirs'] = list(result)
return result
def addRepository(self, dir):
if dir != self and not dir in self.repositories:
self.repositories.append(dir)
dir.tpath = '.'
self.__clearRepositoryCache()
def up(self):
return self.entries['..']
def _rel_path_key(self, other):
return str(other)
memoizer_counters.append(SCons.Memoize.CountDict('rel_path', _rel_path_key))
def rel_path(self, other):
"""Return a path to "other" relative to this directory.
"""
# This complicated and expensive method, which constructs relative
# paths between arbitrary Node.FS objects, is no longer used
# by SCons itself. It was introduced to store dependency paths
# in .sconsign files relative to the target, but that ended up
# being significantly inefficient.
#
# We're continuing to support the method because some SConstruct
# files out there started using it when it was available, and
# we're all about backwards compatibility.
try:
memo_dict = self._memo['rel_path']
except KeyError:
memo_dict = {}
self._memo['rel_path'] = memo_dict
else:
try:
return memo_dict[other]
except KeyError:
pass
if self is other:
result = '.'
elif not other in self.path_elements:
try:
other_dir = other.get_dir()
except AttributeError:
result = str(other)
else:
if other_dir is None:
result = other.name
else:
dir_rel_path = self.rel_path(other_dir)
if dir_rel_path == '.':
result = other.name
else:
result = dir_rel_path + os.sep + other.name
else:
i = self.path_elements.index(other) + 1
path_elems = ['..'] * (len(self.path_elements) - i) \
+ [n.name for n in other.path_elements[i:]]
result = os.sep.join(path_elems)
memo_dict[other] = result
return result
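# A minimal usage sketch (both nodes are illustrative, pre-existing nodes):
#
#     build_dir.rel_path(src_file)    # e.g. '../src/foo.c'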
def get_env_scanner(self, env, kw={}):
import SCons.Defaults
return SCons.Defaults.DirEntryScanner
def get_target_scanner(self):
import SCons.Defaults
return SCons.Defaults.DirEntryScanner
def get_found_includes(self, env, scanner, path):
"""Return this directory's implicit dependencies.
We don't bother caching the results because the scan typically
shouldn't be requested more than once (as opposed to scanning
.h file contents, which can be requested as many times as the
file is #included by other files).
"""
if not scanner:
return []
# Clear cached info for this Dir. If we already visited this
# directory on our walk down the tree (because we didn't know at
# that point it was being used as the source for another Node)
# then we may have calculated build signature before realizing
# we had to scan the disk. Now that we have to, though, we need
# to invalidate the old calculated signature so that any node
# dependent on our directory structure gets one that includes
# info about everything on disk.
self.clear()
return scanner(self, env, path)
#
# Taskmaster interface subsystem
#
def prepare(self):
pass
def build(self, **kw):
"""A null "builder" for directories."""
global MkdirBuilder
if self.builder is not MkdirBuilder:
SCons.Node.Node.build(self, **kw)
#
#
#
def _create(self):
"""Create this directory, silently and without worrying about
whether the builder is the default or not."""
listDirs = []
parent = self
while parent:
if parent.exists():
break
listDirs.append(parent)
p = parent.up()
if p is None:
# Don't use a while/else construct for this condition because
# when we get here, parent is None and has no .path attribute.
raise SCons.Errors.StopError(parent.path)
parent = p
listDirs.reverse()
for dirnode in listDirs:
try:
# Don't call dirnode.build(), call the base Node method
# directly because we definitely *must* create this
# directory. The dirnode.build() method will suppress
# the build if it's the default builder.
SCons.Node.Node.build(dirnode)
dirnode.get_executor().nullify()
# The build() action may or may not have actually
# created the directory, depending on whether the -n
# option was used or not. Delete the _exists and
# _rexists attributes so they can be reevaluated.
dirnode.clear()
except OSError:
pass
def multiple_side_effect_has_builder(self):
global MkdirBuilder
return self.builder is not MkdirBuilder and self.has_builder()
def alter_targets(self):
"""Return any corresponding targets in a variant directory.
"""
return self.fs.variant_dir_target_climb(self, self, [])
def scanner_key(self):
"""A directory does not get scanned."""
return None
def get_text_contents(self):
"""We already emit things in text, so just return the binary
version."""
return self.get_contents()
def get_contents(self):
"""Return content signatures and names of all our children
separated by new-lines. Ensure that the nodes are sorted."""
contents = []
for node in sorted(self.children(), key=lambda t: t.name):
contents.append('%s %s\n' % (node.get_csig(), node.name))
return ''.join(contents)
def get_csig(self):
"""Compute the content signature for Directory nodes. In
general, this is not needed and the content signature is not
stored in the DirNodeInfo. However, if get_contents on a Dir
node is called which has a child directory, the child
directory should return the hash of its contents."""
contents = self.get_contents()
return SCons.Util.MD5signature(contents)
def do_duplicate(self, src):
pass
changed_since_last_build = SCons.Node.Node.state_has_changed
def is_up_to_date(self):
"""If any child is not up-to-date, then this directory isn't,
either."""
if self.builder is not MkdirBuilder and not self.exists():
return 0
up_to_date = SCons.Node.up_to_date
for kid in self.children():
if kid.get_state() > up_to_date:
return 0
return 1
def rdir(self):
if not self.exists():
norm_name = _my_normcase(self.name)
for dir in self.dir.get_all_rdirs():
try: node = dir.entries[norm_name]
except KeyError: node = dir.dir_on_disk(self.name)
if node and node.exists() and \
(isinstance(dir, Dir) or isinstance(dir, Entry)):
return node
return self
def sconsign(self):
"""Return the .sconsign file info for this directory,
creating it first if necessary."""
if not self._sconsign:
import SCons.SConsign
self._sconsign = SCons.SConsign.ForDirectory(self)
return self._sconsign
def srcnode(self):
"""Dir has a special need for srcnode()...if we
have a srcdir attribute set, then that *is* our srcnode."""
if self.srcdir:
return self.srcdir
return Base.srcnode(self)
def get_timestamp(self):
"""Return the latest timestamp from among our children"""
stamp = 0
for kid in self.children():
if kid.get_timestamp() > stamp:
stamp = kid.get_timestamp()
return stamp
def entry_abspath(self, name):
return self.abspath + os.sep + name
def entry_labspath(self, name):
return self.labspath + '/' + name
def entry_path(self, name):
return self.path + os.sep + name
def entry_tpath(self, name):
return self.tpath + os.sep + name
def entry_exists_on_disk(self, name):
try:
d = self.on_disk_entries
except AttributeError:
d = {}
try:
entries = os.listdir(self.abspath)
except OSError:
pass
else:
for entry in map(_my_normcase, entries):
d[entry] = True
self.on_disk_entries = d
if sys.platform == 'win32':
name = _my_normcase(name)
result = d.get(name)
if result is None:
# Belt-and-suspenders for Windows: check directly for
# 8.3 file names that don't show up in os.listdir().
result = os.path.exists(self.abspath + os.sep + name)
d[name] = result
return result
else:
return name in d
memoizer_counters.append(SCons.Memoize.CountValue('srcdir_list'))
def srcdir_list(self):
try:
return self._memo['srcdir_list']
except KeyError:
pass
result = []
dirname = '.'
dir = self
while dir:
if dir.srcdir:
result.append(dir.srcdir.Dir(dirname))
dirname = dir.name + os.sep + dirname
dir = dir.up()
self._memo['srcdir_list'] = result
return result
def srcdir_duplicate(self, name):
for dir in self.srcdir_list():
if self.is_under(dir):
# We shouldn't source from something in the build path;
# variant_dir is probably under src_dir, in which case
# we are reflecting.
break
if dir.entry_exists_on_disk(name):
srcnode = dir.Entry(name).disambiguate()
if self.duplicate:
node = self.Entry(name).disambiguate()
node.do_duplicate(srcnode)
return node
else:
return srcnode
return None
def _srcdir_find_file_key(self, filename):
return filename
memoizer_counters.append(SCons.Memoize.CountDict('srcdir_find_file', _srcdir_find_file_key))
def srcdir_find_file(self, filename):
try:
memo_dict = self._memo['srcdir_find_file']
except KeyError:
memo_dict = {}
self._memo['srcdir_find_file'] = memo_dict
else:
try:
return memo_dict[filename]
except KeyError:
pass
def func(node):
if (isinstance(node, File) or isinstance(node, Entry)) and \
(node.is_derived() or node.exists()):
return node
return None
norm_name = _my_normcase(filename)
for rdir in self.get_all_rdirs():
try: node = rdir.entries[norm_name]
except KeyError: node = rdir.file_on_disk(filename)
else: node = func(node)
if node:
result = (node, self)
memo_dict[filename] = result
return result
for srcdir in self.srcdir_list():
for rdir in srcdir.get_all_rdirs():
try: node = rdir.entries[norm_name]
except KeyError: node = rdir.file_on_disk(filename)
else: node = func(node)
if node:
result = (File(filename, self, self.fs), srcdir)
memo_dict[filename] = result
return result
result = (None, None)
memo_dict[filename] = result
return result
def dir_on_disk(self, name):
if self.entry_exists_on_disk(name):
try: return self.Dir(name)
except TypeError: pass
node = self.srcdir_duplicate(name)
if isinstance(node, File):
return None
return node
def file_on_disk(self, name):
if self.entry_exists_on_disk(name) or \
diskcheck_rcs(self, name) or \
diskcheck_sccs(self, name):
try: return self.File(name)
except TypeError: pass
node = self.srcdir_duplicate(name)
if isinstance(node, Dir):
return None
return node
def walk(self, func, arg):
"""
Walk this directory tree by calling the specified function
for each directory in the tree.
This behaves like the os.path.walk() function, but for in-memory
Node.FS.Dir objects. The function takes the same arguments as
the functions passed to os.path.walk():
func(arg, dirname, fnames)
Except that "dirname" will actually be the directory *Node*,
not the string. The '.' and '..' entries are excluded from
fnames. The fnames list may be modified in-place to filter the
subdirectories visited or otherwise impose a specific order.
The "arg" argument is always passed to func() and may be used
in any way (or ignored, passing None is common).
"""
entries = self.entries
names = list(entries.keys())
names.remove('.')
names.remove('..')
func(arg, self, names)
for dirname in [n for n in names if isinstance(entries[n], Dir)]:
entries[dirname].walk(func, arg)
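# A minimal usage sketch ('top' is an existing Dir node; the callback has the
# same shape as an os.path.walk() visitor):
#
#     def _show(arg, dirnode, names):
#         print dirnode.path, names
#     top.walk(_show, None)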
def glob(self, pathname, ondisk=True, source=False, strings=False):
"""
Returns a list of Nodes (or strings) matching a specified
pathname pattern.
Pathname patterns follow UNIX shell semantics: * matches
any-length strings of any characters, ? matches any character,
and [] can enclose lists or ranges of characters. Matches do
not span directory separators.
The matches take into account Repositories, returning local
Nodes if a corresponding entry exists in a Repository (either
an in-memory Node or something on disk).
By default, the glob() function matches entries that exist
on-disk, in addition to in-memory Nodes. Setting the "ondisk"
argument to False (or some other non-true value) causes the glob()
function to only match in-memory Nodes. The default behavior is
to return both the on-disk and in-memory Nodes.
The "source" argument, when true, specifies that corresponding
source Nodes must be returned if you're globbing in a build
directory (initialized with VariantDir()). The default behavior
is to return Nodes local to the VariantDir().
The "strings" argument, when true, returns the matches as strings,
not Nodes. The strings are path names relative to this directory.
The underlying algorithm is adapted from the glob.glob() function
in the Python library (but heavily modified), and uses fnmatch()
under the covers.
"""
dirname, basename = os.path.split(pathname)
if not dirname:
return sorted(self._glob1(basename, ondisk, source, strings),
key=lambda t: str(t))
if has_glob_magic(dirname):
list = self.glob(dirname, ondisk, source, strings=False)
else:
list = [self.Dir(dirname, create=True)]
result = []
for dir in list:
r = dir._glob1(basename, ondisk, source, strings)
if strings:
r = [os.path.join(str(dir), x) for x in r]
result.extend(r)
return sorted(result, key=lambda a: str(a))
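# A minimal usage sketch ('some_dir' is an existing Dir node; patterns are
# illustrative):
#
#     sources = some_dir.glob('*.c')                     # list of Nodes
#     names   = some_dir.glob('src/*.h', strings=True)   # list of path strings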
def _glob1(self, pattern, ondisk=True, source=False, strings=False):
"""
Globs for and returns a list of entry names matching a single
pattern in this directory.
This searches any repositories and source directories for
corresponding entries and returns a Node (or string) relative
to the current directory if an entry is found anywhere.
TODO: handle pattern with no wildcard
"""
search_dir_list = self.get_all_rdirs()
for srcdir in self.srcdir_list():
search_dir_list.extend(srcdir.get_all_rdirs())
selfEntry = self.Entry
names = []
for dir in search_dir_list:
# We use the .name attribute from the Node because the keys of
# the dir.entries dictionary are normalized (that is, all upper
# case) on case-insensitive systems like Windows.
node_names = [ v.name for k, v in dir.entries.items()
if k not in ('.', '..') ]
names.extend(node_names)
if not strings:
# Make sure the working directory (self) actually has
# entries for all Nodes in repositories or variant dirs.
for name in node_names: selfEntry(name)
if ondisk:
try:
disk_names = os.listdir(dir.abspath)
except os.error:
continue
names.extend(disk_names)
if not strings:
# We're going to return corresponding Nodes in
# the local directory, so we need to make sure
# those Nodes exist. We only want to create
# Nodes for the entries that will match the
# specified pattern, though, which means we
# need to filter the list here, even though
# the overall list will also be filtered later,
# after we exit this loop.
if pattern[0] != '.':
#disk_names = [ d for d in disk_names if d[0] != '.' ]
disk_names = [x for x in disk_names if x[0] != '.']
disk_names = fnmatch.filter(disk_names, pattern)
dirEntry = dir.Entry
for name in disk_names:
# Add './' before disk filename so that '#' at
# beginning of filename isn't interpreted.
name = './' + name
node = dirEntry(name).disambiguate()
n = selfEntry(name)
if n.__class__ != node.__class__:
n.__class__ = node.__class__
n._morph()
names = set(names)
if pattern[0] != '.':
#names = [ n for n in names if n[0] != '.' ]
names = [x for x in names if x[0] != '.']
names = fnmatch.filter(names, pattern)
if strings:
return names
#return [ self.entries[_my_normcase(n)] for n in names ]
return [self.entries[_my_normcase(n)] for n in names]
class RootDir(Dir):
"""A class for the root directory of a file system.
This is the same as a Dir class, except that the path separator
('/' or '\\') is actually part of the name, so we don't need to
add a separator when creating the path names of entries within
this directory.
"""
def __init__(self, name, fs):
if __debug__: logInstanceCreation(self, 'Node.FS.RootDir')
# We're going to be our own parent directory (".." entry and .dir
# attribute) so we have to set up some values so Base.__init__()
# won't gag when it calls some of our methods.
self.abspath = ''
self.labspath = ''
self.path = ''
self.tpath = ''
self.path_elements = []
self.duplicate = 0
self.root = self
Base.__init__(self, name, self, fs)
# Now set our paths to what we really want them to be: the
# initial drive letter (the name) plus the directory separator,
# except for the "lookup abspath," which does not have the
# drive letter.
self.abspath = name + os.sep
self.labspath = ''
self.path = name + os.sep
self.tpath = name + os.sep
self._morph()
self._lookupDict = {}
# The // and os.sep + os.sep entries are necessary because
# os.path.normpath() seems to preserve double slashes at the
# beginning of a path (presumably for UNC path names), but
# collapses triple slashes to a single slash.
self._lookupDict[''] = self
self._lookupDict['/'] = self
self._lookupDict['//'] = self
self._lookupDict[os.sep] = self
self._lookupDict[os.sep + os.sep] = self
def must_be_same(self, klass):
if klass is Dir:
return
Base.must_be_same(self, klass)
def _lookup_abs(self, p, klass, create=1):
"""
Fast (?) lookup of a *normalized* absolute path.
This method is intended for use by internal lookups with
already-normalized path data. For general-purpose lookups,
use the FS.Entry(), FS.Dir() or FS.File() methods.
The caller is responsible for making sure we're passed a
normalized absolute path; we merely let Python's dictionary look
up and return the One True Node.FS object for the path.
If a Node for the specified "p" doesn't already exist, and
"create" is specified, the Node may be created after recursive
invocation to find or create the parent directory or directories.
"""
k = _my_normcase(p)
try:
result = self._lookupDict[k]
except KeyError:
if not create:
msg = "No such file or directory: '%s' in '%s' (and create is False)" % (p, str(self))
raise SCons.Errors.UserError(msg)
# There is no Node for this path name, and we're allowed
# to create it.
dir_name, file_name = os.path.split(p)
dir_node = self._lookup_abs(dir_name, Dir)
result = klass(file_name, dir_node, self.fs)
# Double-check on disk (as configured) that the Node we
# created matches whatever is out there in the real world.
result.diskcheck_match()
self._lookupDict[k] = result
dir_node.entries[_my_normcase(file_name)] = result
dir_node.implicit = None
else:
# There is already a Node for this path name. Allow it to
# complain if we were looking for an inappropriate type.
result.must_be_same(klass)
return result
def __str__(self):
return self.abspath
def entry_abspath(self, name):
return self.abspath + name
def entry_labspath(self, name):
return '/' + name
def entry_path(self, name):
return self.path + name
def entry_tpath(self, name):
return self.tpath + name
def is_under(self, dir):
if self is dir:
return 1
else:
return 0
def up(self):
return None
def get_dir(self):
return None
def src_builder(self):
return _null
class FileNodeInfo(SCons.Node.NodeInfoBase):
current_version_id = 1
field_list = ['csig', 'timestamp', 'size']
# This should get reset by the FS initialization.
fs = None
def str_to_node(self, s):
top = self.fs.Top
root = top.root
if do_splitdrive:
drive, s = os.path.splitdrive(s)
if drive:
root = self.fs.get_root(drive)
if not os.path.isabs(s):
s = top.labspath + '/' + s
return root._lookup_abs(s, Entry)
class FileBuildInfo(SCons.Node.BuildInfoBase):
current_version_id = 1
def convert_to_sconsign(self):
"""
Converts this FileBuildInfo object for writing to a .sconsign file
This replaces each Node in our various dependency lists with its
usual string representation: relative to the top-level SConstruct
directory, or an absolute path if it's outside.
"""
if os.sep == '/':
node_to_str = str
else:
def node_to_str(n):
try:
s = n.path
except AttributeError:
s = str(n)
else:
s = s.replace(os.sep, '/')
return s
for attr in ['bsources', 'bdepends', 'bimplicit']:
try:
val = getattr(self, attr)
except AttributeError:
pass
else:
setattr(self, attr, list(map(node_to_str, val)))
def convert_from_sconsign(self, dir, name):
"""
Converts a newly-read FileBuildInfo object for in-SCons use
For normal up-to-date checking, we don't have any conversion to
perform--but we're leaving this method here to make that clear.
"""
pass
def prepare_dependencies(self):
"""
Prepares a FileBuildInfo object for explaining what changed
The bsources, bdepends and bimplicit lists have all been
stored on disk as paths relative to the top-level SConstruct
directory. Convert the strings to actual Nodes (for use by the
--debug=explain code and --implicit-cache).
"""
attrs = [
('bsources', 'bsourcesigs'),
('bdepends', 'bdependsigs'),
('bimplicit', 'bimplicitsigs'),
]
for (nattr, sattr) in attrs:
try:
strings = getattr(self, nattr)
nodeinfos = getattr(self, sattr)
except AttributeError:
continue
nodes = []
for s, ni in zip(strings, nodeinfos):
if not isinstance(s, SCons.Node.Node):
s = ni.str_to_node(s)
nodes.append(s)
setattr(self, nattr, nodes)
def format(self, names=0):
result = []
bkids = self.bsources + self.bdepends + self.bimplicit
bkidsigs = self.bsourcesigs + self.bdependsigs + self.bimplicitsigs
for bkid, bkidsig in zip(bkids, bkidsigs):
result.append(str(bkid) + ': ' +
' '.join(bkidsig.format(names=names)))
result.append('%s [%s]' % (self.bactsig, self.bact))
return '\n'.join(result)
class File(Base):
"""A class for files in a file system.
"""
memoizer_counters = []
NodeInfo = FileNodeInfo
BuildInfo = FileBuildInfo
md5_chunksize = 64
def diskcheck_match(self):
diskcheck_match(self, self.isdir,
"Directory %s found where file expected.")
def __init__(self, name, directory, fs):
if __debug__: logInstanceCreation(self, 'Node.FS.File')
Base.__init__(self, name, directory, fs)
self._morph()
def Entry(self, name):
"""Create an entry node named 'name' relative to
the directory of this file."""
return self.dir.Entry(name)
def Dir(self, name, create=True):
"""Create a directory node named 'name' relative to
the directory of this file."""
return self.dir.Dir(name, create=create)
def Dirs(self, pathlist):
"""Create a list of directories relative to the SConscript
directory of this file."""
return [self.Dir(p) for p in pathlist]
def File(self, name):
"""Create a file node named 'name' relative to
the directory of this file."""
return self.dir.File(name)
#def generate_build_dict(self):
# """Return an appropriate dictionary of values for building
# this File."""
# return {'Dir' : self.Dir,
# 'File' : self.File,
# 'RDirs' : self.RDirs}
def _morph(self):
"""Turn a file system node into a File object."""
self.scanner_paths = {}
if not hasattr(self, '_local'):
self._local = 0
# If there was already a Builder set on this entry, then
# we need to make sure we call the target-decider function,
# not the source-decider. Reaching in and doing this by hand
# is a little bogus. We'd prefer to handle this by adding
# an Entry.builder_set() method that disambiguates like the
# other methods, but that starts running into problems with the
# fragile way we initialize Dir Nodes with their Mkdir builders,
# yet still allow them to be overridden by the user. Since it's
# not clear right now how to fix that, stick with what works
# until it becomes clear...
if self.has_builder():
self.changed_since_last_build = self.decide_target
def scanner_key(self):
return self.get_suffix()
def get_contents(self):
if not self.rexists():
return ''
fname = self.rfile().abspath
try:
contents = open(fname, "rb").read()
except EnvironmentError, e:
if not e.filename:
e.filename = fname
raise
return contents
# This attempts to figure out what the encoding of the text is
# based upon the BOM bytes, and then decodes the contents so that
# it's a valid python string.
def get_text_contents(self):
contents = self.get_contents()
# The behavior of various decode() methods and functions
# w.r.t. the initial BOM bytes is different for different
# encodings and/or Python versions. ('utf-8' does not strip
# them, but has a 'utf-8-sig' which does; 'utf-16' seems to
# strip them; etc.) Just sidestep all the complication by
# explicitly stripping the BOM before we decode().
if contents.startswith(codecs.BOM_UTF8):
return contents[len(codecs.BOM_UTF8):].decode('utf-8')
if contents.startswith(codecs.BOM_UTF16_LE):
return contents[len(codecs.BOM_UTF16_LE):].decode('utf-16-le')
if contents.startswith(codecs.BOM_UTF16_BE):
return contents[len(codecs.BOM_UTF16_BE):].decode('utf-16-be')
return contents
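    # Example of the BOM handling above (hypothetical byte strings):
    #
    #   '\xef\xbb\xbfhi'       -> u'hi'   (UTF-8 BOM stripped, then decoded)
    #   '\xff\xfeh\x00i\x00'   -> u'hi'   (UTF-16-LE BOM stripped, then decoded)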
def get_content_hash(self):
"""
Compute and return the MD5 hash for this file.
"""
if not self.rexists():
return SCons.Util.MD5signature('')
fname = self.rfile().abspath
try:
cs = SCons.Util.MD5filesignature(fname,
chunksize=SCons.Node.FS.File.md5_chunksize*1024)
except EnvironmentError, e:
if not e.filename:
e.filename = fname
raise
return cs
memoizer_counters.append(SCons.Memoize.CountValue('get_size'))
def get_size(self):
try:
return self._memo['get_size']
except KeyError:
pass
if self.rexists():
size = self.rfile().getsize()
else:
size = 0
self._memo['get_size'] = size
return size
memoizer_counters.append(SCons.Memoize.CountValue('get_timestamp'))
def get_timestamp(self):
try:
return self._memo['get_timestamp']
except KeyError:
pass
if self.rexists():
timestamp = self.rfile().getmtime()
else:
timestamp = 0
self._memo['get_timestamp'] = timestamp
return timestamp
def store_info(self):
# Merge our build information into the already-stored entry.
        # This accommodates "chained builds" where a file that's a target
# in one build (SConstruct file) is a source in a different build.
# See test/chained-build.py for the use case.
if do_store_info:
self.dir.sconsign().store_info(self.name, self)
convert_copy_attrs = [
'bsources',
'bimplicit',
'bdepends',
'bact',
'bactsig',
'ninfo',
]
convert_sig_attrs = [
'bsourcesigs',
'bimplicitsigs',
'bdependsigs',
]
def convert_old_entry(self, old_entry):
# Convert a .sconsign entry from before the Big Signature
# Refactoring, doing what we can to convert its information
# to the new .sconsign entry format.
#
# The old format looked essentially like this:
#
# BuildInfo
# .ninfo (NodeInfo)
# .bsig
# .csig
# .timestamp
# .size
# .bsources
# .bsourcesigs ("signature" list)
# .bdepends
# .bdependsigs ("signature" list)
# .bimplicit
# .bimplicitsigs ("signature" list)
# .bact
# .bactsig
#
# The new format looks like this:
#
# .ninfo (NodeInfo)
# .bsig
# .csig
# .timestamp
# .size
# .binfo (BuildInfo)
# .bsources
# .bsourcesigs (NodeInfo list)
# .bsig
# .csig
# .timestamp
# .size
# .bdepends
# .bdependsigs (NodeInfo list)
# .bsig
# .csig
# .timestamp
# .size
# .bimplicit
# .bimplicitsigs (NodeInfo list)
# .bsig
# .csig
# .timestamp
# .size
# .bact
# .bactsig
#
# The basic idea of the new structure is that a NodeInfo always
# holds all available information about the state of a given Node
# at a certain point in time. The various .b*sigs lists can just
# be a list of pointers to the .ninfo attributes of the different
# dependent nodes, without any copying of information until it's
# time to pickle it for writing out to a .sconsign file.
#
# The complicating issue is that the *old* format only stored one
# "signature" per dependency, based on however the *last* build
# was configured. We don't know from just looking at it whether
# it was a build signature, a content signature, or a timestamp
# "signature". Since we no longer use build signatures, the
# best we can do is look at the length and if it's thirty two,
# assume that it was (or might have been) a content signature.
# If it was actually a build signature, then it will cause a
# rebuild anyway when it doesn't match the new content signature,
# but that's probably the best we can do.
import SCons.SConsign
new_entry = SCons.SConsign.SConsignEntry()
new_entry.binfo = self.new_binfo()
binfo = new_entry.binfo
for attr in self.convert_copy_attrs:
try:
value = getattr(old_entry, attr)
except AttributeError:
continue
setattr(binfo, attr, value)
delattr(old_entry, attr)
for attr in self.convert_sig_attrs:
try:
sig_list = getattr(old_entry, attr)
except AttributeError:
continue
value = []
for sig in sig_list:
ninfo = self.new_ninfo()
if len(sig) == 32:
ninfo.csig = sig
else:
ninfo.timestamp = sig
value.append(ninfo)
setattr(binfo, attr, value)
delattr(old_entry, attr)
return new_entry
memoizer_counters.append(SCons.Memoize.CountValue('get_stored_info'))
def get_stored_info(self):
try:
return self._memo['get_stored_info']
except KeyError:
pass
try:
sconsign_entry = self.dir.sconsign().get_entry(self.name)
except (KeyError, EnvironmentError):
import SCons.SConsign
sconsign_entry = SCons.SConsign.SConsignEntry()
sconsign_entry.binfo = self.new_binfo()
sconsign_entry.ninfo = self.new_ninfo()
else:
if isinstance(sconsign_entry, FileBuildInfo):
# This is a .sconsign file from before the Big Signature
# Refactoring; convert it as best we can.
sconsign_entry = self.convert_old_entry(sconsign_entry)
try:
delattr(sconsign_entry.ninfo, 'bsig')
except AttributeError:
pass
self._memo['get_stored_info'] = sconsign_entry
return sconsign_entry
def get_stored_implicit(self):
binfo = self.get_stored_info().binfo
binfo.prepare_dependencies()
try: return binfo.bimplicit
except AttributeError: return None
def rel_path(self, other):
return self.dir.rel_path(other)
def _get_found_includes_key(self, env, scanner, path):
return (id(env), id(scanner), path)
memoizer_counters.append(SCons.Memoize.CountDict('get_found_includes', _get_found_includes_key))
def get_found_includes(self, env, scanner, path):
"""Return the included implicit dependencies in this file.
Cache results so we only scan the file once per path
regardless of how many times this information is requested.
"""
memo_key = (id(env), id(scanner), path)
try:
memo_dict = self._memo['get_found_includes']
except KeyError:
memo_dict = {}
self._memo['get_found_includes'] = memo_dict
else:
try:
return memo_dict[memo_key]
except KeyError:
pass
if scanner:
# result = [n.disambiguate() for n in scanner(self, env, path)]
result = scanner(self, env, path)
result = [N.disambiguate() for N in result]
else:
result = []
memo_dict[memo_key] = result
return result
def _createDir(self):
# ensure that the directories for this node are
# created.
self.dir._create()
def push_to_cache(self):
"""Try to push the node into a cache
"""
# This should get called before the Nodes' .built() method is
# called, which would clear the build signature if the file has
# a source scanner.
#
# We have to clear the local memoized values *before* we push
# the node to cache so that the memoization of the self.exists()
# return value doesn't interfere.
if self.nocache:
return
self.clear_memoized_values()
if self.exists():
self.get_build_env().get_CacheDir().push(self)
def retrieve_from_cache(self):
"""Try to retrieve the node's content from a cache
This method is called from multiple threads in a parallel build,
so only do thread safe stuff here. Do thread unsafe stuff in
built().
Returns true iff the node was successfully retrieved.
"""
if self.nocache:
return None
if not self.is_derived():
return None
return self.get_build_env().get_CacheDir().retrieve(self)
def visited(self):
if self.exists():
self.get_build_env().get_CacheDir().push_if_forced(self)
ninfo = self.get_ninfo()
csig = self.get_max_drift_csig()
if csig:
ninfo.csig = csig
ninfo.timestamp = self.get_timestamp()
ninfo.size = self.get_size()
if not self.has_builder():
# This is a source file, but it might have been a target file
# in another build that included more of the DAG. Copy
# any build information that's stored in the .sconsign file
# into our binfo object so it doesn't get lost.
old = self.get_stored_info()
self.get_binfo().__dict__.update(old.binfo.__dict__)
self.store_info()
def find_src_builder(self):
if self.rexists():
return None
scb = self.dir.src_builder()
if scb is _null:
if diskcheck_sccs(self.dir, self.name):
scb = get_DefaultSCCSBuilder()
elif diskcheck_rcs(self.dir, self.name):
scb = get_DefaultRCSBuilder()
else:
scb = None
if scb is not None:
try:
b = self.builder
except AttributeError:
b = None
if b is None:
self.builder_set(scb)
return scb
def has_src_builder(self):
"""Return whether this Node has a source builder or not.
If this Node doesn't have an explicit source code builder, this
is where we figure out, on the fly, if there's a transparent
source code builder for it.
Note that if we found a source builder, we also set the
self.builder attribute, so that all of the methods that actually
*build* this file don't have to do anything different.
"""
try:
scb = self.sbuilder
except AttributeError:
scb = self.sbuilder = self.find_src_builder()
return scb is not None
def alter_targets(self):
"""Return any corresponding targets in a variant directory.
"""
if self.is_derived():
return [], None
return self.fs.variant_dir_target_climb(self, self.dir, [self.name])
def _rmv_existing(self):
self.clear_memoized_values()
e = Unlink(self, [], None)
if isinstance(e, SCons.Errors.BuildError):
raise e
#
# Taskmaster interface subsystem
#
def make_ready(self):
self.has_src_builder()
self.get_binfo()
def prepare(self):
"""Prepare for this file to be created."""
SCons.Node.Node.prepare(self)
if self.get_state() != SCons.Node.up_to_date:
if self.exists():
if self.is_derived() and not self.precious:
self._rmv_existing()
else:
try:
self._createDir()
except SCons.Errors.StopError, drive:
desc = "No drive `%s' for target `%s'." % (drive, self)
raise SCons.Errors.StopError(desc)
#
#
#
def remove(self):
"""Remove this file."""
if self.exists() or self.islink():
self.fs.unlink(self.path)
return 1
return None
def do_duplicate(self, src):
self._createDir()
Unlink(self, None, None)
e = Link(self, src, None)
if isinstance(e, SCons.Errors.BuildError):
desc = "Cannot duplicate `%s' in `%s': %s." % (src.path, self.dir.path, e.errstr)
raise SCons.Errors.StopError(desc)
self.linked = 1
# The Link() action may or may not have actually
# created the file, depending on whether the -n
# option was used or not. Delete the _exists and
# _rexists attributes so they can be reevaluated.
self.clear()
memoizer_counters.append(SCons.Memoize.CountValue('exists'))
def exists(self):
try:
return self._memo['exists']
except KeyError:
pass
# Duplicate from source path if we are set up to do this.
if self.duplicate and not self.is_derived() and not self.linked:
src = self.srcnode()
if src is not self:
# At this point, src is meant to be copied in a variant directory.
src = src.rfile()
if src.abspath != self.abspath:
if src.exists():
self.do_duplicate(src)
# Can't return 1 here because the duplication might
# not actually occur if the -n option is being used.
else:
# The source file does not exist. Make sure no old
# copy remains in the variant directory.
if Base.exists(self) or self.islink():
self.fs.unlink(self.path)
# Return None explicitly because the Base.exists() call
# above will have cached its value if the file existed.
self._memo['exists'] = None
return None
result = Base.exists(self)
self._memo['exists'] = result
return result
#
# SIGNATURE SUBSYSTEM
#
def get_max_drift_csig(self):
"""
Returns the content signature currently stored for this node
if it's been unmodified longer than the max_drift value, or the
max_drift value is 0. Returns None otherwise.
"""
old = self.get_stored_info()
mtime = self.get_timestamp()
max_drift = self.fs.max_drift
if max_drift > 0:
if (time.time() - mtime) > max_drift:
try:
n = old.ninfo
if n.timestamp and n.csig and n.timestamp == mtime:
return n.csig
except AttributeError:
pass
elif max_drift == 0:
try:
return old.ninfo.csig
except AttributeError:
pass
return None
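    # Worked example of the rule above (hypothetical numbers): with
    # max_drift = 2 days and a file whose mtime has not changed for 3 days,
    # the stored csig is reused when the stored timestamp matches the current
    # mtime; with max_drift = 0 the stored csig is always trusted.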
def get_csig(self):
"""
Generate a node's content signature, the digested signature
of its content.
node - the node
cache - alternate node to use for the signature cache
returns - the content signature
"""
ninfo = self.get_ninfo()
try:
return ninfo.csig
except AttributeError:
pass
csig = self.get_max_drift_csig()
if csig is None:
try:
if self.get_size() < SCons.Node.FS.File.md5_chunksize:
contents = self.get_contents()
else:
csig = self.get_content_hash()
except IOError:
# This can happen if there's actually a directory on-disk,
# which can be the case if they've disabled disk checks,
# or if an action with a File target actually happens to
# create a same-named directory by mistake.
csig = ''
else:
if not csig:
csig = SCons.Util.MD5signature(contents)
ninfo.csig = csig
return csig
#
# DECISION SUBSYSTEM
#
def builder_set(self, builder):
SCons.Node.Node.builder_set(self, builder)
self.changed_since_last_build = self.decide_target
def changed_content(self, target, prev_ni):
cur_csig = self.get_csig()
try:
return cur_csig != prev_ni.csig
except AttributeError:
return 1
def changed_state(self, target, prev_ni):
return self.state != SCons.Node.up_to_date
def changed_timestamp_then_content(self, target, prev_ni):
if not self.changed_timestamp_match(target, prev_ni):
try:
self.get_ninfo().csig = prev_ni.csig
except AttributeError:
pass
return False
return self.changed_content(target, prev_ni)
def changed_timestamp_newer(self, target, prev_ni):
try:
return self.get_timestamp() > target.get_timestamp()
except AttributeError:
return 1
def changed_timestamp_match(self, target, prev_ni):
try:
return self.get_timestamp() != prev_ni.timestamp
except AttributeError:
return 1
def decide_source(self, target, prev_ni):
return target.get_build_env().decide_source(self, target, prev_ni)
def decide_target(self, target, prev_ni):
return target.get_build_env().decide_target(self, target, prev_ni)
# Initialize this Node's decider function to decide_source() because
# every file is a source file until it has a Builder attached...
changed_since_last_build = decide_source
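    # Sketch of how the decider switches (hypothetical build steps, not part
    # of the original source):
    #
    #   f = env.File('foo.c')       # plain source file: uses decide_source()
    #   obj = env.Object('foo.c')   # 'foo.o' gets a Builder; builder_set()
    #                               # rebinds changed_since_last_build to
    #                               # decide_target() for that node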
def is_up_to_date(self):
T = 0
if T: Trace('is_up_to_date(%s):' % self)
if not self.exists():
if T: Trace(' not self.exists():')
# The file doesn't exist locally...
r = self.rfile()
if r != self:
# ...but there is one in a Repository...
if not self.changed(r):
if T: Trace(' changed(%s):' % r)
# ...and it's even up-to-date...
if self._local:
# ...and they'd like a local copy.
e = LocalCopy(self, r, None)
if isinstance(e, SCons.Errors.BuildError):
                            raise e
self.store_info()
if T: Trace(' 1\n')
return 1
self.changed()
if T: Trace(' None\n')
return None
else:
r = self.changed()
if T: Trace(' self.exists(): %s\n' % r)
return not r
memoizer_counters.append(SCons.Memoize.CountValue('rfile'))
def rfile(self):
try:
return self._memo['rfile']
except KeyError:
pass
result = self
if not self.exists():
norm_name = _my_normcase(self.name)
for dir in self.dir.get_all_rdirs():
try: node = dir.entries[norm_name]
except KeyError: node = dir.file_on_disk(self.name)
if node and node.exists() and \
(isinstance(node, File) or isinstance(node, Entry) \
or not node.is_derived()):
result = node
# Copy over our local attributes to the repository
# Node so we identify shared object files in the
# repository and don't assume they're static.
#
# This isn't perfect; the attribute would ideally
# be attached to the object in the repository in
# case it was built statically in the repository
# and we changed it to shared locally, but that's
# rarely the case and would only occur if you
# intentionally used the same suffix for both
# shared and static objects anyway. So this
# should work well in practice.
result.attributes = self.attributes
break
self._memo['rfile'] = result
return result
def rstr(self):
return str(self.rfile())
def get_cachedir_csig(self):
"""
Fetch a Node's content signature for purposes of computing
another Node's cachesig.
This is a wrapper around the normal get_csig() method that handles
the somewhat obscure case of using CacheDir with the -n option.
Any files that don't exist would normally be "built" by fetching
them from the cache, but the normal get_csig() method will try
to open up the local file, which doesn't exist because the -n
option meant we didn't actually pull the file from cachedir.
But since the file *does* actually exist in the cachedir, we
can use its contents for the csig.
"""
try:
return self.cachedir_csig
except AttributeError:
pass
cachedir, cachefile = self.get_build_env().get_CacheDir().cachepath(self)
if not self.exists() and cachefile and os.path.exists(cachefile):
self.cachedir_csig = SCons.Util.MD5filesignature(cachefile, \
SCons.Node.FS.File.md5_chunksize * 1024)
else:
self.cachedir_csig = self.get_csig()
return self.cachedir_csig
def get_cachedir_bsig(self):
try:
return self.cachesig
except AttributeError:
pass
# Add the path to the cache signature, because multiple
# targets built by the same action will all have the same
# build signature, and we have to differentiate them somehow.
children = self.children()
executor = self.get_executor()
# sigs = [n.get_cachedir_csig() for n in children]
sigs = [n.get_cachedir_csig() for n in children]
sigs.append(SCons.Util.MD5signature(executor.get_contents()))
sigs.append(self.path)
result = self.cachesig = SCons.Util.MD5collect(sigs)
return result
default_fs = None
def get_default_fs():
global default_fs
if not default_fs:
default_fs = FS()
return default_fs
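# Sketch of how callers typically obtain the singleton (assumed usage):
#
#   fs = get_default_fs()
#   hello = fs.File('src/hello.c')   # looked up relative to the current directory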
class FileFinder(object):
"""
"""
if SCons.Memoize.use_memoizer:
__metaclass__ = SCons.Memoize.Memoized_Metaclass
memoizer_counters = []
def __init__(self):
self._memo = {}
def filedir_lookup(self, p, fd=None):
"""
A helper method for find_file() that looks up a directory for
a file we're trying to find. This only creates the Dir Node if
it exists on-disk, since if the directory doesn't exist we know
we won't find any files in it... :-)
It would be more compact to just use this as a nested function
with a default keyword argument (see the commented-out version
below), but that doesn't work unless you have nested scopes,
        so we define it here just so this works under Python 1.5.2.
"""
if fd is None:
fd = self.default_filedir
dir, name = os.path.split(fd)
drive, d = os.path.splitdrive(dir)
if not name and d[:1] in ('/', os.sep):
#return p.fs.get_root(drive).dir_on_disk(name)
return p.fs.get_root(drive)
if dir:
p = self.filedir_lookup(p, dir)
if not p:
return None
norm_name = _my_normcase(name)
try:
node = p.entries[norm_name]
except KeyError:
return p.dir_on_disk(name)
if isinstance(node, Dir):
return node
if isinstance(node, Entry):
node.must_be_same(Dir)
return node
return None
def _find_file_key(self, filename, paths, verbose=None):
return (filename, paths)
memoizer_counters.append(SCons.Memoize.CountDict('find_file', _find_file_key))
def find_file(self, filename, paths, verbose=None):
"""
find_file(str, [Dir()]) -> [nodes]
filename - a filename to find
paths - a list of directory path *nodes* to search in. Can be
represented as a list, a tuple, or a callable that is
called with no arguments and returns the list or tuple.
returns - the node created from the found file.
Find a node corresponding to either a derived file or a file
that exists already.
Only the first file found is returned, and none is returned
if no file is found.
"""
memo_key = self._find_file_key(filename, paths)
try:
memo_dict = self._memo['find_file']
except KeyError:
memo_dict = {}
self._memo['find_file'] = memo_dict
else:
try:
return memo_dict[memo_key]
except KeyError:
pass
if verbose and not callable(verbose):
if not SCons.Util.is_String(verbose):
verbose = "find_file"
_verbose = u' %s: ' % verbose
verbose = lambda s: sys.stdout.write(_verbose + s)
filedir, filename = os.path.split(filename)
if filedir:
# More compact code that we can't use until we drop
# support for Python 1.5.2:
#
#def filedir_lookup(p, fd=filedir):
# """
# A helper function that looks up a directory for a file
# we're trying to find. This only creates the Dir Node
# if it exists on-disk, since if the directory doesn't
# exist we know we won't find any files in it... :-)
# """
# dir, name = os.path.split(fd)
# if dir:
# p = filedir_lookup(p, dir)
# if not p:
# return None
# norm_name = _my_normcase(name)
# try:
# node = p.entries[norm_name]
# except KeyError:
# return p.dir_on_disk(name)
# if isinstance(node, Dir):
# return node
# if isinstance(node, Entry):
# node.must_be_same(Dir)
# return node
# if isinstance(node, Dir) or isinstance(node, Entry):
# return node
# return None
#paths = [_f for _f in map(filedir_lookup, paths) if _f]
self.default_filedir = filedir
paths = [_f for _f in map(self.filedir_lookup, paths) if _f]
result = None
for dir in paths:
if verbose:
verbose("looking for '%s' in '%s' ...\n" % (filename, dir))
node, d = dir.srcdir_find_file(filename)
if node:
if verbose:
verbose("... FOUND '%s' in '%s'\n" % (filename, d))
result = node
break
memo_dict[memo_key] = result
return result
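    # Hypothetical call (the directory Nodes are assumed to exist already):
    #
    #   node = find_file('stdio.h', (fs.Dir('/usr/include'),))
    #   # -> the corresponding File node if found, otherwise None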
find_file = FileFinder().find_file
def invalidate_node_memos(targets):
"""
Invalidate the memoized values of all Nodes (files or directories)
that are associated with the given entries. Has been added to
clear the cache of nodes affected by a direct execution of an
action (e.g. Delete/Copy/Chmod). Existing Node caches become
inconsistent if the action is run through Execute(). The argument
`targets` can be a single Node object or filename, or a sequence
of Nodes/filenames.
"""
from traceback import extract_stack
# First check if the cache really needs to be flushed. Only
# actions run in the SConscript with Execute() seem to be
# affected. XXX The way to check if Execute() is in the stacktrace
# is a very dirty hack and should be replaced by a more sensible
# solution.
for f in extract_stack():
if f[2] == 'Execute' and f[0][-14:] == 'Environment.py':
break
else:
        # Don't have to invalidate, so return
return
if not SCons.Util.is_List(targets):
targets = [targets]
for entry in targets:
# If the target is a Node object, clear the cache. If it is a
# filename, look up potentially existing Node object first.
try:
entry.clear_memoized_values()
except AttributeError:
# Not a Node object, try to look up Node by filename. XXX
# This creates Node objects even for those filenames which
# do not correspond to an existing Node object.
node = get_default_fs().Entry(entry)
if node:
node.clear_memoized_values()
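# Hypothetical SConscript usage that triggers the Execute() check above:
#
#   Execute(Delete('generated.h'))   # the action runs immediately;
#                                    # invalidate_node_memos() then discards any
#                                    # stale memoized exists()/rexists() results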
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
ivan73/smarthome | plugins/logo/__init__.py | 1 | 25174 | #!/usr/bin/env python3
# vim: set encoding=utf-8 tabstop=4 softtabstop=4 shiftwidth=4 expandtab
#########################################################################
# Copyright 2013 KNX-User-Forum e.V. http://knx-user-forum.de/
#########################################################################
# This file is part of SmartHome.py. http://mknx.github.io/smarthome/
#
# SmartHome.py is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SmartHome.py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SmartHome.py. If not, see <http://www.gnu.org/licenses/>.
#########################################################################
import ctypes
import os
import string
import time
import logging
import threading
logger = logging.getLogger('')
class LOGO:
def __init__(self, smarthome, io_wait=5, host='192.168.0.76', port=102, version='0BA7'):
self.host = str(host).encode('ascii')
self.port = int(port)
self._version = version
self._io_wait = float(io_wait)
self._sock = False
self._lock = threading.Lock()
self.connected = False
self._connection_attempts = 0
self._connection_errorlog = 2
self._sh = smarthome
        self._vmIO = 923  # read I Q M AI AQ AM starting at VM address VM923
        self._vmIO_len = 60  # number of bytes to read: 60
        self._vm = 0  # read the VM area starting at VM address VM0
        self._vm_len = 850  # number of bytes to read: VM0-850
        self.tableVM_IO = { # address table of I,Q,M,AI,AQ,AM in the PLC VM buffer
'I': {'VMaddr': 923, 'bytes': 3, 'bits': 24},
'Q': {'VMaddr': 942, 'bytes': 2, 'bits': 16},
'M': {'VMaddr': 948, 'bytes': 3, 'bits': 27},
'AI': {'VMaddr': 926, 'words': 8},
'AQ': {'VMaddr': 944, 'words': 2},
'AM': {'VMaddr': 952, 'words': 16},
'VM': {'VMaddr': 0, 'bytes': 850}}
# Hardware Version 0BA8
        self._vmIO_0BA8 = 1024  # read I Q M AI AQ AM starting at VM address VM1024
        self._vmIO_len_0BA8 = 445  # number of bytes to read: 445
        self.table_VM_IO_0BA8 = { # address table of I,Q,M,AI,AQ,AM in the PLC VM buffer
'I': {'VMaddr': 1024, 'bytes': 8, 'bits': 64},
'Q': {'VMaddr': 1064, 'bytes': 8, 'bits': 64},
'M': {'VMaddr': 1104, 'bytes': 14, 'bits': 112},
'AI': {'VMaddr': 1032, 'words': 16},
'AQ': {'VMaddr': 1072, 'words': 16},
'AM': {'VMaddr': 1118, 'words': 64},
'NI': {'VMaddr': 1256, 'bytes': 16, 'bits':128},
'NAI': {'VMaddr': 1262, 'words': 64},
'NQ': {'VMaddr': 1390, 'bytes': 16, 'bits': 128},
'NAQ': {'VMaddr': 1406, 'words': 32},
'VM': {'VMaddr': 0, 'bytes': 850}}
if self._version == '0BA8':
self.tableVM_IO = self.table_VM_IO_0BA8
self._vmIO = self._vmIO_0BA8
self._vmIO_len = self._vmIO_len_0BA8
# End Hardware Version 0BA8
self.reads = {}
self.writes = {}
        self.Dateipfad = '/lib'  # path to the libnodave library
        self.threadLastRead = 0  # elapsed time between two read commands
        smarthome.connections.monitor(self)  # so that connect() gets executed
        # libnodave parameters for reading from the LOGO
        self.ph = 0  # port handle
self.di = 0 # Dave Interface Handle
self.dc = 0
self.res = 1
self.rack = 1
self.slot = 0
self.mpi = 2
self.dave = ""
self.daveDB = 132
self.daveFlags = 131
self.daveOutputs = 130
self.daveInputs = 129
self.timeOut = 5000000
    # open the connection to the LOGO
def connect(self):
self._lock.acquire()
try:
logger.info('LOGO: Try open connection {0}:{1} '.format(self.host, self.port))
if os.name == 'nt':
DLL_LOC = self.Dateipfad + '/' + ('libnodave.dll')
self.dave = ctypes.cdll.LoadLibrary(DLL_LOC)
if os.name == 'posix':
DLL_LOC = self.Dateipfad + '/' + ('libnodave.so')
self.dave = ctypes.cdll.LoadLibrary(DLL_LOC)
#logger.info('LOGO: DLL-Path: {0}, operating system {1}'.format(DLL_LOC, os.name))
self.ph = self.dave.openSocket(self.port, self.host)
if self.ph < 0:
raise LOGO('Port Handle N.O.K.')
# Dave Interface handle
self.di = self.dave.daveNewInterface(self.ph, self.ph, 'IF1', 0, 122, 2)
#logger.info('LOGO: - Dave Interface Handle: {0}'.format(self.di))
# Init Adapter
self.res = self.dave.daveInitAdapter(self.di)
            if self.res != 0:
raise LOGO('Init Adapter N.O.K.')
# dave Connection
self.dc = self.dave.daveNewConnection(self.di, self.mpi, self.rack, self.slot)
#logger.info('LOGO: - Dave Connection: {0}'.format(self.dc))
self.res = self.dave.daveConnectPLC(self.dc)
self.dave.daveSetTimeout(self.di, self.timeOut)
if self.res < 0:
raise LOGO('connection result:{0} '.format(self.res))
except Exception as e:
self._connection_attempts -= 1
if self._connection_attempts <= 0:
logger.error('LOGO: could not connect to {0}:{1}: {2}'.format(self.host, self.port, e))
self._connection_attempts = self._connection_errorlog
self._lock.release()
return
else:
self.connected = True
logger.info('LOGO: connected to {0}:{1}'.format(self.host, self.port))
self._connection_attempts = 0
self._lock.release()
def run(self):
self.alive = True
self._write_read_loop()
def stop(self):
self.alive = False
self.close()
def _write_read_loop(self):
threading.currentThread().name = 'logo_cycle'
logger.debug("LOGO: Starting write-read cycle")
while self.alive:
start = time.time()
t = start - self.threadLastRead
            if len(self.writes) > 0:  # pending writes are sent immediately
self._write_cycle()
self._read_cycle()
cycletime = time.time() - start
#logger.debug("LOGO: logo_cycle takes {0} seconds".format(cycletime))
self.threadLastRead = time.time()
            elif t > self._io_wait:  # read again only once the wait time has elapsed
self._read_cycle()
cycletime = time.time() - start
#logger.debug("LOGO: logo_cycle takes {0} seconds. Last read: {1}".format(cycletime, t))
self.threadLastRead = time.time()
def _write_cycle(self):
if not self.connected:
return
try:
            remove = []  # list of items that have already been written
            for k, v in self.writes.items():  # items to write, e.g. I1, Q1, M1, AI1, AQ1, AM1, VM850, VM850.2, VMW0
                #logger.debug('LOGO: write_cycle() {0} : {1} '.format(k, v))
                typ = v['typ']  # e.g. I Q M AI AQ AM VM VMW
value = v['value']
write_res = -1
if typ in ['I', 'Q', 'M', 'NI', 'NQ']: # I1 Q1 M1
if value is True:
#logger.debug("LOGO: set {0} : {1} : {2} ".format(k, v, value))
                        write_res = self.set_vm_bit(v['VMaddr'], v['VMbit'])  # set the bit
                    else:
                        #logger.debug("LOGO: clear {0} : {1} : {2} ".format(k, v, value))
                        write_res = self.clear_vm_bit(v['VMaddr'], v['VMbit'])  # clear the bit
elif typ in ['AI', 'AQ', 'AM', 'NAI', 'NAQ', 'VMW']: # AI1 AQ1 AM1 VMW
write_res = self.write_vm_word(v['VMaddr'], value)
elif typ == 'VM': # VM0 VM10.6
if 'VMbit' in v: # VM10.6
if value is True:
write_res = self.set_vm_bit(v['VMaddr'], v['VMbit'])
else:
write_res = self.clear_vm_bit(v['VMaddr'], v['VMbit'])
else: # VM0
write_res = self.write_vm_byte(v['VMaddr'], value)
else:
                    raise LOGO('invalid typ: {0}'.format(typ))
                if write_res != 0:
                    self.close()
                    raise LOGO('LOGO: write failed: {0} {1} '.format(typ, value))
else:
logger.debug("LOGO: write {0} : {1} : {2} ".format(k, value, v))
                    remove.append(k)  # mark for removal from the write list after transfer
except Exception as e:
logger.error('LOGO: write_cycle(){0} write error {1} '.format(k, e))
return
        for k in remove:  # remove transferred items from self.writes so each item is only written once
del self.writes[k]
def _read_cycle(self):
if not self.connected:
return
try:
pBuf_VMIO = ctypes.create_string_buffer(self._vmIO_len)
            buf_VMIO_p = ctypes.pointer(pBuf_VMIO)  # IO read buffer
            pBuf_VM = ctypes.create_string_buffer(self._vm_len)
            buf_VM_p = ctypes.pointer(pBuf_VM)  # VM read buffer
            # read the I Q M AI AQ AM area
resVMIO = self.dave.daveReadManyBytes(self.dc, self.daveDB, 1, self._vmIO, self._vmIO_len, buf_VMIO_p)
            if resVMIO != 0:
                logger.error('LOGO: read_cycle() failed to read VM_IO-Buffer daveReadManyBytes')
self.close()
return
if not self.connected:
return
            # read the VM area
resVM = self.dave.daveReadManyBytes(self.dc, self.daveDB, 1, self._vm, self._vm_len, buf_VM_p)
            if resVM != 0:
                logger.error('LOGO: read_cycle() failed to read VM-Buffer daveReadManyBytes')
self.close()
return
if not self.connected:
return
            # check the buffers for changes
for k, v in self.reads.items():
#logger.debug('LOGO: read_cycle() {0} : {1} '.format(k, v))
new_value = 0
item = v['item']
if v['DataType'] == 'byte':
                    new_value = ord(pBuf_VM[v['VMaddr'] - self._vm])  # VM byte, e.g. VM0
elif v['DataType'] == 'word':
#logger.debug('LOGO: read_cycle() h{0} : l{1} '.format(pBuf_VM[v['VMaddr']-self._vm], pBuf_VM[v['VMaddr']+1-self._vm]))
                    if v['typ'] == 'VMW':  # VMW word, e.g. VMW0
                        h = ord(pBuf_VM[v['VMaddr'] - self._vm])
                        l = ord(pBuf_VM[v['VMaddr'] + 1 - self._vm])
                    else:  # AI AQ AM word, e.g. AM1
h = ord(pBuf_VMIO[v['VMaddr'] - self._vmIO])
l = ord(pBuf_VMIO[v['VMaddr'] + 1 - self._vmIO])
new_value = l + (h << 8)
elif v['DataType'] == 'bit':
                    if v['typ'] == 'VM':  # VM bit, e.g. VM10.6
                        new_byte = ord(pBuf_VM[v['VMaddr'] - self._vm])
                    else:  # I Q M bit, e.g. M1
new_byte = ord(pBuf_VMIO[v['VMaddr'] - self._vmIO])
new_value = self.get_bit(new_byte, v['VMbit'])
else:
raise LOGO('{0} invalid DataType in reads: {1}'.format(k, v['DataType']))
                if 'old' in v:  # the variable has been read before
                    if v['old'] != new_value:  # the value has changed
                        logger.debug("LOGO: read_cycle():{0} newV:{1} oldV:{2} item:{3} ".format(k, new_value, v['old'], v['item']))
                        item(new_value)  # update the item
                        v.update({'old': new_value})  # remember the current state
                    #else:  # the value has not changed
                        #logger.debug("LOGO: read:{0} newV:{1} = oldV:{2} item:{3} ".format(k, new_value, v['old'], v['item']))
                else:  # the variable has never been read before
                    logger.debug('LOGO: read_cycle() first read:{0} value:{1} item:{2}'.format(k, new_value, v['item']))
                    item(new_value)  # update the item for the first time
                    v.update({'old': new_value})  # remember the current state
except Exception as e:
logger.error('LOGO: read_cycle(){0} read error {1} '.format(k, e))
return
if not self.connected:
return
def close(self):
self.connected = False
try:
self.disconnect()
logger.info('LOGO: disconnected {0}:{1}'.format(self.host, self.port))
except:
pass
def disconnect(self):
self.dave.daveDisconnectPLC(self.dc)
self.dave.closePort(self.ph)
def parse_item(self, item):
if 'logo_read' in item.conf:
logo_read = item.conf['logo_read']
if isinstance(logo_read, str):
logo_read = [logo_read, ]
for addr in logo_read:
#logger.debug('LOGO: parse_item {0} {1}'.format(item, addr))
addressInfo = self.getAddressInfo(addr)
if addressInfo is not False:
                    addressInfo.update({'value': item()})  # add the item's current value
                    addressInfo.update({'item': item})  # add the item itself
                    self.reads.update({addr: addressInfo})  # items to be read
if 'logo_write' in item.conf:
if isinstance(item.conf['logo_write'], str):
item.conf['logo_write'] = [item.conf['logo_write'], ]
return self.update_item
def parse_logic(self, logic):
pass
#if 'xxx' in logic.conf:
# self.function(logic['name'])
def update_item(self, item, caller=None, source=None, dest=None):
if 'logo_write' in item.conf:
if caller != 'LOGO':
for addr in item.conf['logo_write']:
#logger.debug('LOGO: update_item() item:{0} addr:{1}'.format(item, addr))
addressInfo = self.getAddressInfo(addr)
if addressInfo is not False:
                        addressInfo.update({'value': item()})  # add the item's current value
                        addressInfo.update({'item': item})
                        self.writes.update({addr: addressInfo})  # items to be written
def getAddressInfo(self, value): # I1,Q1,M1, AI1, AQ1, AM1, VM850, VM850.2, VMW0
try:
indexDigit = 0
            for c in value:  # indexDigit: find the index of the first digit
if c.isdigit():
break
else:
indexDigit += 1
            indexComma = value.find('.')  # determine whether a dot is present (e.g. VM10.6)
#logger.debug('LOGO: getAddressInfo() value:{0} iC:{1} iD:{2}'.format(value, indexComma, indexDigit))
if (len(value) < 2):
                raise LOGO('invalid address {0}: too short'.format(value))
if indexDigit < 1:
raise LOGO('invalid address {0} indexDigit < 1'.format(value))
typ = value[0:indexDigit] # I Q M AI AQ AM VM VMW
            if indexComma == -1:  # no dot present (e.g. M1)
                address = int(value[indexDigit:len(value)])
            else:  # dot present (e.g. VM10.6)
address = int(value[indexDigit:indexComma])
bitNr = int(value[indexComma + 1:len(value)])
if (bitNr < 0) or (bitNr > 8):
raise LOGO('invalid address {0} bitNr invalid'.format(value))
#logger.debug('LOGO: getAddressInfo() typ:{0} address:{1}'.format(typ, address))
if typ == 'VMW':
                VMaddr = int(self.tableVM_IO['VM']['VMaddr'])  # start address
            else:
                VMaddr = int(self.tableVM_IO[typ]['VMaddr'])  # start address
if typ in ['I', 'Q', 'M', 'NI', 'NQ']: # I1 Q1 M1
                MaxBits = int(self.tableVM_IO[typ]['bits'])  # number of bits
if address > MaxBits:
raise LOGO('Address out of range. {0}1-{0}{1}'.format(typ, MaxBits))
q, r = divmod(address - 1, 8)
                VMaddr = VMaddr + q
bitNr = r
return {'VMaddr': VMaddr, 'VMbit': bitNr, 'typ': typ, 'DataType': 'bit'}
elif typ in ['AI', 'AQ', 'AM', 'NAI', 'NAQ']: # AI1 AQ1 AM1
                MaxWords = int(self.tableVM_IO[typ]['words'])  # number of words
if address > MaxWords:
raise LOGO('Address out of range. {0}1-{0}{1}'.format(typ, MaxWords))
VMaddr = VMaddr + ((address - 1) * 2)
return {'VMaddr': VMaddr, 'typ': typ, 'DataType': 'word'}
elif typ == 'VMW': # VMW0
#typ = 'VM'
                MaxBytes = int(self.tableVM_IO['VM']['bytes'])  # number of bytes
if address > MaxBytes:
raise LOGO('Address out of range. {0}0-{0}{1}'.format(typ, MaxBytes))
VMaddr = VMaddr + address
return {'VMaddr': VMaddr, 'typ': typ, 'DataType': 'word'}
elif (typ == 'VM') and (indexComma == -1): # VM0
                MaxBytes = int(self.tableVM_IO[typ]['bytes'])  # number of bytes
if address > MaxBytes:
raise LOGO('Address out of range. {0}0-{0}{1}'.format(typ, MaxBytes))
VMaddr = VMaddr + address
return {'VMaddr': VMaddr, 'typ': typ, 'DataType': 'byte'}
elif (typ == 'VM') and (indexComma > 2): # VM10.6
                MaxBytes = int(self.tableVM_IO[typ]['bytes'])  # number of bytes
if address > MaxBytes:
raise LOGO('Address out of range. {0}0-{0}{1}'.format(typ, MaxBytes))
VMaddr = VMaddr + address
return {'VMaddr': VMaddr, 'VMbit': bitNr, 'typ': typ, 'DataType': 'bit'}
else:
raise LOGO('invalid typ: {0}'.format(typ))
except Exception as e:
logger.error('LOGO: getAddressInfo() {0} : {1} '.format(value, e))
return False
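    # Illustrative return values for the default 0BA7 layout (derived from the
    # tableVM_IO defaults above; examples only, not part of the original code):
    #
    #   getAddressInfo('I1')     -> {'VMaddr': 923, 'VMbit': 0, 'typ': 'I', 'DataType': 'bit'}
    #   getAddressInfo('AM2')    -> {'VMaddr': 954, 'typ': 'AM', 'DataType': 'word'}
    #   getAddressInfo('VM10.6') -> {'VMaddr': 10, 'VMbit': 6, 'typ': 'VM', 'DataType': 'bit'}
    #   getAddressInfo('VMW0')   -> {'VMaddr': 0, 'typ': 'VMW', 'DataType': 'word'}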
def get_bit(self, byteval, idx):
return ((byteval & (1 << idx)) != 0)
#***********************************************************************************************
def int_to_bitarr(integer):
string = bin(integer)[2:]
arr = list()
        for bit in range(8 - len(string)):
arr.append(0)
for bit in string:
arr.append(int(bit))
arr.reverse()
return arr
#***********************************************************************************************
#******************************** READ IN BYTE FORMAT ******************************************
#INPUTS
def get_input_byte(self, input_):
if self.read_bytes(self.daveInputs, 0, input_, 1):
return self.dave.daveGetU8(self.dc)
return -1
#OUTPUTS
def get_output_byte(self, output):
if self.read_bytes(self.daveOutputs, 0, output, 1):
return self.dave.daveGetU8(self.dc)
return -1
#MARKS
def get_marker_byte(self, marker):
if self.read_bytes(self.daveFlags, 0, marker, 1):
return self.dave.daveGetU8(self.dc)
return -1
#VM
def get_vm_byte(self, vm):
if self.read_bytes(self.daveDB, 1, vm, 1):
return self.dave.daveGetU8(self.dc)
return -1
#******************************** READ IN BIT FORMAT ******************************************
#INPUTS
def get_input(self, input, byte):
m_byte = self.get_input_byte(input)
if m_byte >= 0:
byte_arr = int_to_bitarr(m_byte)
return byte_arr[byte]
return False
#OUTPUTS
def get_output(self, output, byte):
m_byte = self.get_output_byte(output)
if m_byte >= 0:
byte_arr = int_to_bitarr(m_byte)
return byte_arr[byte]
return False
def outputs(self):
Q1 = self.get_output(0, 0)
Q2 = self.get_output(0, 1)
Q3 = self.get_output(0, 2)
Q4 = self.get_output(0, 3)
s = ('Q1 : ' + str(Q1))
s += (', Q2 : ' + str(Q2))
s += (', Q3 : ' + str(Q3))
s += (', Q4 : ' + str(Q4))
return s
#MARKS
def get_marker(self, marker, byte):
m_byte = self.get_marker_byte(marker)
if m_byte >= 0:
byte_arr = int_to_bitarr(m_byte)
return byte_arr[byte]
return False
#VM
def get_vm(self, vm, byte):
m_byte = self.get_vm_byte(vm)
if m_byte >= 0:
byte_arr = int_to_bitarr(m_byte)
return byte_arr[byte]
return False
#******************************** READ IN WORD & DOUBLE FORMAT ********************************
#VM
def get_vm_word(self, vm):
if self.read_bytes(self.daveDB, 1, vm, 2):
return self.dave.daveGetU16(self.dc)
return -1
def get_vm_double(self, vm):
if self.read_bytes(self.daveDB, 1, vm, 4):
return self.dave.daveGetU32(self.dc)
return -1
#******************************** WRITE IN BYTE FORMAT ****************************************
#OUTPUTS
def write_output_byte(self, output, value):
buffer = ctypes.c_byte(int(value))
buffer_p = ctypes.pointer(buffer)
return self.dave.daveWriteBytes(self.dc, self.daveOutputs, 0, output, 1, buffer_p)
#MARKS
def write_marker_byte(self, mark, value):
buffer = ctypes.c_byte(int(value))
buffer_p = ctypes.pointer(buffer)
return self.dave.daveWriteBytes(self.dc, self.daveFlags, 0, mark, 1, buffer_p)
#VM
def write_vm_byte(self, vm, value):
buffer = ctypes.c_byte(int(value))
buffer_p = ctypes.pointer(buffer)
return self.dave.daveWriteBytes(self.dc, self.daveDB, 1, vm, 1, buffer_p)
#******************************** WRITE IN WORD & DOUBLE FORMAT *******************************
#VM WORD
def write_vm_word(self, vm, value):
writeBuffer = ctypes.create_string_buffer(2)
        buffer_p = ctypes.pointer(writeBuffer)  # write buffer
writeBuffer[0] = ((value & 0xFF00) >> 8)
writeBuffer[1] = (value & 0x00FF)
#logger.debug('LOGO: write_vm_word() vm:{0} value:{1} w0:{2} w1:{3}'.format(vm, value, writeBuffer[0], writeBuffer[1]))
return self.dave.daveWriteBytes(self.dc, self.daveDB, 1, vm, 2, buffer_p)
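    # Example of the big-endian byte split above: value 0x1234 is written as
    # writeBuffer[0] = 0x12 (high byte) and writeBuffer[1] = 0x34 (low byte).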
    #VM DOUBLE
    def write_vm_double(self, vm, value):
        writeBuffer = ctypes.create_string_buffer(4)
        pBuf = ctypes.pointer(writeBuffer)  # write buffer
        writeBuffer[0] = ((value & 0xFF000000) >> 24)
        writeBuffer[1] = ((value & 0x00FF0000) >> 16)
        writeBuffer[2] = ((value & 0x0000FF00) >> 8)
        writeBuffer[3] = (value & 0x000000FF)
        #logger.debug('LOGO: write_vm_word() vm:{0} value:{1} w0:{2} w1:{3}'.format(vm, value, writeBuffer[0], writeBuffer[1]))
        return self.dave.daveWriteBytes(self.dc, self.daveDB, 1, vm, 4, pBuf)
#******************************** WRITE IN BIT FORMAT *****************************************
#OUTPUTS
def set_output_bit(self, output, position):
return self.dave.daveSetBit(self.dc, self.daveOutputs, 0, output, position)
def clear_output_bit(self, output, position):
return self.dave.daveClrBit(self.dc, self.daveOutputs, 0, output, position)
#VM
def set_vm_bit(self, vm, position):
return self.dave.daveSetBit(self.dc, self.daveDB, 1, vm, position)
def clear_vm_bit(self, vm, position):
return self.dave.daveClrBit(self.dc, self.daveDB, 1, vm, position)
#MARKS
def set_mark_bit(self, mark, position):
return self.dave.daveSetBit(self.dc, self.daveFlags, 0, mark, position)
def clear_mark_bit(self, mark, position):
return self.dave.daveClrBit(self.dc, self.daveFlags, 0, mark, position)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
myplugin = LOGO('smarthome-dummy')
myplugin.connect()
myplugin.run()
| gpl-3.0 |
bendykst/deluge | setup.py | 1 | 12762 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Andrew Resch <[email protected]>
# Copyright (C) 2009 Damien Churchill <[email protected]>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
import glob
import os
import platform
import sys
from distutils import cmd
from distutils.command.build import build as _build
from distutils.command.clean import clean as _clean
from setuptools import find_packages, setup
from setuptools.command.test import test as _test
import msgfmt
from version import get_version
try:
from sphinx.setup_command import BuildDoc
except ImportError:
class BuildDoc(object):
pass
def windows_check():
return platform.system() in ('Windows', 'Microsoft')
desktop_data = 'deluge/ui/data/share/applications/deluge.desktop'
class PyTest(_test):
def initialize_options(self):
_test.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
_test.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
class BuildTranslations(cmd.Command):
description = 'Compile .po files into .mo files & create .desktop file'
user_options = [
('build-lib', None, "lib build folder"),
('develop', 'D', 'Compile translations in develop mode (deluge/i18n)')
]
boolean_options = ['develop']
def initialize_options(self):
self.build_lib = None
self.develop = False
def finalize_options(self):
self.set_undefined_options('build', ('build_lib', 'build_lib'))
def run(self):
po_dir = os.path.join(os.path.dirname(__file__), 'deluge/i18n/')
if self.develop:
basedir = po_dir
else:
basedir = os.path.join(self.build_lib, 'deluge', 'i18n')
if not windows_check():
# creates the translated desktop file
intltool_merge = 'intltool-merge'
intltool_merge_opts = '--utf8 --quiet --desktop-style'
desktop_in = 'deluge/ui/data/share/applications/deluge.desktop.in'
print('Creating desktop file: %s' % desktop_data)
os.system('C_ALL=C ' + '%s ' * 5 % (intltool_merge, intltool_merge_opts,
po_dir, desktop_in, desktop_data))
print('Compiling po files from %s...' % po_dir),
for path, names, filenames in os.walk(po_dir):
for f in filenames:
upto_date = False
if f.endswith('.po'):
lang = f[:len(f) - 3]
src = os.path.join(path, f)
dest_path = os.path.join(basedir, lang, 'LC_MESSAGES')
dest = os.path.join(dest_path, 'deluge.mo')
if not os.path.exists(dest_path):
os.makedirs(dest_path)
if not os.path.exists(dest):
sys.stdout.write('%s, ' % lang)
sys.stdout.flush()
msgfmt.make(src, dest)
else:
src_mtime = os.stat(src)[8]
dest_mtime = os.stat(dest)[8]
if src_mtime > dest_mtime:
sys.stdout.write('%s, ' % lang)
sys.stdout.flush()
msgfmt.make(src, dest)
else:
upto_date = True
if upto_date:
                    sys.stdout.write(' po files already up to date. ')
sys.stdout.write('\b\b \nFinished compiling translation files. \n')
class BuildPlugins(cmd.Command):
description = "Build plugins into .eggs"
user_options = [
('install-dir=', None, "develop install folder"),
('develop', 'D', 'Compile plugins in develop mode')
]
boolean_options = ['develop']
def initialize_options(self):
self.install_dir = None
self.develop = False
def finalize_options(self):
pass
def run(self):
# Build the plugin eggs
plugin_path = "deluge/plugins/*"
for path in glob.glob(plugin_path):
if os.path.exists(os.path.join(path, "setup.py")):
if self.develop and self.install_dir:
os.system("cd " + path + "&& " + sys.executable +
" setup.py develop --install-dir=%s" % self.install_dir)
elif self.develop:
os.system("cd " + path + "&& " + sys.executable + " setup.py develop")
else:
os.system("cd " + path + "&& " + sys.executable + " setup.py bdist_egg -d ..")
class EggInfoPlugins(cmd.Command):
description = "create a distribution's .egg-info directory"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
# Build the plugin eggs
plugin_path = "deluge/plugins/*"
for path in glob.glob(plugin_path):
if os.path.exists(os.path.join(path, "setup.py")):
os.system("cd " + path + "&& " + sys.executable + " setup.py egg_info")
class Build(_build):
sub_commands = [('build_trans', None), ('build_plugins', None)] + _build.sub_commands
def run(self):
# Run all sub-commands (at least those that need to be run)
_build.run(self)
try:
from deluge._libtorrent import lt
            print('Found libtorrent version: %s' % lt.version)
        except ImportError as e:
            print('Warning libtorrent not found: %s' % e)
class CleanPlugins(cmd.Command):
description = "Cleans the plugin folders"
user_options = [
('all', 'a', "remove all build output, not just temporary by-products")
]
boolean_options = ['all']
def initialize_options(self):
self.all = None
def finalize_options(self):
self.set_undefined_options('clean', ('all', 'all'))
def run(self):
print("Cleaning the plugin's folders..")
plugin_path = "deluge/plugins/*"
for path in glob.glob(plugin_path):
if os.path.exists(os.path.join(path, "setup.py")):
c = "cd " + path + "&& " + sys.executable + " setup.py clean"
if self.all:
c += " -a"
os.system(c)
# Delete the .eggs
if path[-4:] == ".egg":
print("Deleting %s" % path)
os.remove(path)
egg_info_dir_path = "deluge/plugins/*/*.egg-info"
for path in glob.glob(egg_info_dir_path):
# Delete the .egg-info's directories
if path[-9:] == ".egg-info":
print("Deleting %s" % path)
for fpath in os.listdir(path):
os.remove(os.path.join(path, fpath))
os.removedirs(path)
root_egg_info_dir_path = "deluge*.egg-info"
for path in glob.glob(root_egg_info_dir_path):
print("Deleting %s" % path)
for fpath in os.listdir(path):
os.remove(os.path.join(path, fpath))
os.removedirs(path)
class Clean(_clean):
sub_commands = _clean.sub_commands + [('clean_plugins', None)]
def run(self):
# Run all sub-commands (at least those that need to be run)
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
_clean.run(self)
if os.path.exists(desktop_data):
print("Deleting %s" % desktop_data)
os.remove(desktop_data)
cmdclass = {
'build': Build,
'build_trans': BuildTranslations,
'build_plugins': BuildPlugins,
'build_docs': BuildDoc,
'clean_plugins': CleanPlugins,
'clean': Clean,
'egg_info_plugins': EggInfoPlugins,
'test': PyTest,
}
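# Typical invocations of the custom commands above (examples only):
#
#   python setup.py build            # also runs build_trans and build_plugins first
#   python setup.py build_trans -D   # compile .mo files in-place under deluge/i18n
#   python setup.py clean -a         # additionally cleans plugin eggs and egg-info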
# Data files to be installed to the system
_data_files = [
('share/icons/hicolor/scalable/apps', ['deluge/ui/data/icons/hicolor/scalable/apps/deluge.svg']),
('share/icons/hicolor/128x128/apps', ['deluge/ui/data/icons/hicolor/128x128/apps/deluge.png']),
('share/icons/hicolor/16x16/apps', ['deluge/ui/data/icons/hicolor/16x16/apps/deluge.png']),
('share/icons/hicolor/192x192/apps', ['deluge/ui/data/icons/hicolor/192x192/apps/deluge.png']),
('share/icons/hicolor/22x22/apps', ['deluge/ui/data/icons/hicolor/22x22/apps/deluge.png']),
('share/icons/hicolor/24x24/apps', ['deluge/ui/data/icons/hicolor/24x24/apps/deluge.png']),
('share/icons/hicolor/256x256/apps', ['deluge/ui/data/icons/hicolor/256x256/apps/deluge.png']),
('share/icons/hicolor/32x32/apps', ['deluge/ui/data/icons/hicolor/32x32/apps/deluge.png']),
('share/icons/hicolor/36x36/apps', ['deluge/ui/data/icons/hicolor/36x36/apps/deluge.png']),
('share/icons/hicolor/48x48/apps', ['deluge/ui/data/icons/hicolor/48x48/apps/deluge.png']),
('share/icons/hicolor/64x64/apps', ['deluge/ui/data/icons/hicolor/64x64/apps/deluge.png']),
('share/icons/hicolor/72x72/apps', ['deluge/ui/data/icons/hicolor/72x72/apps/deluge.png']),
('share/icons/hicolor/96x96/apps', ['deluge/ui/data/icons/hicolor/96x96/apps/deluge.png']),
('share/pixmaps', ['deluge/ui/data/pixmaps/deluge.png', 'deluge/ui/data/pixmaps/deluge.xpm']),
('share/man/man1', [
'docs/man/deluge.1',
'docs/man/deluged.1',
'docs/man/deluge-gtk.1',
'docs/man/deluge-web.1',
'docs/man/deluge-console.1'])
]
if not windows_check() and os.path.exists(desktop_data):
_data_files.append(('share/applications', [desktop_data]))
entry_points = {
"console_scripts": [
"deluge-console = deluge.ui.console:start"
],
"gui_scripts": [
"deluge = deluge.main:start_ui",
"deluge-gtk = deluge.ui.gtkui:start",
"deluge-web = deluge.ui.web:start",
"deluged = deluge.main:start_daemon"
]
}
if windows_check():
entry_points["console_scripts"].extend([
"deluge-debug = deluge.main:start_ui",
"deluge-web-debug = deluge.ui.web:start",
"deluged-debug = deluge.main:start_daemon"])
# Main setup
setup(
name="deluge",
version=get_version(prefix='deluge-', suffix='.dev0'),
fullname="Deluge Bittorrent Client",
description="Bittorrent Client",
author="Andrew Resch, Damien Churchill",
author_email="[email protected], [email protected]",
keywords="torrent bittorrent p2p fileshare filesharing",
long_description="""Deluge is a bittorrent client that utilizes a
daemon/client model. There are various user interfaces available for
Deluge such as the GTKui, the webui and a console ui. Deluge uses
    libtorrent in its backend to handle the bittorrent protocol.""",
url="http://deluge-torrent.org",
license="GPLv3",
cmdclass=cmdclass,
tests_require=['pytest'],
data_files=_data_files,
package_data={"deluge": ["ui/gtkui/glade/*.glade",
"ui/gtkui/glade/*.ui",
"ui/data/pixmaps/*.png",
"ui/data/pixmaps/*.svg",
"ui/data/pixmaps/*.ico",
"ui/data/pixmaps/*.gif",
"ui/data/pixmaps/flags/*.png",
"plugins/*.egg",
"i18n/*/LC_MESSAGES/*.mo",
"ui/web/index.html",
"ui/web/css/*.css",
"ui/web/icons/*.png",
"ui/web/images/*.gif",
"ui/web/images/*.png",
"ui/web/js/*.js",
"ui/web/js/*/*.js",
"ui/web/js/*/.order",
"ui/web/js/*/*/*.js",
"ui/web/js/*/*/.order",
"ui/web/js/*/*/*/*.js",
"ui/web/render/*.html",
"ui/web/themes/css/*.css",
"ui/web/themes/images/*/*.gif",
"ui/web/themes/images/*/*.png",
"ui/web/themes/images/*/*/*.gif",
"ui/web/themes/images/*/*/*.png"
]},
packages=find_packages(exclude=["plugins", "docs", "tests"]),
namespace_packages=["deluge", "deluge.plugins"],
entry_points=entry_points
)
| gpl-3.0 |
dwitvliet/CATMAID | django/applications/performancetests/migrations/0001_initial.py | 2 | 2042 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.utils import timezone
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.TextField()),
('creation_time', models.DateTimeField(default=timezone.now)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TestResult',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('time', models.FloatField()),
('result_code', models.IntegerField()),
('result', models.TextField()),
('creation_time', models.DateTimeField(default=timezone.now)),
('version', models.CharField(max_length=50, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TestView',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('method', models.CharField(max_length=50)),
('url', models.TextField()),
('data', jsonfield.fields.JSONField(default={}, blank=True)),
('creation_time', models.DateTimeField(default=timezone.now)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='testresult',
name='view',
field=models.ForeignKey(to='performancetests.TestView'),
preserve_default=True,
),
]
| gpl-3.0 |
pacoqueen/bbinn | framework/caneloni_di_merda.py | 1 | 5754 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, pclases, mx
sys.path.append(os.path.join("..", "formularios"))
import utils
def buscar_rollos_existencias(fecha):
"""
    Returns a list of rolls in stock up to (and including)
    the given date.
"""
sqlfecha = fecha.strftime('%Y-%m-%d')
fecha_limite_para_comparaciones_con_fechahoras = (fecha + mx.DateTime.oneDay).strftime('%Y-%m-%d')
albaranes_antes_de_fecha = """
SELECT albaran_salida.id
FROM albaran_salida
WHERE albaran_salida.fecha <= '%s'
""" % (sqlfecha)
partes_antes_de_fecha = """
SELECT parte_de_produccion.id
FROM parte_de_produccion
WHERE parte_de_produccion.fecha <= '%s'
""" % (sqlfecha)
articulos_de_rollos_anteriores_a_fecha = """
SELECT rollo.id
FROM rollo
WHERE rollo.fechahora < '%s'
""" % (fecha_limite_para_comparaciones_con_fechahoras)
    # Because fechahora contains both date and time: e.g. 1/1/2006 10:23 is not <= 1/1/2006 0:00 (which would be the received date)
parte_where = """
articulo.rollo_id IS NOT NULL
AND (articulo.parte_de_produccion_id IN (%s) OR (articulo.parte_de_produccion_id IS NULL
AND (articulo.rollo_id IN (%s AND articulo.rollo_id = rollo.id))))
AND (articulo.albaran_salida_id IS NULL OR articulo.albaran_salida_id NOT IN (%s))
""" % (partes_antes_de_fecha,
articulos_de_rollos_anteriores_a_fecha,
albaranes_antes_de_fecha)
    # Would "AND (articulo.albaran_salida_id IS NULL OR articulo.albaran_salida_id IN (albaranes_POSTERIORES_a_fecha))" give a different result?
articulos_en_almacen = pclases.Articulo.select(parte_where)
rollos = [a.rollo for a in articulos_en_almacen]
return rollos
def buscar_rollos_fabricados(fecha_ini, fecha_fin):
"""
    Returns a list of rolls manufactured between the two given dates.
"""
rollos = []
partes = pclases.ParteDeProduccion.select(pclases.AND(pclases.ParteDeProduccion.q.fecha >= fecha_ini,
pclases.ParteDeProduccion.q.fecha <= fecha_fin))
for parte in partes:
if parte.es_de_geotextiles():
for articulo in parte.articulos:
rollos.append(articulo.rollo)
fechasqlini = fecha_ini.strftime('%Y-%m-%d')
fechasqlfin = (fecha_fin + mx.DateTime.oneDay).strftime('%Y-%m-%d')
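    # fechahora stores both date and time, so use an exclusive upper bound of
    # the day after fecha_fin when comparing against it below.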
articulos_de_rollos_sin_parte_de_produccion_y_entre_fechas = pclases.Articulo.select("""
rollo_id IN (SELECT id FROM rollo WHERE fechahora >= '%s' AND fechahora < '%s') AND parte_de_produccion_id IS NULL
""" % (fechasqlini, fechasqlfin))
for articulo in articulos_de_rollos_sin_parte_de_produccion_y_entre_fechas:
rollos.append(articulo.rollo)
return rollos
def buscar_rollos_salidos(fecha_ini, fecha_fin):
"""
    Returns a list of rolls that were shipped out between
    the two given dates (both inclusive).
"""
rollos = []
albaranes = pclases.AlbaranSalida.select(pclases.AND(pclases.AlbaranSalida.q.fecha >= fecha_ini,
pclases.AlbaranSalida.q.fecha <= fecha_fin))
for albaran in albaranes:
for articulo in albaran.articulos:
if articulo.es_rollo():
rollos.append(articulo.rollo)
return rollos
def main():
"""
    Returns a dictionary with the lists of rolls in stock,
    manufactured and shipped out in each period.
"""
ini_enero = mx.DateTime.DateTimeFrom(day = 1, month = 1, year = 2006)
fin_enero = mx.DateTime.DateTimeFrom(day = -1, month = 1, year = 2006)
ini_febrero = mx.DateTime.DateTimeFrom(day = 1, month = 2, year = 2006)
fin_febrero = mx.DateTime.DateTimeFrom(day = -1, month = 2, year = 2006)
rollos_existencias_enero = buscar_rollos_existencias(fin_enero)
print "EXISTENCIAS AL 31 DE ENERO: %s" % (utils.int2str(len(rollos_existencias_enero)))
rollos_fabricados_febrero = buscar_rollos_fabricados(ini_febrero, fin_febrero)
print "FABRICADO EN FEBRERO: %s" % (utils.int2str(len(rollos_fabricados_febrero)))
rollos_salidos_febrero = buscar_rollos_salidos(ini_febrero, fin_febrero)
print "ROLLOS SALIDOS EN FEBRERO: %s" % (utils.int2str(len(rollos_salidos_febrero)))
len_existencias_teoria_febrero = len(rollos_existencias_enero) + len(rollos_fabricados_febrero) - len(rollos_salidos_febrero)
existencias_teoria_febrero = rollos_existencias_enero + rollos_fabricados_febrero
for rollo in rollos_salidos_febrero:
try:
existencias_teoria_febrero.remove(rollo)
except ValueError:
print "Busted! El rollo ID %d salió en febrero pero no estaba en enero ni se fabricó en febrero." % (rollo.id)
if rollo in existencias_teoria_febrero:
print "Busted! El rollo ID %d sigue estando en las existencias de febrero." % (rollo.id)
print "TOTAL TEÓRICO AL 28 DE FEBRERO: %s [%s]" % (utils.int2str(len_existencias_teoria_febrero),
utils.int2str(len(existencias_teoria_febrero)))
rollos_existencias_febrero = buscar_rollos_existencias(fin_febrero)
print "TOTAL BD AL 28 DE FEBRERO: %s" % (utils.int2str(len(rollos_existencias_febrero)))
return {'existencias enero': rollos_existencias_enero,
'fabricados febrero': rollos_fabricados_febrero,
'salidos febrero': rollos_salidos_febrero,
'existencias teoria febrero': existencias_teoria_febrero,
'existencias febrero': rollos_existencias_febrero}
if __name__ == "__main__":
dic_rollos = main()
| gpl-2.0 |
ASOdesk/selenium-pytest-fix | py/test/selenium/webdriver/marionette/mn_options_tests.py | 10 | 4563 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
try:
basestring
except NameError: # Python 3.x
basestring = str
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.options import Log, Options
@pytest.fixture
def driver_kwargs(driver_kwargs):
driver_kwargs['firefox_options'] = Options()
return driver_kwargs
class TestIntegration(object):
def test_we_can_pass_options(self, driver, pages):
pages.load("formPage.html")
driver.find_element_by_id("cheese")
class TestUnit(object):
def test_ctor(self):
opts = Options()
assert opts._binary is None
assert opts._preferences == {}
assert opts._profile is None
assert opts._arguments == []
assert isinstance(opts.log, Log)
def test_binary(self):
opts = Options()
assert opts.binary is None
other_binary = FirefoxBinary()
assert other_binary != opts.binary
opts.binary = other_binary
assert other_binary == opts.binary
path = "/path/to/binary"
opts.binary = path
assert isinstance(opts.binary, FirefoxBinary)
assert opts.binary._start_cmd == path
def test_prefs(self):
opts = Options()
assert len(opts.preferences) == 0
assert isinstance(opts.preferences, dict)
opts.set_preference("spam", "ham")
assert len(opts.preferences) == 1
opts.set_preference("eggs", True)
assert len(opts.preferences) == 2
opts.set_preference("spam", "spam")
assert len(opts.preferences) == 2
assert opts.preferences == {"spam": "spam", "eggs": True}
def test_profile(self, tmpdir_factory):
opts = Options()
assert opts.profile is None
other_profile = FirefoxProfile()
assert other_profile != opts.profile
opts.profile = other_profile
assert other_profile == opts.profile
opts.profile = str(tmpdir_factory.mktemp("profile"))
assert isinstance(opts.profile, FirefoxProfile)
def test_arguments(self):
opts = Options()
assert len(opts.arguments) == 0
opts.add_argument("--foo")
assert len(opts.arguments) == 1
opts.arguments.append("--bar")
assert len(opts.arguments) == 2
assert opts.arguments == ["--foo", "--bar"]
def test_to_capabilities(self):
opts = Options()
assert opts.to_capabilities() == {}
profile = FirefoxProfile()
opts.profile = profile
caps = opts.to_capabilities()
assert "moz:firefoxOptions" in caps
assert "profile" in caps["moz:firefoxOptions"]
assert isinstance(caps["moz:firefoxOptions"]["profile"], basestring)
assert caps["moz:firefoxOptions"]["profile"] == profile.encoded
opts.add_argument("--foo")
caps = opts.to_capabilities()
assert "moz:firefoxOptions" in caps
assert "args" in caps["moz:firefoxOptions"]
assert caps["moz:firefoxOptions"]["args"] == ["--foo"]
binary = FirefoxBinary()
opts.binary = binary
caps = opts.to_capabilities()
assert "moz:firefoxOptions" in caps
assert "binary" in caps["moz:firefoxOptions"]
assert isinstance(caps["moz:firefoxOptions"]["binary"], basestring)
assert caps["moz:firefoxOptions"]["binary"] == binary._start_cmd
opts.set_preference("spam", "ham")
caps = opts.to_capabilities()
assert "moz:firefoxOptions" in caps
assert "prefs" in caps["moz:firefoxOptions"]
assert isinstance(caps["moz:firefoxOptions"]["prefs"], dict)
assert caps["moz:firefoxOptions"]["prefs"]["spam"] == "ham"
| apache-2.0 |
fired334/zeroclickinfo-goodies | share/goodie/currency_in/parse.py | 87 | 2347 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Released under the GPL v2 license
# https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
import lxml.html
import sys
#url = "http://en.wikipedia.org/wiki/List_of_circulating_currencies"
url = "https://secure.wikimedia.org/wikipedia/en/wiki/List_of_circulating_currencies"
countries = {}; # country:[[currency, code] [currency, code],...]
country = "" # store current country for each row
currency = "" # store current currency for each row
iso_code = "" # store current iso code for currency
description = "" # store currency and iso code when saving into file
def add_currency(country, currency, iso_code, countries):
"Add country into countries list"
country = country.encode("utf8")
if country in countries:
countries[country].append([currency, iso_code])
else:
countries[country] = [[currency, iso_code]]
def clear_text(text):
"Clear text of anotations in []. When e.g. 'Ascension pound[A]' contains [A]"
start = text.find("[")
if start != -1:
text = text[:start]
return text
tree = lxml.html.parse("download/page.dat").getroot()
tables = tree.find_class("wikitable sortable")
for table in tables:
for row in table.findall('tr'):
cells = row.findall('td')
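        # Rows with 6 cells introduce a new country; 5-cell rows are
        # continuation rows that reuse the country from the previous row.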
if len(cells) == 6:
country = cells[0].text_content()
currency = cells[1].text_content()
iso_code = cells[3].text_content()
if len(cells) == 5:
currency = cells[0].text_content()
iso_code = cells[2].text_content()
currency = clear_text(currency)
iso_code = iso_code if iso_code != "None" else ""
if currency != "None" and currency != "":
add_currency(country[1:], currency, iso_code, countries)
"Make output file 'currency.txt' as Perl 'hash table' ready for 'CurrencyIn.pm' module"
output = "currency.txt"
f= open(output, "w")
result = []
for country in sorted(countries):
description = ""
formated_record = []
for record in countries[country]:
iso_code = "" if record[1] == "" else (" (" + record[1] + ")")
currency = record[0]
formated_record.append((currency + iso_code).encode("utf8"))
description = ','.join(str(x) for x in formated_record)
f.write(country.lower() + "\n" + description + "\n")
f.close()
| apache-2.0 |
jessstrap/servotk | tests/wpt/web-platform-tests/tools/pytest/_pytest/genscript.py | 191 | 4129 | """ (deprecated) generate a single-file self-contained version of pytest """
import os
import sys
import pkgutil
import py
import _pytest
def find_toplevel(name):
for syspath in sys.path:
base = py.path.local(syspath)
lib = base/name
if lib.check(dir=1):
return lib
mod = base.join("%s.py" % name)
if mod.check(file=1):
return mod
raise LookupError(name)
def pkgname(toplevel, rootpath, path):
parts = path.parts()[len(rootpath.parts()):]
return '.'.join([toplevel] + [x.purebasename for x in parts])
def pkg_to_mapping(name):
toplevel = find_toplevel(name)
name2src = {}
if toplevel.check(file=1): # module
name2src[toplevel.purebasename] = toplevel.read()
else: # package
for pyfile in toplevel.visit('*.py'):
pkg = pkgname(name, toplevel, pyfile)
name2src[pkg] = pyfile.read()
    # with wheels the py source code might not be installed
# and the resulting genscript is useless, just bail out.
assert name2src, "no source code found for %r at %r" %(name, toplevel)
return name2src
def compress_mapping(mapping):
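    # Pickle the module-name -> source mapping, zlib-compress it and
    # base64-encode it so it can be embedded as ASCII text in the template.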
import base64, pickle, zlib
data = pickle.dumps(mapping, 2)
data = zlib.compress(data, 9)
data = base64.encodestring(data)
data = data.decode('ascii')
return data
def compress_packages(names):
mapping = {}
for name in names:
mapping.update(pkg_to_mapping(name))
return compress_mapping(mapping)
def generate_script(entry, packages):
data = compress_packages(packages)
tmpl = py.path.local(__file__).dirpath().join('standalonetemplate.py')
exe = tmpl.read()
exe = exe.replace('@SOURCES@', data)
exe = exe.replace('@ENTRY@', entry)
return exe
def pytest_addoption(parser):
group = parser.getgroup("debugconfig")
group.addoption("--genscript", action="store", default=None,
dest="genscript", metavar="path",
help="create standalone pytest script at given target path.")
def pytest_cmdline_main(config):
import _pytest.config
genscript = config.getvalue("genscript")
if genscript:
tw = _pytest.config.create_terminal_writer(config)
tw.line("WARNING: usage of genscript is deprecated.",
red=True)
deps = ['py', '_pytest', 'pytest'] # pluggy is vendored
if sys.version_info < (2,7):
deps.append("argparse")
tw.line("generated script will run on python2.6-python3.3++")
else:
tw.line("WARNING: generated script will not run on python2.6 "
"due to 'argparse' dependency. Use python2.6 "
"to generate a python2.6 compatible script", red=True)
script = generate_script(
'import pytest; raise SystemExit(pytest.cmdline.main())',
deps,
)
genscript = py.path.local(genscript)
genscript.write(script)
tw.line("generated pytest standalone script: %s" % genscript,
bold=True)
return 0
def pytest_namespace():
return {'freeze_includes': freeze_includes}
def freeze_includes():
"""
Returns a list of module names used by py.test that should be
included by cx_freeze.
"""
result = list(_iter_all_modules(py))
result += list(_iter_all_modules(_pytest))
return result
def _iter_all_modules(package, prefix=''):
"""
Iterates over the names of all modules that can be found in the given
package, recursively.
Example:
_iter_all_modules(_pytest) ->
['_pytest.assertion.newinterpret',
'_pytest.capture',
'_pytest.core',
...
]
"""
if type(package) is not str:
path, prefix = package.__path__[0], package.__name__ + '.'
else:
path = package
for _, name, is_package in pkgutil.iter_modules([path]):
if is_package:
for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'):
yield prefix + m
else:
yield prefix + name
| mpl-2.0 |
knowsis/django | django/conf/locale/sv/formats.py | 118 | 1568 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'Y-m-d'
SHORT_DATETIME_FORMAT = 'Y-m-d H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = (
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y', # '10/25/06'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| bsd-3-clause |
sharhar/USB-Thing | UpdaterFiles/Lib/python-3.5.1.amd64/Lib/encodings/iso8859_1.py | 266 | 13176 | """ Python Character Mapping Codec iso8859_1 generated from 'MAPPINGS/ISO8859/8859-1.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-1',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xbf' # 0xBF -> INVERTED QUESTION MARK
'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
massifor/distcc | bench/Summary.py | 28 | 4249 | # benchmark -- automated system for testing distcc correctness
# and performance on various source trees.
# Copyright (C) 2002, 2003 by Martin Pool
# Copyright 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
import buildutil
import os
import statistics
class Summary:
"""Stores and prints results of building different things"""
# Table is a sequence, because we prefer to have things printed
# out in the order they were executed.
def __init__(self):
self._table = []
def store(self, project, compiler, time_info_accumulator):
"""
Args:
project: a Project object
compiler: a Compiler object
time_info_accumulator: the string 'FAIL' or a list of Build.TimeInfo records
The time information is a list because we can build projects repeatedly.
"""
self._table.append((project.name, compiler.name, time_info_accumulator))
def print_raw(self):
from pprint import pprint
pprint(self._table)
@staticmethod
def print_mean_and_sd(times, unit='s', no_sd=False):
assert len(unit) == 1, unit
mean = statistics.mean(times)
sd = statistics.std(times)
if mean is None:
print "%s%s " % ("n/a", sd_space),
else:
print "%8.1f%s " % (mean, unit),
if not no_sd:
if sd is None:
print "%9s " % "n/a",
else:
print "%8.1f%s " % (sd, unit),
def print_table(self):
import time, os, sys
import statistics
# if nothing was run, skip it
if not len(self._table):
return
"""Print out in a nice tabular form"""
print """
========================
distcc benchmark results
========================
"""
print "Date: ", time.ctime()
hosts = os.getenv('DISTCC_HOSTS')
print "DISTCC_HOSTS: %s" % `hosts`
print "Total hosts: %d" % buildutil.count_hosts(hosts)
number_CPUs = os.sysconf('SC_NPROCESSORS_ONLN')
print "Local number of CPUs: %s" % number_CPUs
sys.stdout.flush()
os.system("uname -a")
print ("%-20s %-30s %9s %9s %9s %9s %9s"
% ('project', 'compiler', 'time', 's.d.',
'CPU time',
'CPU util',
'incl serv'))
for row in self._table:
print "%-20s %-30s " % row[:2],
time_info_accumulator = row[2]
if isinstance(time_info_accumulator, str):
print ' ' * 4, time_info_accumulator
else:
real_times = [time_info.real for time_info in time_info_accumulator]
Summary.print_mean_and_sd(real_times)
cpu_times = [time_info.user + time_info.system
for time_info in time_info_accumulator]
self.print_mean_and_sd(cpu_times, no_sd=True)
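        # CPU utilisation: total CPU seconds as a percentage of wall-clock
        # time multiplied by the number of local CPUs.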
cpu_util_ratios = (
[100 * cpu_times[i]/(number_CPUs * time_info_accumulator[i].real)
for i in range(len(time_info_accumulator))])
self.print_mean_and_sd(cpu_util_ratios, unit='%', no_sd=True)
include_server_times = [time_info.include_server
for time_info in time_info_accumulator]
if None not in include_server_times:
self.print_mean_and_sd(include_server_times, no_sd=True)
print
| gpl-2.0 |
ovnicraft/edx-platform | common/lib/capa/capa/customrender.py | 60 | 5603 | """
This has custom renderers: classes that know how to render certain problem tags (e.g. <math> and
<solution>) to html.
These tags do not have state, so they just get passed the system (for access to render_template),
and the xml element.
"""
import logging
import re
from cgi import escape as cgi_escape
from lxml import etree
import xml.sax.saxutils as saxutils
from .registry import TagRegistry
log = logging.getLogger(__name__)
registry = TagRegistry()
#-----------------------------------------------------------------------------
class MathRenderer(object):
tags = ['math']
def __init__(self, system, xml):
r"""
Render math using latex-like formatting.
Examples:
<math>$\displaystyle U(r)=4 U_0 $</math>
<math>$r_0$</math>
We convert these to [mathjax]...[/mathjax] and [mathjaxinline]...[/mathjaxinline]
TODO: use shorter tags (but this will require converting problem XML files!)
"""
self.system = system
self.xml = xml
mathstr = re.sub(r'\$(.*)\$', r'[mathjaxinline]\1[/mathjaxinline]', xml.text)
mtag = 'mathjax'
if r'\displaystyle' not in mathstr:
mtag += 'inline'
else:
mathstr = mathstr.replace(r'\displaystyle', '')
self.mathstr = mathstr.replace('mathjaxinline]', '%s]' % mtag)
def get_html(self):
"""
Return the contents of this tag, rendered to html, as an etree element.
"""
# TODO: why are there nested html tags here?? Why are there html tags at all, in fact?
html = '<html><html>%s</html><html>%s</html></html>' % (
self.mathstr, saxutils.escape(self.xml.tail))
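        # Parse the assembled markup; if it is not well-formed, report an
        # inline error when DEBUG is set instead of raising.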
try:
xhtml = etree.XML(html)
except Exception as err:
if self.system.DEBUG:
msg = '<html><div class="inline-error"><p>Error %s</p>' % (
str(err).replace('<', '<'))
msg += ('<p>Failed to construct math expression from <pre>%s</pre></p>' %
html.replace('<', '<'))
msg += "</div></html>"
log.error(msg)
return etree.XML(msg)
else:
raise
return xhtml
registry.register(MathRenderer)
#-----------------------------------------------------------------------------
class SolutionRenderer(object):
"""
A solution is just a <span>...</span> which is given an ID, that is used for displaying an
extended answer (a problem "solution") after "show answers" is pressed.
Note that the solution content is NOT rendered and returned in the HTML. It is obtained by an
ajax call.
"""
tags = ['solution']
def __init__(self, system, xml):
self.system = system
self.id = xml.get('id')
def get_html(self):
context = {'id': self.id}
html = self.system.render_template("solutionspan.html", context)
return etree.XML(html)
registry.register(SolutionRenderer)
#-----------------------------------------------------------------------------
class TargetedFeedbackRenderer(object):
"""
A targeted feedback is just a <span>...</span> that is used for displaying an
extended piece of feedback to students if they incorrectly answered a question.
"""
tags = ['targetedfeedback']
def __init__(self, system, xml):
self.system = system
self.xml = xml
def get_html(self):
"""
Return the contents of this tag, rendered to html, as an etree element.
"""
html = '<section class="targeted-feedback-span"><span>{}</span></section>'.format(etree.tostring(self.xml))
try:
xhtml = etree.XML(html)
except Exception as err: # pylint: disable=broad-except
if self.system.DEBUG:
msg = """
<html>
<div class="inline-error">
<p>Error {err}</p>
<p>Failed to construct targeted feedback from <pre>{html}</pre></p>
</div>
</html>
""".format(err=cgi_escape(err), html=cgi_escape(html))
log.error(msg)
return etree.XML(msg)
else:
raise
return xhtml
registry.register(TargetedFeedbackRenderer)
#-----------------------------------------------------------------------------
class ClarificationRenderer(object):
"""
A clarification appears as an inline icon which reveals more information when the user
hovers over it.
e.g. <p>Enter the ROA <clarification>Return on Assets</clarification> for 2015:</p>
"""
tags = ['clarification']
def __init__(self, system, xml):
self.system = system
# Get any text content found inside this tag prior to the first child tag. It may be a string or None type.
initial_text = xml.text if xml.text else ''
self.inner_html = initial_text + ''.join(etree.tostring(element) for element in xml)
self.tail = xml.tail
def get_html(self):
"""
Return the contents of this tag, rendered to html, as an etree element.
"""
context = {'clarification': self.inner_html}
html = self.system.render_template("clarification.html", context)
xml = etree.XML(html)
# We must include any text that was following our original <clarification>...</clarification> XML node.:
xml.tail = self.tail
return xml
registry.register(ClarificationRenderer)
| agpl-3.0 |
lukeiwanski/tensorflow | tensorflow/python/eager/tensor_test.py | 9 | 12405 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for TensorFlow "Eager" Mode's Tensor class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import re
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
def _create_tensor(value, device=None, dtype=None):
ctx = context.context()
if device is None:
device = ctx.device_name
if dtype is not None:
dtype = dtype.as_datatype_enum
try:
return ops.EagerTensor(
value, context=ctx._handle, device=device, dtype=dtype)
except core._NotOkStatusException as e: # pylint: disable=protected-access
raise core._status_to_exception(e.code, e.message)
class TFETensorTest(test_util.TensorFlowTestCase):
def testScalarTensor(self):
t = _create_tensor(3, dtype=dtypes.int32)
self.assertAllEqual(t, _create_tensor(np.array(3)))
self.assertEqual(dtypes.int32, t.dtype)
self.assertEqual(0, t.shape.ndims)
self.assertAllEqual([], t.shape.as_list())
self.assertIn("tf.Tensor", str(t))
self.assertIn("tf.Tensor", repr(t))
def testBadConstructorArgs(self):
ctx = context.context()
handle = ctx._handle
device = ctx.device_name
# Missing context.
with self.assertRaisesRegexp(
TypeError, r"Required argument 'context' \(pos 2\) not found"):
ops.EagerTensor(1, device=device)
# Missing device.
with self.assertRaisesRegexp(
TypeError, r"Required argument 'device' \(pos 3\) not found"):
ops.EagerTensor(1, context=handle)
# Bad dtype type.
with self.assertRaisesRegexp(TypeError,
"Expecting a DataType value for dtype. Got"):
ops.EagerTensor(1, context=handle, device=device, dtype="1")
# Following errors happen when trying to copy to GPU.
if not context.context().num_gpus():
self.skipTest("No GPUs found")
with ops.device("/device:GPU:0"):
device = ctx.device_name
# Bad context.
with self.assertRaisesRegexp(
TypeError, "Expecting a PyCapsule encoded context handle. Got"):
ops.EagerTensor(1.0, context=1, device=device)
# Bad device.
with self.assertRaisesRegexp(
TypeError, "Error parsing device argument to CopyToDevice"):
ops.EagerTensor(1.0, context=handle, device=1)
def testNumpyValue(self):
values = np.array([3.0])
t = _create_tensor(values)
self.assertAllEqual(values, t)
def testNumpyValueWithCast(self):
values = np.array([3.0], dtype=np.float32)
t = _create_tensor(values, dtype=dtypes.float64)
self.assertAllEqual(values, t)
ctx = context.context()
# Bad dtype value.
with self.assertRaisesRegexp(TypeError, "Invalid dtype argument value"):
ops.EagerTensor(
values, context=ctx._handle, device=ctx.device_name, dtype=12345)
def testNumpyOrderHandling(self):
n = np.array([[1, 2], [3, 4]], order="F")
t = _create_tensor(n)
self.assertAllEqual([[1, 2], [3, 4]], t)
def testNumpyArrayDtype(self):
tensor = constant_op.constant([1.0, 2.0, 3.0])
numpy_tensor = np.asarray(tensor, dtype=np.int32)
self.assertAllEqual(numpy_tensor, [1, 2, 3])
def testNdimsAgreesWithNumpy(self):
numpy_tensor = np.asarray(1.0)
tensor = constant_op.constant(numpy_tensor)
self.assertAllEqual(numpy_tensor.ndim, tensor.ndim)
numpy_tensor = np.asarray([1.0, 2.0, 3.0])
tensor = constant_op.constant(numpy_tensor)
self.assertAllEqual(numpy_tensor.ndim, tensor.ndim)
numpy_tensor = np.asarray([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]])
tensor = constant_op.constant(numpy_tensor)
self.assertAllEqual(numpy_tensor.ndim, tensor.ndim)
def testCopy(self):
t = constant_op.constant(1.0)
tt = copy.copy(t)
self.assertAllEqual(tt, 1.0)
del tt
tt = copy.deepcopy(t)
self.assertAllEqual(tt, 1.0)
del tt
self.assertAllEqual(t, 1.0)
def testConstantDtype(self):
self.assertEqual(constant_op.constant(1.0, dtype=np.int64).dtype,
dtypes.int64)
def testTensorAndNumpyMatrix(self):
expected = np.array([[1.0, 2.0], [3.0, 4.0]], np.float32)
actual = _create_tensor([[1.0, 2.0], [3.0, 4.0]])
self.assertAllEqual(expected, actual)
self.assertEqual(np.float32, actual.dtype)
self.assertEqual(dtypes.float32, actual.dtype)
self.assertAllEqual([2, 2], actual.shape.as_list())
def testFloatDowncast(self):
# Unless explicitly specified, float64->float32
t = _create_tensor(3.0)
self.assertEqual(dtypes.float32, t.dtype)
t = _create_tensor(3.0, dtype=dtypes.float64)
self.assertEqual(dtypes.float64, t.dtype)
def testBool(self):
t = _create_tensor(False)
if t:
self.assertFalse(True)
def testIntDowncast(self):
t = _create_tensor(3)
self.assertEqual(dtypes.int32, t.dtype)
t = _create_tensor(3, dtype=dtypes.int64)
self.assertEqual(dtypes.int64, t.dtype)
t = _create_tensor(2**33)
self.assertEqual(dtypes.int64, t.dtype)
def testTensorCreationFailure(self):
with self.assertRaises(ValueError):
# Should fail because the each row of the Python object has a different
# number of columns.
self.assertEqual(None, _create_tensor([[1], [1, 2]]))
def testMultiLineTensorStr(self):
t = _create_tensor(np.eye(3))
tensor_str = str(t)
self.assertIn("shape=%s, dtype=%s" % (t.shape, t.dtype.name), tensor_str)
self.assertIn(str(t), tensor_str)
def testMultiLineTensorRepr(self):
t = _create_tensor(np.eye(3))
tensor_repr = repr(t)
self.assertTrue(tensor_repr.startswith("<"))
self.assertTrue(tensor_repr.endswith(">"))
self.assertIn("id=%d, shape=%s, dtype=%s, numpy=\n%r" %
(t._id, t.shape, t.dtype.name, t.numpy()), tensor_repr)
def testTensorStrReprObeyNumpyPrintOptions(self):
orig_threshold = np.get_printoptions()["threshold"]
orig_edgeitems = np.get_printoptions()["edgeitems"]
np.set_printoptions(threshold=2, edgeitems=1)
t = _create_tensor(np.arange(10, dtype=np.int32))
self.assertTrue(re.match(r".*\[.*0.*\.\.\..*9.*\]", str(t)))
self.assertTrue(re.match(r".*\[.*0.*\.\.\..*9.*\]", repr(t)))
# Clean up: reset to previous printoptions.
np.set_printoptions(threshold=orig_threshold, edgeitems=orig_edgeitems)
def testZeroDimTensorStr(self):
t = _create_tensor(42)
self.assertIn("42, shape=(), dtype=int32", str(t))
def testZeroDimTensorRepr(self):
t = _create_tensor(42)
self.assertTrue(repr(t).startswith("<"))
self.assertTrue(repr(t).endswith(">"))
self.assertIn("id=%d, shape=(), dtype=int32, numpy=42" % t._id, repr(t))
def testZeroSizeTensorStr(self):
t = _create_tensor(np.zeros(0, dtype=np.float32))
self.assertIn("[], shape=(0,), dtype=float32", str(t))
def testZeroSizeTensorRepr(self):
t = _create_tensor(np.zeros(0, dtype=np.float32))
self.assertTrue(repr(t).startswith("<"))
self.assertTrue(repr(t).endswith(">"))
self.assertIn("id=%d, shape=(0,), dtype=float32, numpy=%r" % (t._id,
t.numpy()),
repr(t))
def testStringTensor(self):
t_np_orig = np.array([[b"a", b"ab"], [b"abc", b"abcd"]])
t = _create_tensor(t_np_orig)
t_np = t.numpy()
self.assertTrue(np.all(t_np == t_np_orig), "%s vs %s" % (t_np, t_np_orig))
def testIterateOverTensor(self):
l = [[1, 2], [3, 4]]
t = _create_tensor(l)
for list_element, tensor_element in zip(l, t):
self.assertAllEqual(list_element, tensor_element.numpy())
def testStringTensorOnGPU(self):
if not context.context().num_gpus():
self.skipTest("No GPUs found")
with ops.device("/device:GPU:0"):
with self.assertRaisesRegexp(
RuntimeError, "Can't copy Tensor with type string to device"):
_create_tensor("test string")
class TFETensorUtilTest(test_util.TensorFlowTestCase):
def testListOfThree(self):
t1 = _create_tensor([[1, 2], [3, 4], [5, 6]], dtype=dtypes.int32)
t2 = _create_tensor([[1, 2, 5], [3, 4, 5]], dtype=dtypes.int32)
t3 = _create_tensor([[1], [3], [5], [6]], dtype=dtypes.int32)
r = pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1, t2, t3], 0)
self.assertAllEqual(np.array([3, 2, 4]), r.numpy())
r = pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1, t2, t3], 1)
self.assertAllEqual(np.array([2, 3, 1]), r.numpy())
def testEmptyTensorList(self):
a = pywrap_tensorflow.TFE_Py_TensorShapeSlice([], 0)
self.assertTrue(isinstance(a, ops.EagerTensor))
self.assertEqual(0, a.numpy().size)
def testTensorListContainsNonTensors(self):
t1 = _create_tensor([1, 2], dtype=dtypes.int32)
with self.assertRaisesRegexp(
TypeError,
r"Expected a list of EagerTensors but element 1 has type \"str\""):
pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1, "abc"], 0)
with self.assertRaisesRegexp(
TypeError,
r"Expected a list of EagerTensors but element 0 has type \"int\""):
pywrap_tensorflow.TFE_Py_TensorShapeSlice([2, t1], 0)
def testTensorListNotList(self):
t1 = _create_tensor([1, 2], dtype=dtypes.int32)
with self.assertRaisesRegexp(
TypeError,
r"tensors argument must be a list or a tuple. Got \"EagerTensor\""):
pywrap_tensorflow.TFE_Py_TensorShapeSlice(t1, -2)
def testNegativeSliceDim(self):
t1 = _create_tensor([1, 2], dtype=dtypes.int32)
with self.assertRaisesRegexp(
ValueError,
r"Slice dimension must be non-negative. Got -2"):
pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1], -2)
def testUnicode(self):
self.assertEqual(constant_op.constant(u"asdf").numpy(), b"asdf")
def testFloatTensor(self):
self.assertEqual(dtypes.float64, _create_tensor(np.float64()).dtype)
self.assertEqual(dtypes.float32, _create_tensor(np.float32()).dtype)
self.assertEqual(dtypes.float32, _create_tensor(0.0).dtype)
def testSliceDimOutOfRange(self):
t1 = _create_tensor([[1, 2], [3, 4], [5, 6]], dtype=dtypes.int32)
t2 = _create_tensor([1, 2], dtype=dtypes.int32)
t3 = _create_tensor(2, dtype=dtypes.int32)
with self.assertRaisesRegexp(
IndexError,
r"Slice dimension \(2\) must be smaller than rank of all tensors, "
"but tensor at index 0 has rank 2"):
pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1], 2)
with self.assertRaisesRegexp(
IndexError,
r"Slice dimension \(1\) must be smaller than rank of all tensors, "
"but tensor at index 0 has rank 1"):
pywrap_tensorflow.TFE_Py_TensorShapeSlice([t2], 1)
with self.assertRaisesRegexp(
IndexError,
r"Slice dimension \(1\) must be smaller than rank of all tensors, "
"but tensor at index 1 has rank 1"):
pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1, t2], 1)
with self.assertRaisesRegexp(
IndexError,
r"Slice dimension \(0\) must be smaller than rank of all tensors, "
"but tensor at index 0 has rank 0"):
pywrap_tensorflow.TFE_Py_TensorShapeSlice([t3], 0)
with self.assertRaisesRegexp(
IndexError,
r"Slice dimension \(0\) must be smaller than rank of all tensors, "
"but tensor at index 2 has rank 0"):
pywrap_tensorflow.TFE_Py_TensorShapeSlice([t2, t1, t3], 0)
if __name__ == "__main__":
test.main()
| apache-2.0 |
yyzybb537/libgo | third_party/boost.context/tools/build/src/build/property.py | 11 | 20200 | # Status: ported, except for tests.
# Base revision: 64070
#
# Copyright 2001, 2002, 2003 Dave Abrahams
# Copyright 2006 Rene Rivera
# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import re
import sys
from b2.util.utility import *
from b2.build import feature
from b2.util import sequence, qualify_jam_action, is_iterable_typed
import b2.util.set
from b2.manager import get_manager
__re_two_ampersands = re.compile ('&&')
__re_comma = re.compile (',')
__re_split_condition = re.compile ('(.*):(<.*)')
__re_split_conditional = re.compile (r'(.+):<(.+)')
__re_colon = re.compile (':')
__re_has_condition = re.compile (r':<')
__re_separate_condition_and_property = re.compile (r'(.*):(<.*)')
__not_applicable_feature='not-applicable-in-this-context'
feature.feature(__not_applicable_feature, [], ['free'])
__abbreviated_paths = False
class Property(object):
__slots__ = ('_feature', '_value', '_condition')
def __init__(self, f, value, condition = []):
if type(f) == type(""):
f = feature.get(f)
# At present, single property has a single value.
assert type(value) != type([])
assert(f.free() or value.find(':') == -1)
self._feature = f
self._value = value
self._condition = condition
def feature(self):
return self._feature
def value(self):
return self._value
def condition(self):
return self._condition
def to_raw(self):
result = "<" + self._feature.name() + ">" + str(self._value)
if self._condition:
result = ",".join(str(p) for p in self._condition) + ':' + result
return result
def __str__(self):
return self.to_raw()
def __hash__(self):
# FIXME: consider if this class should be value-is-identity one
return hash((self._feature, self._value, tuple(self._condition)))
def __cmp__(self, other):
return cmp((self._feature.name(), self._value, self._condition),
(other._feature.name(), other._value, other._condition))
def create_from_string(s, allow_condition=False,allow_missing_value=False):
assert isinstance(s, basestring)
assert isinstance(allow_condition, bool)
assert isinstance(allow_missing_value, bool)
condition = []
import types
if not isinstance(s, types.StringType):
print type(s)
if __re_has_condition.search(s):
if not allow_condition:
raise BaseException("Conditional property is not allowed in this context")
m = __re_separate_condition_and_property.match(s)
condition = m.group(1)
s = m.group(2)
# FIXME: break dependency cycle
from b2.manager import get_manager
feature_name = get_grist(s)
if not feature_name:
if feature.is_implicit_value(s):
f = feature.implied_feature(s)
value = s
else:
raise get_manager().errors()("Invalid property '%s' -- unknown feature" % s)
else:
if feature.valid(feature_name):
f = feature.get(feature_name)
value = get_value(s)
else:
# In case feature name is not known, it is wrong to do a hard error.
# Feature sets change depending on the toolset. So e.g.
# <toolset-X:version> is an unknown feature when using toolset Y.
#
# Ideally we would like to ignore this value, but most of
# Boost.Build code expects that we return a valid Property. For this
# reason we use a sentinel <not-applicable-in-this-context> feature.
#
            # The underlying cause of this problem is that the Python port's
            # Property class is stricter than its Jam counterpart and must
            # always reference a valid feature.
f = feature.get(__not_applicable_feature)
value = s
if not value and not allow_missing_value:
get_manager().errors()("Invalid property '%s' -- no value specified" % s)
if condition:
condition = [create_from_string(x) for x in condition.split(',')]
return Property(f, value, condition)
def create_from_strings(string_list, allow_condition=False):
assert is_iterable_typed(string_list, basestring)
return [create_from_string(s, allow_condition) for s in string_list]
def reset ():
""" Clear the module state. This is mainly for testing purposes.
"""
global __results
# A cache of results from as_path
__results = {}
reset ()
def set_abbreviated_paths(on=True):
global __abbreviated_paths
__abbreviated_paths = on
def get_abbreviated_paths():
return __abbreviated_paths or '--abbreviated-paths' in sys.argv
def path_order (x, y):
""" Helper for as_path, below. Orders properties with the implicit ones
first, and within the two sections in alphabetical order of feature
name.
"""
if x == y:
return 0
xg = get_grist (x)
yg = get_grist (y)
if yg and not xg:
return -1
elif xg and not yg:
return 1
else:
if not xg:
x = feature.expand_subfeatures([x])
y = feature.expand_subfeatures([y])
if x < y:
return -1
elif x > y:
return 1
else:
return 0
def identify(string):
return string
# Uses Property
def refine (properties, requirements):
""" Refines 'properties' by overriding any non-free properties
for which a different value is specified in 'requirements'.
Conditional requirements are just added without modification.
Returns the resulting list of properties.
"""
assert is_iterable_typed(properties, Property)
assert is_iterable_typed(requirements, Property)
# The result has no duplicates, so we store it in a set
result = set()
# Records all requirements.
required = {}
# All the elements of requirements should be present in the result
# Record them so that we can handle 'properties'.
for r in requirements:
# Don't consider conditional requirements.
if not r.condition():
required[r.feature()] = r
for p in properties:
# Skip conditional properties
if p.condition():
result.add(p)
# No processing for free properties
elif p.feature().free():
result.add(p)
else:
if required.has_key(p.feature()):
result.add(required[p.feature()])
else:
result.add(p)
return sequence.unique(list(result) + requirements)
def translate_paths (properties, path):
""" Interpret all path properties in 'properties' as relative to 'path'
The property values are assumed to be in system-specific form, and
will be translated into normalized form.
"""
result = []
for p in properties:
if p.feature().path():
values = __re_two_ampersands.split(p.value())
new_value = "&&".join(os.path.join(path, v) for v in values)
if new_value != p.value():
result.append(Property(p.feature(), new_value, p.condition()))
else:
result.append(p)
else:
result.append (p)
return result
def translate_indirect(properties, context_module):
"""Assumes that all feature values that start with '@' are
names of rules, used in 'context-module'. Such rules can be
either local to the module or global. Qualified local rules
with the name of the module."""
assert is_iterable_typed(properties, Property)
assert isinstance(context_module, basestring)
result = []
for p in properties:
if p.value()[0] == '@':
q = qualify_jam_action(p.value()[1:], context_module)
get_manager().engine().register_bjam_action(q)
result.append(Property(p.feature(), '@' + q, p.condition()))
else:
result.append(p)
return result
def validate (properties):
""" Exit with error if any of the properties is not valid.
properties may be a single property or a sequence of properties.
"""
if isinstance(properties, Property):
properties = [properties]
assert is_iterable_typed(properties, Property)
for p in properties:
__validate1(p)
def expand_subfeatures_in_conditions (properties):
assert is_iterable_typed(properties, Property)
result = []
for p in properties:
if not p.condition():
result.append(p)
else:
expanded = []
for c in p.condition():
if c.feature().name().startswith("toolset") or c.feature().name() == "os":
                    # It is common that the condition includes a toolset which
                    # was never defined, or mentions subfeatures which
                    # were never defined. In that case, validation would
                    # only produce a spurious error, so don't validate.
expanded.extend(feature.expand_subfeatures ([c], True))
else:
expanded.extend(feature.expand_subfeatures([c]))
result.append(Property(p.feature(), p.value(), expanded))
return result
# FIXME: this should go
def split_conditional (property):
""" If 'property' is conditional property, returns
condition and the property, e.g
<variant>debug,<toolset>gcc:<inlining>full will become
<variant>debug,<toolset>gcc <inlining>full.
Otherwise, returns empty string.
"""
assert isinstance(property, basestring)
m = __re_split_conditional.match (property)
if m:
return (m.group (1), '<' + m.group (2))
return None
def select (features, properties):
""" Selects properties which correspond to any of the given features.
"""
assert is_iterable_typed(properties, basestring)
result = []
# add any missing angle brackets
features = add_grist (features)
return [p for p in properties if get_grist(p) in features]
def validate_property_sets (sets):
if __debug__:
from .property_set import PropertySet
assert is_iterable_typed(sets, PropertySet)
for s in sets:
validate(s.all())
def evaluate_conditionals_in_context (properties, context):
""" Removes all conditional properties which conditions are not met
For those with met conditions, removes the condition. Properies
in conditions are looked up in 'context'
"""
if __debug__:
from .property_set import PropertySet
assert is_iterable_typed(properties, Property)
assert isinstance(context, PropertySet)
base = []
conditional = []
for p in properties:
if p.condition():
conditional.append (p)
else:
base.append (p)
result = base[:]
for p in conditional:
# Evaluate condition
# FIXME: probably inefficient
if all(x in context for x in p.condition()):
result.append(Property(p.feature(), p.value()))
return result
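# Illustrative note (not part of the original module), mirroring the Jam test
# preserved further below: with a context containing <variant>release and
# <rtti>off, the conditional property
# <variant>release,<rtti>off:<define>MY_RELEASE evaluates to
# <define>MY_RELEASE; if either condition were missing from the context the
# property would be dropped entirely.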
def change (properties, feature, value = None):
""" Returns a modified version of properties with all values of the
given feature replaced by the given value.
If 'value' is None the feature will be removed.
"""
assert is_iterable_typed(properties, basestring)
assert isinstance(feature, basestring)
assert isinstance(value, (basestring, type(None)))
result = []
feature = add_grist (feature)
for p in properties:
if get_grist (p) == feature:
if value:
result.append (replace_grist (value, feature))
else:
result.append (p)
return result
################################################################
# Private functions
def __validate1 (property):
""" Exit with error if property is not valid.
"""
assert isinstance(property, Property)
msg = None
if not property.feature().free():
feature.validate_value_string (property.feature(), property.value())
###################################################################
# Still to port.
# Original lines are prefixed with "# "
#
#
# import utility : ungrist ;
# import sequence : unique ;
# import errors : error ;
# import feature ;
# import regex ;
# import sequence ;
# import set ;
# import path ;
# import assert ;
#
#
# rule validate-property-sets ( property-sets * )
# {
# for local s in $(property-sets)
# {
# validate [ feature.split $(s) ] ;
# }
# }
#
def remove(attributes, properties):
"""Returns a property sets which include all the elements
in 'properties' that do not have attributes listed in 'attributes'."""
if isinstance(attributes, basestring):
attributes = [attributes]
assert is_iterable_typed(attributes, basestring)
assert is_iterable_typed(properties, basestring)
result = []
for e in properties:
attributes_new = feature.attributes(get_grist(e))
has_common_features = 0
for a in attributes_new:
if a in attributes:
has_common_features = 1
break
if not has_common_features:
            result.append(e)
return result
def take(attributes, properties):
"""Returns a property set which include all
properties in 'properties' that have any of 'attributes'."""
assert is_iterable_typed(attributes, basestring)
assert is_iterable_typed(properties, basestring)
result = []
for e in properties:
if b2.util.set.intersection(attributes, feature.attributes(get_grist(e))):
result.append(e)
return result
def translate_dependencies(properties, project_id, location):
assert is_iterable_typed(properties, Property)
assert isinstance(project_id, basestring)
assert isinstance(location, basestring)
result = []
for p in properties:
if not p.feature().dependency():
result.append(p)
else:
v = p.value()
m = re.match("(.*)//(.*)", v)
if m:
rooted = m.group(1)
if rooted[0] == '/':
# Either project id or absolute Linux path, do nothing.
pass
else:
rooted = os.path.join(os.getcwd(), location, rooted)
result.append(Property(p.feature(), rooted + "//" + m.group(2), p.condition()))
elif os.path.isabs(v):
result.append(p)
else:
result.append(Property(p.feature(), project_id + "//" + v, p.condition()))
return result
class PropertyMap:
""" Class which maintains a property set -> string mapping.
"""
def __init__ (self):
self.__properties = []
self.__values = []
def insert (self, properties, value):
""" Associate value with properties.
"""
assert is_iterable_typed(properties, basestring)
assert isinstance(value, basestring)
self.__properties.append(properties)
self.__values.append(value)
def find (self, properties):
""" Return the value associated with properties
or any subset of it. If more than one
subset has value assigned to it, return the
value for the longest subset, if it's unique.
"""
assert is_iterable_typed(properties, basestring)
return self.find_replace (properties)
def find_replace(self, properties, value=None):
assert is_iterable_typed(properties, basestring)
assert isinstance(value, (basestring, type(None)))
matches = []
match_ranks = []
for i in range(0, len(self.__properties)):
p = self.__properties[i]
if b2.util.set.contains (p, properties):
matches.append (i)
match_ranks.append(len(p))
best = sequence.select_highest_ranked (matches, match_ranks)
if not best:
return None
if len (best) > 1:
raise NoBestMatchingAlternative ()
best = best [0]
original = self.__values[best]
if value:
self.__values[best] = value
return original
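# Illustrative sketch (not part of the original module): find() returns the
# value whose inserted property list is the longest subset of the query.
# The property strings below are hypothetical.
#
#   pm = PropertyMap()
#   pm.insert(['<toolset>gcc'], 'g')
#   pm.insert(['<toolset>gcc', '<variant>debug'], 'gd')
#   pm.find(['<toolset>gcc', '<variant>debug', '<rtti>on'])   # -> 'gd'
#   pm.find(['<toolset>gcc', '<variant>release'])             # -> 'g'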
# local rule __test__ ( )
# {
# import errors : try catch ;
# import feature ;
# import feature : feature subfeature compose ;
#
# # local rules must be explicitly re-imported
# import property : path-order ;
#
# feature.prepare-test property-test-temp ;
#
# feature toolset : gcc : implicit symmetric ;
# subfeature toolset gcc : version : 2.95.2 2.95.3 2.95.4
# 3.0 3.0.1 3.0.2 : optional ;
# feature define : : free ;
# feature runtime-link : dynamic static : symmetric link-incompatible ;
# feature optimization : on off ;
# feature variant : debug release : implicit composite symmetric ;
# feature rtti : on off : link-incompatible ;
#
# compose <variant>debug : <define>_DEBUG <optimization>off ;
# compose <variant>release : <define>NDEBUG <optimization>on ;
#
# import assert ;
# import "class" : new ;
#
# validate <toolset>gcc <toolset>gcc-3.0.1 : $(test-space) ;
#
# assert.result <toolset>gcc <rtti>off <define>FOO
# : refine <toolset>gcc <rtti>off
# : <define>FOO
# : $(test-space)
# ;
#
# assert.result <toolset>gcc <optimization>on
# : refine <toolset>gcc <optimization>off
# : <optimization>on
# : $(test-space)
# ;
#
# assert.result <toolset>gcc <rtti>off
# : refine <toolset>gcc : <rtti>off : $(test-space)
# ;
#
# assert.result <toolset>gcc <rtti>off <rtti>off:<define>FOO
# : refine <toolset>gcc : <rtti>off <rtti>off:<define>FOO
# : $(test-space)
# ;
#
# assert.result <toolset>gcc:<define>foo <toolset>gcc:<define>bar
# : refine <toolset>gcc:<define>foo : <toolset>gcc:<define>bar
# : $(test-space)
# ;
#
# assert.result <define>MY_RELEASE
# : evaluate-conditionals-in-context
# <variant>release,<rtti>off:<define>MY_RELEASE
# : <toolset>gcc <variant>release <rtti>off
#
# ;
#
# try ;
# validate <feature>value : $(test-space) ;
# catch "Invalid property '<feature>value': unknown feature 'feature'." ;
#
# try ;
# validate <rtti>default : $(test-space) ;
# catch \"default\" is not a known value of feature <rtti> ;
#
# validate <define>WHATEVER : $(test-space) ;
#
# try ;
# validate <rtti> : $(test-space) ;
# catch "Invalid property '<rtti>': No value specified for feature 'rtti'." ;
#
# try ;
# validate value : $(test-space) ;
# catch "value" is not a value of an implicit feature ;
#
#
# assert.result <rtti>on
# : remove free implicit : <toolset>gcc <define>foo <rtti>on : $(test-space) ;
#
# assert.result <include>a
# : select include : <include>a <toolset>gcc ;
#
# assert.result <include>a
# : select include bar : <include>a <toolset>gcc ;
#
# assert.result <include>a <toolset>gcc
# : select include <bar> <toolset> : <include>a <toolset>gcc ;
#
# assert.result <toolset>kylix <include>a
# : change <toolset>gcc <include>a : <toolset> kylix ;
#
# # Test ordinary properties
# assert.result
# : split-conditional <toolset>gcc
# ;
#
# # Test properties with ":"
# assert.result
# : split-conditional <define>FOO=A::B
# ;
#
# # Test conditional feature
# assert.result <toolset>gcc,<toolset-gcc:version>3.0 <define>FOO
# : split-conditional <toolset>gcc,<toolset-gcc:version>3.0:<define>FOO
# ;
#
# feature.finish-test property-test-temp ;
# }
#
| mit |
malikabhi05/upm | examples/python/o2.py | 6 | 1981 | #!/usr/bin/env python
# Author: Zion Orent <[email protected]>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_o2 as upmO2
def main():
# This was tested with the O2 Oxygen Concentration Sensor Module
# Instantiate a O2 on analog pin A0
myO2 = upmO2.O2(0)
## Exit handlers ##
# This stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This lets you run code on exit, including functions from myO2
def exitHandler():
print("Exiting")
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
while(1):
print("The output voltage is: {0}mV".format(
myO2.voltageValue()))
time.sleep(.1)
if __name__ == '__main__':
main()
| mit |
Sourcegasm/gradientni-spust-predstavitev | Elipsa/select_data.py | 2 | 3571 | from gradient_descent import get_data
def select_data():
done = False
while not done:
try:
print('Select input data set:')
print(' 1 Mars half year')
print(' 2 Mars full (whole year measured every Earth month)')
print(' 3 Mars small (every fourth point of \'Mars full\')')
print(' 4 Earth full (every 14 days)')
print(' 5 Saturn full (every 100 days since 1987 = one Saturn year)')
print(' 6 Jupiter full (every 60 days since 2005 = one Jupiter year)')
print(' 7 Halley full (every 30 days 1984 - 1987)')
print(' 8 custom file path')
answer = int(input('Your selection: '))
if answer == 1:
data = get_data('Podatki/mars_half_year.csv')
elif answer == 2:
data = get_data('Podatki/mars_full.csv')
elif answer == 3:
data = get_data('Podatki/mars_full.csv')[::4]
elif answer == 4:
data = get_data('Podatki/earth.csv')
elif answer == 5:
data = get_data('Podatki/saturn.csv')
elif answer == 6:
data = get_data('Podatki/jupiter.csv')
elif answer == 7:
data = get_data('Podatki/halley.csv')
elif answer == 8:
data = get_data(input('Path: '))
else:
continue
print('\nSelect start parameters:')
print(' 1 default [10, 0, 10, 0, 0, 0]')
print(' 2 Mars approximation [-100, 0, -100, -300, 200, 30000]')
print(' 3 Mars half year wrong minimum (hyperbola) [-1017000, 39000, -299600, -2983000, 561000, 23157000]')
print(' 4 Jupiter approximation [-813700, -6200, -785600, -6000, -1600, 5376000]')
print(' 5 Saturn approximation [5541730, 107633, 6468945, 1673, -90184, 72001305]')
print(' 6 Halley approximation [-1000, -1400, -600, -25000, 30000, 230000]')
print(' 7 custom params')
try:
answer = int(input('Your selection: '))
except ValueError:
params = [10, 0, 10, 0, 0, -300]
if answer == 1:
params = [10, 0, 10, 0, 0, -300]
elif answer == 2:
params = [-100, 0, -100, -300, 200, 30000]
elif answer == 3:
params = [-1017000, 39000, -299600, -2983000, 561000, 23157000]
elif answer == 4:
params = [-813700, -6200, -785600, -6000, -1600, 5376000]
elif answer == 5:
params = [5541730, 107633, 6468945, 1673, -90184, 72001305]
elif answer == 6:
params = [-1000, -1400, -600, -25000, 30000, 230000]
elif answer == 7:
params = [float(i) for i in input('Params separated by ,: ').split(',')]
else:
continue
print('\nRecommended steps:')
print(' Mars: 1e-7')
print(' Earth: 1e-6')
print(' Saturn: 7e-11')
print(' Jupiter, Halley: 1e-9')
try:
step = float(input('Define step (default is 1e-6): '))
except ValueError:
step = 1e-6
# load Earth data
earth_data = get_data('Podatki/earth.csv')
done = True
except ValueError:
print('Invalid input!')
print()
return data, earth_data, params, step
| gpl-3.0 |
alex/pinax | pinax/projects/social_project/manage.py | 20 | 1092 | #!/usr/bin/env python
import sys
from os.path import abspath, dirname, join
try:
import pinax
except ImportError:
sys.stderr.write("Error: Can't import Pinax. Make sure you are in a virtual environment that has Pinax installed or create one with pinax-boot.py.\n")
sys.exit(1)
from django.conf import settings
from django.core.management import setup_environ, execute_from_command_line
try:
import settings as settings_mod # Assumed to be in the same directory.
except ImportError:
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
# setup the environment before we start accessing things in the settings.
setup_environ(settings_mod)
sys.path.insert(0, join(settings.PINAX_ROOT, "apps"))
sys.path.insert(0, join(settings.PROJECT_ROOT, "apps"))
if __name__ == "__main__":
execute_from_command_line()
| mit |
mcella/django | tests/test_runner/test_debug_sql.py | 146 | 2971 | import sys
import unittest
from django.db import connection
from django.test import TestCase
from django.test.runner import DiscoverRunner
from django.utils import six
from django.utils.encoding import force_text
from .models import Person
@unittest.skipUnless(connection.vendor == 'sqlite', 'Only run on sqlite so we can check output SQL.')
class TestDebugSQL(unittest.TestCase):
class PassingTest(TestCase):
def runTest(self):
Person.objects.filter(first_name='pass').count()
class FailingTest(TestCase):
def runTest(self):
Person.objects.filter(first_name='fail').count()
self.fail()
class ErrorTest(TestCase):
def runTest(self):
Person.objects.filter(first_name='error').count()
raise Exception
def _test_output(self, verbosity):
runner = DiscoverRunner(debug_sql=True, verbosity=0)
suite = runner.test_suite()
suite.addTest(self.FailingTest())
suite.addTest(self.ErrorTest())
suite.addTest(self.PassingTest())
old_config = runner.setup_databases()
stream = six.StringIO()
resultclass = runner.get_resultclass()
runner.test_runner(
verbosity=verbosity,
stream=stream,
resultclass=resultclass,
).run(suite)
runner.teardown_databases(old_config)
if six.PY2:
stream.buflist = [force_text(x) for x in stream.buflist]
return stream.getvalue()
def test_output_normal(self):
full_output = self._test_output(1)
for output in self.expected_outputs:
self.assertIn(output, full_output)
for output in self.verbose_expected_outputs:
self.assertNotIn(output, full_output)
def test_output_verbose(self):
full_output = self._test_output(2)
for output in self.expected_outputs:
self.assertIn(output, full_output)
for output in self.verbose_expected_outputs:
self.assertIn(output, full_output)
expected_outputs = [
('''SELECT COUNT(*) AS "__count" '''
'''FROM "test_runner_person" WHERE '''
'''"test_runner_person"."first_name" = 'error';'''),
('''SELECT COUNT(*) AS "__count" '''
'''FROM "test_runner_person" WHERE '''
'''"test_runner_person"."first_name" = 'fail';'''),
]
verbose_expected_outputs = [
# Output format changed in Python 3.5+
x.format('' if sys.version_info < (3, 5) else 'TestDebugSQL.') for x in [
'runTest (test_runner.test_debug_sql.{}FailingTest) ... FAIL',
'runTest (test_runner.test_debug_sql.{}ErrorTest) ... ERROR',
'runTest (test_runner.test_debug_sql.{}PassingTest) ... ok',
]
] + [
('''SELECT COUNT(*) AS "__count" '''
'''FROM "test_runner_person" WHERE '''
'''"test_runner_person"."first_name" = 'pass';'''),
]
| bsd-3-clause |
openradar/TINT | tint/data_utils.py | 1 | 2323 | """
tint.data_utils
===============
Tools for obtaining and handling radar data.
"""
import tempfile
from boto.s3.connection import S3Connection
from datetime import datetime, timedelta
import pyart
def read_nexrad_key(key):
"""
Returns pyart radar object from nexrad S3 key.
"""
tmp = tempfile.NamedTemporaryFile()
key.get_contents_to_filename(tmp.name)
return pyart.io.read(tmp.name)
def get_nexrad_keys(site, start=None, end=None):
"""
    Get S3 keys for all nexrad data between two datetimes from Amazon S3.
    Parameters
    ----------
site : string
site code e.g. 'khgx'
start : string
datetime e.g. '20180101_000000'
end : string
same format as start
"""
fmt = '%Y%m%d_%H%M%S'
if start is None:
start = datetime.utcnow() - timedelta(hours=1)
else:
start = datetime.strptime(start, fmt)
if end is None:
end = datetime.utcnow()
else:
end = datetime.strptime(end, fmt)
if end < start:
print('end datetime precedes start datetime')
return
site = site.upper()
dates = []
day_i = start
while day_i < end:
dates.append(day_i)
day_i += timedelta(days=1)
date_keys = [datetime.strftime(date, '%Y/%m/%d/' + site) for date in dates]
conn = S3Connection(anon=True)
bucket = conn.get_bucket('noaa-nexrad-level2')
keys = [key for date_key in date_keys
for key in list(bucket.list(date_key))
if '.tar' not in str(key)]
if len(keys) == 0:
print('Found 0 files.')
return
    # Use the earlier key format for keys before 'V06'
if '.gz>' in str(keys[0]):
key_fmt = site + '%Y%m%d_%H%M%S_V06.gz>'
key_fmt_earlier = site + '%Y%m%d_%H%M%S.gz>'
else:
key_fmt = site + '%Y%m%d_%H%M%S_V06>'
key_fmt_earlier = site + '%Y%m%d_%H%M%S>'
key_dts = []
for key in keys:
try:
key_dts.append(datetime.strptime(str(key).split('/')[-1], key_fmt))
except ValueError:
key_dts.append(
datetime.strptime(str(key).split('/')[-1], key_fmt_earlier))
key_dts = zip(keys, key_dts)
keys = [key for key, dt in key_dts if dt > start and dt < end]
print('Found', len(keys), 'keys.')
return keys
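# Illustrative usage sketch (not part of the original module); it assumes
# network access to the public 'noaa-nexrad-level2' bucket and that at least
# one volume exists in the requested window.
if __name__ == '__main__':
    keys = get_nexrad_keys('khgx', start='20180101_000000',
                           end='20180101_010000')
    if keys:
        radar = read_nexrad_key(keys[0])
        print(radar.time['units'])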
| bsd-2-clause |
jlgoldman/writetogov | util/fips.py | 1 | 3649 | import collections
FIPSInfo = collections.namedtuple('FIPSInfo', ['name', 'fips_code', 'state_code'])
def get_by_fips_code(fips_code):
return INFOS_BY_FIPS_CODE.get(fips_code)
def get_by_state_code(state_code):
return INFOS_BY_STATE_CODE.get(state_code)
def get_by_state_name(state_name):
return INFOS_BY_STATE_NAME.get(state_name.strip().title())
def get_state_name_for_code(state_code):
fips_info = get_by_state_code(state_code)
return fips_info.name if fips_info else None
def get_state_code_for_name(state_name):
fips_info = get_by_state_name(state_name)
return fips_info.state_code if fips_info else None
FIPS_INFOS = map(lambda t: FIPSInfo(*t), (
('Alabama', '01', 'AL'),
('Alaska', '02', 'AK'),
('Arizona', '04', 'AZ'),
('Arkansas', '05', 'AR'),
('California', '06', 'CA'),
('Colorado', '08', 'CO'),
('Connecticut', '09', 'CT'),
('Delaware', '10', 'DE'),
('District of Columbia', '11', 'DC'),
('Florida', '12', 'FL'),
('Georgia', '13', 'GA'),
('Hawaii', '15', 'HI'),
('Idaho', '16', 'ID'),
('Illinois', '17', 'IL'),
('Indiana', '18', 'IN'),
('Iowa', '19', 'IA'),
('Kansas', '20', 'KS'),
('Kentucky', '21', 'KY'),
('Louisiana', '22', 'LA'),
('Maine', '23', 'ME'),
('Maryland', '24', 'MD'),
('Massachusetts', '25', 'MA'),
('Michigan', '26', 'MI'),
('Minnesota', '27', 'MN'),
('Mississippi', '28', 'MS'),
('Missouri', '29', 'MO'),
('Montana', '30', 'MT'),
('Nebraska', '31', 'NE'),
('Nevada', '32', 'NV'),
('New Hampshire', '33', 'NH'),
('New Jersey', '34', 'NJ'),
('New Mexico', '35', 'NM'),
('New York', '36', 'NY'),
('North Carolina', '37', 'NC'),
('North Dakota', '38', 'ND'),
('Ohio', '39', 'OH'),
('Oklahoma', '40', 'OK'),
('Oregon', '41', 'OR'),
('Pennsylvania', '42', 'PA'),
('Rhode Island', '44', 'RI'),
('South Carolina', '45', 'SC'),
('South Dakota', '46', 'SD'),
('Tennessee', '47', 'TN'),
('Texas', '48', 'TX'),
('Utah', '49', 'UT'),
('Vermont', '50', 'VT'),
('Virginia', '51', 'VA'),
('Washington', '53', 'WA'),
('West Virginia', '54', 'WV'),
('Wisconsin', '55', 'WI'),
('Wyoming', '56', 'WY'),
('American Samoa', '60', 'AS'),
('Guam', '66', 'GU'),
('Northern Mariana Islands', '69', 'MP'),
('Puerto Rico', '72', 'PR'),
('Virgin Islands of the U.S.', '78', 'VI'),
))
INFOS_BY_STATE_NAME = {t[0]: t for t in FIPS_INFOS}
INFOS_BY_FIPS_CODE = {t[1]: t for t in FIPS_INFOS}
INFOS_BY_STATE_CODE = {t[2]: t for t in FIPS_INFOS}
ONE_DISTRICT_STATE_CODES = set([
'AK','AS','DC','DE','GU','MP','MT','ND','PR','SD','VI','VT','WY',
])
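# Illustrative usage sketch (not part of the original module):
if __name__ == '__main__':
    print(get_state_code_for_name('California'))  # 'CA'
    print(get_state_name_for_code('TX'))          # 'Texas'
    print(get_by_fips_code('06').name)            # 'California'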
| bsd-3-clause |
netscaler/neutron | neutron/tests/unit/cisco/test_nexus_db.py | 4 | 7652 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import testtools
from neutron.db import api as db
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
from neutron.plugins.cisco.db import nexus_db_v2 as nxdb
from neutron.tests import base
class CiscoNexusDbTest(base.BaseTestCase):
"""Unit tests for cisco.db.nexus_models_v2.NexusPortBinding model."""
NpbObj = collections.namedtuple('NpbObj', 'port vlan switch instance')
def setUp(self):
super(CiscoNexusDbTest, self).setUp()
db.configure_db()
self.session = db.get_session()
self.addCleanup(db.clear_db)
def _npb_test_obj(self, pnum, vnum, switch=None, instance=None):
"""Create a Nexus port binding test object from a pair of numbers."""
        if pnum == 'router':
port = pnum
else:
port = '1/%s' % str(pnum)
vlan = str(vnum)
if switch is None:
switch = '10.9.8.7'
if instance is None:
instance = 'instance_%s_%s' % (str(pnum), str(vnum))
return self.NpbObj(port, vlan, switch, instance)
def _assert_equal(self, npb, npb_obj):
self.assertEqual(npb.port_id, npb_obj.port)
self.assertEqual(int(npb.vlan_id), int(npb_obj.vlan))
self.assertEqual(npb.switch_ip, npb_obj.switch)
self.assertEqual(npb.instance_id, npb_obj.instance)
def _add_to_db(self, npbs):
for npb in npbs:
nxdb.add_nexusport_binding(
npb.port, npb.vlan, npb.switch, npb.instance)
def test_nexusportbinding_add_remove(self):
npb11 = self._npb_test_obj(10, 100)
npb = nxdb.add_nexusport_binding(
npb11.port, npb11.vlan, npb11.switch, npb11.instance)
self._assert_equal(npb, npb11)
npb = nxdb.remove_nexusport_binding(
npb11.port, npb11.vlan, npb11.switch, npb11.instance)
self.assertEqual(len(npb), 1)
self._assert_equal(npb[0], npb11)
with testtools.ExpectedException(c_exc.NexusPortBindingNotFound):
nxdb.remove_nexusport_binding(
npb11.port, npb11.vlan, npb11.switch, npb11.instance)
def test_nexusportbinding_get(self):
npb11 = self._npb_test_obj(10, 100)
npb21 = self._npb_test_obj(20, 100)
npb22 = self._npb_test_obj(20, 200)
self._add_to_db([npb11, npb21, npb22])
npb = nxdb.get_nexusport_binding(
npb11.port, npb11.vlan, npb11.switch, npb11.instance)
self.assertEqual(len(npb), 1)
self._assert_equal(npb[0], npb11)
npb = nxdb.get_nexusport_binding(
npb21.port, npb21.vlan, npb21.switch, npb21.instance)
self.assertEqual(len(npb), 1)
self._assert_equal(npb[0], npb21)
npb = nxdb.get_nexusport_binding(
npb22.port, npb22.vlan, npb22.switch, npb22.instance)
self.assertEqual(len(npb), 1)
self._assert_equal(npb[0], npb22)
with testtools.ExpectedException(c_exc.NexusPortBindingNotFound):
nxdb.get_nexusport_binding(
npb21.port, npb21.vlan, npb21.switch, "dummyInstance")
def test_nexusvlanbinding_get(self):
npb11 = self._npb_test_obj(10, 100)
npb21 = self._npb_test_obj(20, 100)
npb22 = self._npb_test_obj(20, 200)
self._add_to_db([npb11, npb21, npb22])
npb_all_v100 = nxdb.get_nexusvlan_binding(npb11.vlan, npb11.switch)
self.assertEqual(len(npb_all_v100), 2)
npb_v200 = nxdb.get_nexusvlan_binding(npb22.vlan, npb22.switch)
self.assertEqual(len(npb_v200), 1)
self._assert_equal(npb_v200[0], npb22)
with testtools.ExpectedException(c_exc.NexusPortBindingNotFound):
nxdb.get_nexusvlan_binding(npb21.vlan, "dummySwitch")
def test_nexusvmbinding_get(self):
npb11 = self._npb_test_obj(10, 100)
npb21 = self._npb_test_obj(20, 100)
npb22 = self._npb_test_obj(20, 200)
self._add_to_db([npb11, npb21, npb22])
npb = nxdb.get_nexusvm_bindings(npb21.vlan, npb21.instance)[0]
self._assert_equal(npb, npb21)
npb = nxdb.get_nexusvm_bindings(npb22.vlan, npb22.instance)[0]
self._assert_equal(npb, npb22)
with testtools.ExpectedException(c_exc.NexusPortBindingNotFound):
nxdb.get_nexusvm_bindings(npb21.vlan, "dummyInstance")
def test_nexusportvlanswitchbinding_get(self):
npb11 = self._npb_test_obj(10, 100)
npb21 = self._npb_test_obj(20, 100)
self._add_to_db([npb11, npb21])
npb = nxdb.get_port_vlan_switch_binding(
npb11.port, npb11.vlan, npb11.switch)
self.assertEqual(len(npb), 1)
self._assert_equal(npb[0], npb11)
with testtools.ExpectedException(c_exc.NexusPortBindingNotFound):
nxdb.get_port_vlan_switch_binding(
npb21.port, npb21.vlan, "dummySwitch")
def test_nexusportswitchbinding_get(self):
npb11 = self._npb_test_obj(10, 100)
npb21 = self._npb_test_obj(20, 100, switch='2.2.2.2')
npb22 = self._npb_test_obj(20, 200, switch='2.2.2.2')
self._add_to_db([npb11, npb21, npb22])
npb = nxdb.get_port_switch_bindings(npb11.port, npb11.switch)
self.assertEqual(len(npb), 1)
self._assert_equal(npb[0], npb11)
npb_all_p20 = nxdb.get_port_switch_bindings(npb21.port, npb21.switch)
self.assertEqual(len(npb_all_p20), 2)
npb = nxdb.get_port_switch_bindings(npb21.port, "dummySwitch")
self.assertIsNone(npb)
def test_nexussvibinding_get(self):
npbr1 = self._npb_test_obj('router', 100)
npb21 = self._npb_test_obj(20, 100)
self._add_to_db([npbr1, npb21])
npb_svi = nxdb.get_nexussvi_bindings()
self.assertEqual(len(npb_svi), 1)
self._assert_equal(npb_svi[0], npbr1)
npbr2 = self._npb_test_obj('router', 200)
self._add_to_db([npbr2])
npb_svi = nxdb.get_nexussvi_bindings()
self.assertEqual(len(npb_svi), 2)
def test_nexusbinding_update(self):
npb11 = self._npb_test_obj(10, 100, switch='1.1.1.1', instance='test')
npb21 = self._npb_test_obj(20, 100, switch='1.1.1.1', instance='test')
self._add_to_db([npb11, npb21])
npb_all_v100 = nxdb.get_nexusvlan_binding(npb11.vlan, '1.1.1.1')
self.assertEqual(len(npb_all_v100), 2)
npb22 = self._npb_test_obj(20, 200, switch='1.1.1.1', instance='test')
npb = nxdb.update_nexusport_binding(npb21.port, 200)
self._assert_equal(npb, npb22)
npb_all_v100 = nxdb.get_nexusvlan_binding(npb11.vlan, '1.1.1.1')
self.assertEqual(len(npb_all_v100), 1)
self._assert_equal(npb_all_v100[0], npb11)
npb = nxdb.update_nexusport_binding(npb21.port, 0)
self.assertIsNone(npb)
npb33 = self._npb_test_obj(30, 300, switch='1.1.1.1', instance='test')
with testtools.ExpectedException(c_exc.NexusPortBindingNotFound):
nxdb.update_nexusport_binding(npb33.port, 200)
| apache-2.0 |
machinelearningnanodegree/stanford-cs231 | solutions/vijendra/assignment3/cs231n/coco_utils.py | 18 | 2650 | import os, json
import numpy as np
import h5py
def load_coco_data(base_dir='cs231n/datasets/coco_captioning',
max_train=None,
pca_features=True):
data = {}
caption_file = os.path.join(base_dir, 'coco2014_captions.h5')
with h5py.File(caption_file, 'r') as f:
for k, v in f.iteritems():
data[k] = np.asarray(v)
if pca_features:
train_feat_file = os.path.join(base_dir, 'train2014_vgg16_fc7_pca.h5')
else:
train_feat_file = os.path.join(base_dir, 'train2014_vgg16_fc7.h5')
with h5py.File(train_feat_file, 'r') as f:
data['train_features'] = np.asarray(f['features'])
if pca_features:
val_feat_file = os.path.join(base_dir, 'val2014_vgg16_fc7_pca.h5')
else:
val_feat_file = os.path.join(base_dir, 'val2014_vgg16_fc7.h5')
with h5py.File(val_feat_file, 'r') as f:
data['val_features'] = np.asarray(f['features'])
dict_file = os.path.join(base_dir, 'coco2014_vocab.json')
with open(dict_file, 'r') as f:
dict_data = json.load(f)
for k, v in dict_data.iteritems():
data[k] = v
train_url_file = os.path.join(base_dir, 'train2014_urls.txt')
with open(train_url_file, 'r') as f:
train_urls = np.asarray([line.strip() for line in f])
data['train_urls'] = train_urls
val_url_file = os.path.join(base_dir, 'val2014_urls.txt')
with open(val_url_file, 'r') as f:
val_urls = np.asarray([line.strip() for line in f])
data['val_urls'] = val_urls
# Maybe subsample the training data
if max_train is not None:
num_train = data['train_captions'].shape[0]
mask = np.random.randint(num_train, size=max_train)
data['train_captions'] = data['train_captions'][mask]
data['train_image_idxs'] = data['train_image_idxs'][mask]
return data
def decode_captions(captions, idx_to_word):
singleton = False
if captions.ndim == 1:
singleton = True
captions = captions[None]
decoded = []
N, T = captions.shape
for i in xrange(N):
words = []
for t in xrange(T):
word = idx_to_word[captions[i, t]]
if word != '<NULL>':
words.append(word)
if word == '<END>':
break
decoded.append(' '.join(words))
if singleton:
decoded = decoded[0]
return decoded
def sample_coco_minibatch(data, batch_size=100, split='train'):
split_size = data['%s_captions' % split].shape[0]
mask = np.random.choice(split_size, batch_size)
captions = data['%s_captions' % split][mask]
image_idxs = data['%s_image_idxs' % split][mask]
image_features = data['%s_features' % split][image_idxs]
urls = data['%s_urls' % split][image_idxs]
return captions, image_features, urls
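# Illustrative usage sketch (not part of the original module); it assumes the
# preprocessed COCO captioning files are present under the default base_dir.
if __name__ == '__main__':
    data = load_coco_data(max_train=500)
    captions, features, urls = sample_coco_minibatch(data, batch_size=2)
    for caption in decode_captions(captions, data['idx_to_word']):
        print(caption)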
| mit |
apark263/tensorflow | tensorflow/contrib/image/python/ops/single_image_random_dot_stereograms.py | 36 | 5712 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python layer for image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.image.ops import gen_single_image_random_dot_stereograms_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
_sirds_ops = loader.load_op_library(
resource_loader.get_path_to_datafile(
"_single_image_random_dot_stereograms.so"))
def single_image_random_dot_stereograms(depth_values,
hidden_surface_removal=None,
convergence_dots_size=None,
dots_per_inch=None,
eye_separation=None,
mu=None,
normalize=None,
normalize_max=None,
normalize_min=None,
border_level=None,
number_colors=None,
output_image_shape=None,
output_data_window=None):
"""Output a RandomDotStereogram Tensor for export via encode_PNG/JPG OP.
Given the 2-D tensor 'depth_values' with encoded Z values, this operation
will encode 3-D data into a 2-D image. The output of this Op is suitable
for the encode_PNG/JPG ops. Be careful with image compression as this may
corrupt the encode 3-D data within the image.
Based upon [this
paper](http://www.learningace.com/doc/4331582/b6ab058d1e206d68ab60e4e1ead2fe6e/sirds-paper).
This outputs a SIRDS image as picture_out.png:
```python
img=[[1,2,3,3,2,1],
[1,2,3,4,5,2],
[1,2,3,4,5,3],
[1,2,3,4,5,4],
[6,5,4,4,5,5]]
session = tf.InteractiveSession()
sirds = single_image_random_dot_stereograms(
img,
convergence_dots_size=8,
number_colors=256,normalize=True)
out = sirds.eval()
png = tf.image.encode_png(out).eval()
with open('picture_out.png', 'wb') as f:
f.write(png)
```
Args:
depth_values: A `Tensor`. Must be one of the following types:
`float64`, `float32`, `int64`, `int32`. Z values of data to encode
into 'output_data_window' window, lower further away {0.0 floor(far),
1.0 ceiling(near) after norm}, must be 2-D tensor
hidden_surface_removal: An optional `bool`. Defaults to `True`.
Activate hidden surface removal
convergence_dots_size: An optional `int`. Defaults to `8`.
Black dot size in pixels to help view converge image, drawn on bottom
of the image
dots_per_inch: An optional `int`. Defaults to `72`.
Output device in dots/inch
eye_separation: An optional `float`. Defaults to `2.5`.
Separation between eyes in inches
mu: An optional `float`. Defaults to `0.3333`.
Depth of field, Fraction of viewing distance (eg. 1/3 = 0.3333)
normalize: An optional `bool`. Defaults to `True`.
Normalize input data to [0.0, 1.0]
normalize_max: An optional `float`. Defaults to `-100`.
Fix MAX value for Normalization (0.0) - if < MIN, autoscale
normalize_min: An optional `float`. Defaults to `100`.
Fix MIN value for Normalization (0.0) - if > MAX, autoscale
border_level: An optional `float`. Defaults to `0`.
      Value of border in depth 0.0 {far} to 1.0 {near}
number_colors: An optional `int`. Defaults to `256`. 2 (Black &
White), 256 (grayscale), and Numbers > 256 (Full Color) are
supported
output_image_shape: An optional `tf.TensorShape` or list of `ints`.
Defaults to shape `[1024, 768, 1]`. Defines output shape of returned
image in '[X,Y, Channels]' 1-grayscale, 3 color; channels will be
updated to 3 if number_colors > 256
output_data_window: An optional `tf.TensorShape` or list of `ints`.
Defaults to `[1022, 757]`. Size of "DATA" window, must be equal to or
smaller than `output_image_shape`, will be centered and use
`convergence_dots_size` for best fit to avoid overlap if possible
Returns:
A `Tensor` of type `uint8` of shape 'output_image_shape' with encoded
'depth_values'
"""
result = gen_single_image_random_dot_stereograms_ops.single_image_random_dot_stereograms( # pylint: disable=line-too-long
depth_values=depth_values,
hidden_surface_removal=hidden_surface_removal,
convergence_dots_size=convergence_dots_size,
dots_per_inch=dots_per_inch,
eye_separation=eye_separation,
mu=mu,
normalize=normalize,
normalize_max=normalize_max,
normalize_min=normalize_min,
border_level=border_level,
number_colors=number_colors,
output_image_shape=output_image_shape,
output_data_window=output_data_window)
return result
ops.NotDifferentiable("SingleImageRandomDotStereograms")
| apache-2.0 |
ghutchis/avogadro | libavogadro/src/python/unittest/extension.py | 9 | 1763 | import Avogadro
import unittest
from numpy import *
from PyQt4.Qt import *
import sys
class TestExtension(unittest.TestCase):
def setUp(self):
self.extensions = []
for extension in Avogadro.PluginManager.instance.extensions(None):
self.extensions.append(extension)
self.assertNotEqual(len(self.extensions), 0)
def test_typeName(self):
for extension in self.extensions:
self.assertEqual(extension.type, Avogadro.PluginType.ExtensionType)
self.assertEqual(extension.typeName, "Extensions")
def test_settingsWidget(self):
for extension in self.extensions:
widget = extension.settingsWidget
def test_actions(self):
for extension in self.extensions:
actions = extension.actions
widget = extension.dockWidget
for action in actions:
action.text()
extension.menuPath(action)
def test_performAction(self):
glwidget = Avogadro.GLWidget()
molecule = Avogadro.molecules.addMolecule()
glwidget.molecule = molecule
molecule.addAtom().atomicNumber = 6
for extension in self.extensions:
if extension.name == "Hydrogens":
extension.setMolecule(molecule)
actions = extension.actions
for action in actions:
#print action.text(), " = ", extension.menuPath(action)
if action.text() == "Add Hydrogens":
command = extension.performAction(action, glwidget)
command.redo()
self.assertEqual(molecule.numAtoms, 5)
def test_setMolecule(self):
molecule = Avogadro.molecules.addMolecule()
for extension in self.extensions:
extension.setMolecule(molecule)
if __name__ == "__main__":
app = QApplication(sys.argv)
unittest.main()
sys.exit(app.exec_())
| gpl-2.0 |
openearth/stack | roles/wps/files/wps/openlayers/tests/selenium/remotecontrol/test_ol.py | 254 | 2873 | from selenium import selenium
import time
import sys
from ConfigParser import ConfigParser
MAX_TEST_LENGTH = 300
if len(sys.argv) > 2:
filename = sys.argv[2]
else:
filename = "config.cfg"
c = ConfigParser()
c.read(filename)
targets = {}
server = c.get('config', 'server')
url= c.get('config', 'url')
if c.has_option('config', 'timeout'):
MAX_TEST_LENGTH = int(c.get('config', 'timeout'))
sections = c.sections()
for s in sections:
if s == 'config':
continue
targets[s] = dict(c.items(s))
targets[s]['name'] = s
if sys.argv[1] == "all":
browsers = list(targets.values())
elif sys.argv[1] not in targets:
print "Invalid target"
sys.exit()
else:
browsers = [targets[sys.argv[1]]]
keep_going = True
if 1:
for b in browsers:
if not keep_going:
continue
print "Running %s on %s" % (b['name'], b['host'])
s = selenium(b['host'], 4444, "*%s" % b['browsercmd'], server)
s.start()
try:
s.open_window(url, "test_running")
time.sleep(2)
s.select_window("test_running")
time.sleep(2)
s.refresh()
count = 0
while count == 0:
count = int(s.get_eval("window.document.getElementById('testtable').getElementsByTagName('tr').length"))
time.sleep(5)
ok = 0
fail = 0
last_change = time.time()
while True:
new_ok = int(s.get_eval('window.Test.AnotherWay._g_ok_pages'))
new_fail = int(s.get_eval('window.Test.AnotherWay._g_fail_pages'))
if new_ok != ok or new_fail != fail:
ok = new_ok
fail = new_fail
last_change = time.time()
if (ok + fail) >= count:
break
if time.time() - last_change > MAX_TEST_LENGTH:
raise Exception("Failed: with %s okay and %s failed, ran out of time: %s is more than %s" % (ok, fail, (time.time() - last_change), MAX_TEST_LENGTH))
time.sleep(10)
if fail:
print "Failed: %s" % fail
html = s.get_eval("window.document.getElementById('results').innerHTML").encode("utf-8")
all_html = """<html>
<head>
<meta content="text/html; charset=utf-8" http-equiv="content-type" />
</head>
<body>%s</body></html>""" % html
f = open("fail.%s.%s.html" % (time.time(), b['name']), "w")
f.write(all_html)
f.close()
except KeyboardInterrupt, E:
keep_going = False
print "Stopped by keyboard interrupt"
except Exception, E:
print "Error: ", E
s.stop()
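# Illustrative config sketch (not part of the original script): the layout of
# config.cfg as inferred from the options read above; host names, URLs and
# browser commands are hypothetical.
#
#   [config]
#   server = http://openlayers.org
#   url = http://openlayers.org/dev/tests/run-tests.html
#   timeout = 300
#
#   [firefox-linux]
#   host = 192.168.1.10
#   browsercmd = firefox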
| gpl-3.0 |
huseyinbiyik/plugin.video.videostream | resources/lib/third/humanize/number.py | 10 | 4567 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Humanizing functions for numbers."""
import re
from fractions import Fraction
from .import compat
from .i18n import gettext as _, gettext_noop as N_, pgettext as P_
def ordinal(value):
"""Converts an integer to its ordinal as a string. 1 is '1st', 2 is '2nd',
3 is '3rd', etc. Works for any integer or anything int() will turn into an
integer. Anything other value will have nothing done to it."""
try:
value = int(value)
except (TypeError, ValueError):
return value
t = (P_('0', 'th'),
P_('1', 'st'),
P_('2', 'nd'),
P_('3', 'rd'),
P_('4', 'th'),
P_('5', 'th'),
P_('6', 'th'),
P_('7', 'th'),
P_('8', 'th'),
P_('9', 'th'))
if value % 100 in (11, 12, 13): # special case
return "%d%s" % (value, t[0])
return '%d%s' % (value, t[value % 10])
def intcomma(value):
"""Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'. To maintain
    some compatibility with Django's intcomma, this function also accepts
floats."""
try:
if isinstance(value, compat.string_types):
float(value.replace(',', ''))
else:
float(value)
except (TypeError, ValueError):
return value
orig = str(value)
new = re.sub("^(-?\d+)(\d{3})", '\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return intcomma(new)
powers = [10 ** x for x in (6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 100)]
human_powers = (N_('million'), N_('billion'), N_('trillion'), N_('quadrillion'),
N_('quintillion'), N_('sextillion'), N_('septillion'),
N_('octillion'), N_('nonillion'), N_('decillion'), N_('googol'))
def intword(value, format='%.1f'):
"""Converts a large integer to a friendly text representation. Works best for
numbers over 1 million. For example, 1000000 becomes '1.0 million', 1200000
becomes '1.2 million' and '1200000000' becomes '1.2 billion'. Supports up to
decillion (33 digits) and googol (100 digits). You can pass format to change
the number of decimal or general format of the number portion. This function
returns a string unless the value passed was unable to be coaxed into an int."""
try:
value = int(value)
except (TypeError, ValueError):
return value
if value < powers[0]:
return str(value)
for ordinal, power in enumerate(powers[1:], 1):
if value < power:
chopped = value / float(powers[ordinal - 1])
return (' '.join([format, _(human_powers[ordinal - 1])])) % chopped
return str(value)
def apnumber(value):
"""For numbers 1-9, returns the number spelled out. Otherwise, returns the
number. This follows Associated Press style. This always returns a string
unless the value was not int-able, unlike the Django filter."""
try:
value = int(value)
except (TypeError, ValueError):
return value
if not 0 < value < 10:
return str(value)
return (_('one'), _('two'), _('three'), _('four'), _('five'), _('six'),
_('seven'), _('eight'), _('nine'))[value - 1]
def fractional(value):
'''
There will be some cases where one might not want to show
ugly decimal places for floats and decimals.
This function returns a human readable fractional number
in form of fractions and mixed fractions.
Pass in a string, or a number or a float, and this function returns
a string representation of a fraction
or whole number
or a mixed fraction
Examples:
fractional(0.3) will return '1/3'
fractional(1.3) will return '1 3/10'
fractional(float(1/3)) will return '1/3'
fractional(1) will return '1'
This will always return a string.
'''
try:
number = float(value)
except (TypeError, ValueError):
return value
wholeNumber = int(number)
frac = Fraction(number - wholeNumber).limit_denominator(1000)
numerator = frac._numerator
denominator = frac._denominator
if wholeNumber and not numerator and denominator == 1:
return '%.0f' % wholeNumber # this means that an integer was passed in (or variants of that integer like 1.0000)
elif not wholeNumber:
return '%.0f/%.0f' % (numerator, denominator)
else:
return '%.0f %.0f/%.0f' % (wholeNumber, numerator, denominator)
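# Illustrative usage sketch (not part of the original module), echoing the
# behaviour documented in the docstrings above:
#
#   ordinal(3)          # '3rd'
#   intcomma(1234567)   # '1,234,567'
#   intword(1200000)    # '1.2 million'
#   apnumber(4)         # 'four'
#   fractional(1.3)     # '1 3/10'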
| gpl-2.0 |
rooneykev/testAngular | js/node_modules/jasmine/node_modules/jasmine-core/setup.py | 191 | 1983 | from setuptools import setup, find_packages, os
import json
with open('package.json') as packageFile:
version = json.load(packageFile)['version']
setup(
name="jasmine-core",
version=version,
url="http://pivotal.github.io/jasmine/",
author="Pivotal Labs",
author_email="[email protected]",
description=('Jasmine is a Behavior Driven Development testing framework for JavaScript. It does not rely on '+
'browsers, DOM, or any JavaScript framework. Thus it\'s suited for websites, '+
'Node.js (http://nodejs.org) projects, or anywhere that JavaScript can run.'),
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
],
packages=['jasmine_core', 'jasmine_core.images'],
package_dir={'jasmine_core': 'lib/jasmine-core', 'jasmine_core.images': 'images'},
package_data={'jasmine_core': ['*.js', '*.css'], 'jasmine_core.images': ['*.png']},
include_package_data=True,
install_requires=['glob2>=0.4.1', 'ordereddict==1.1']
)
| mit |
aashish24/VTK-old | Wrapping/Python/vtk/wx/wxVTKRenderWindow.py | 10 | 23312 | """
A simple VTK widget for wxPython.
Find wxPython info at http://wxPython.org
Created by David Gobbi, December 2001
Based on vtkTkRenderWidget.py
Updated to new wx namespace and some cleaning by Andrea Gavana,
December 2006
"""
"""
Please see the example at the end of this file.
----------------------------------------
Creation:
wxVTKRenderWindow(parent, ID, stereo=0, [wx keywords]):
You should create a wx.PySimpleApp() or some other wx**App
before creating the window.
----------------------------------------
Methods:
Render()
AddRenderer(ren)
GetRenderers()
GetRenderWindow()
----------------------------------------
Methods to override (all take a wx.Event):
OnButtonDown(event) default: propagate event to Left, Right, Middle
OnLeftDown(event) default: set _Mode to 'Rotate'
OnRightDown(event) default: set _Mode to 'Zoom'
OnMiddleDown(event) default: set _Mode to 'Pan'
OnButtonUp(event) default: propagate event to L, R, M and unset _Mode
OnLeftUp(event)
OnRightUp(event)
OnMiddleUp(event)
OnMotion(event) default: call appropriate handler for _Mode
OnEnterWindow(event) default: set focus to this window
OnLeaveWindow(event) default: release focus
OnKeyDown(event) default: [R]eset, [W]irefreme, [S]olid, [P]ick
OnKeyUp(event)
OnChar(event)
OnSetFocus(event)
OnKillFocus(event)
OnSize(event)
OnMove(event)
OnPaint(event) default: Render()
----------------------------------------
Protected Members:
_Mode: Current mode: 'Rotate', 'Zoom', 'Pan'
_LastX, _LastY: The (x,y) coordinates of the previous event
_CurrentRenderer: The renderer that was most recently clicked in
_CurrentCamera: The camera for the current renderer
----------------------------------------
Private Members:
__Handle: Handle to the window containing the vtkRenderWindow
"""
# import usual libraries
import math, os, sys
import wx
import vtk
# a few configuration items, see what works best on your system
# Use GLCanvas as base class instead of wx.Window.
# This is sometimes necessary under wxGTK or the image is blank.
# (in wxWindows 2.3.1 and earlier, the GLCanvas had scroll bars)
baseClass = wx.Window
if wx.Platform == "__WXGTK__":
import wx.glcanvas
baseClass = wx.glcanvas.GLCanvas
# Keep capturing mouse after mouse is dragged out of window
# (in wxGTK 2.3.2 there is a bug that keeps this from working,
# but it is only relevant in wxGTK if there are multiple windows)
_useCapture = (wx.Platform == "__WXMSW__")
# end of configuration items
class wxVTKRenderWindow(baseClass):
"""
A wxRenderWindow for wxPython.
Use GetRenderWindow() to get the vtkRenderWindow.
Create with the keyword stereo=1 in order to
generate a stereo-capable window.
"""
def __init__(self, parent, ID, *args, **kw):
"""Default class constructor.
@param parent: parent window
@param ID: window id
@param **kw: wxPython keywords (position, size, style) plus the
'stereo' keyword
"""
# miscellaneous protected variables
self._CurrentRenderer = None
self._CurrentCamera = None
self._CurrentZoom = 1.0
self._CurrentLight = None
self._ViewportCenterX = 0
self._ViewportCenterY = 0
self._Picker = vtk.vtkCellPicker()
self._PickedActor = None
self._PickedProperty = vtk.vtkProperty()
self._PickedProperty.SetColor(1,0,0)
self._PrePickedProperty = None
# these record the previous mouse position
self._LastX = 0
self._LastY = 0
# the current interaction mode (Rotate, Pan, Zoom, etc)
self._Mode = None
self._ActiveButton = None
# private attributes
self.__OldFocus = None
# used by the LOD actors
self._DesiredUpdateRate = 15
self._StillUpdateRate = 0.0001
# First do special handling of some keywords:
# stereo, position, size, width, height, style
stereo = 0
if kw.has_key('stereo'):
if kw['stereo']:
stereo = 1
del kw['stereo']
position = wx.DefaultPosition
if kw.has_key('position'):
position = kw['position']
del kw['position']
try:
size = parent.GetSize()
except AttributeError:
size = wx.DefaultSize
if kw.has_key('size'):
size = kw['size']
del kw['size']
# wx.WANTS_CHARS says to give us e.g. TAB
# wx.NO_FULL_REPAINT_ON_RESIZE cuts down resize flicker under GTK
style = wx.WANTS_CHARS | wx.NO_FULL_REPAINT_ON_RESIZE
if kw.has_key('style'):
style = style | kw['style']
del kw['style']
# the enclosing frame must be shown under GTK or the windows
# don't connect together properly
l = []
p = parent
while p: # make a list of all parents
l.append(p)
p = p.GetParent()
        l.reverse() # reverse so that the topmost parent comes first
for p in l:
p.Show(1)
# initialize the wx.Window
if baseClass.__name__ == 'GLCanvas':
# Set the doublebuffer attribute of the GL canvas.
baseClass.__init__(self, parent, ID, position, size, style,
attribList=[wx.glcanvas.WX_GL_DOUBLEBUFFER])
else:
baseClass.__init__(self, parent, ID, position, size, style)
# create the RenderWindow and initialize it
self._RenderWindow = vtk.vtkRenderWindow()
self._RenderWindow.SetSize(size.width, size.height)
if stereo:
self._RenderWindow.StereoCapableWindowOn()
self._RenderWindow.SetStereoTypeToCrystalEyes()
self.__handle = None
# refresh window by doing a Render
self.Bind(wx.EVT_PAINT, self.OnPaint)
# turn off background erase to reduce flicker
self.Bind(wx.EVT_ERASE_BACKGROUND, lambda e: None)
# Bind the events to the event converters
self.Bind(wx.EVT_RIGHT_DOWN, self._OnButtonDown)
self.Bind(wx.EVT_LEFT_DOWN, self._OnButtonDown)
self.Bind(wx.EVT_MIDDLE_DOWN, self._OnButtonDown)
self.Bind(wx.EVT_RIGHT_UP, self._OnButtonUp)
self.Bind(wx.EVT_LEFT_UP, self._OnButtonUp)
self.Bind(wx.EVT_MIDDLE_UP, self._OnButtonUp)
self.Bind(wx.EVT_MOTION, self.OnMotion)
self.Bind(wx.EVT_ENTER_WINDOW, self._OnEnterWindow)
self.Bind(wx.EVT_LEAVE_WINDOW, self._OnLeaveWindow)
self.Bind(wx.EVT_CHAR, self.OnChar)
# If we use EVT_KEY_DOWN instead of EVT_CHAR, capital versions
# of all characters are always returned. EVT_CHAR also performs
# other necessary keyboard-dependent translations.
self.Bind(wx.EVT_CHAR, self.OnKeyDown)
self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
self.Bind(wx.EVT_SIZE, self._OnSize)
self.Bind(wx.EVT_MOVE, self.OnMove)
self.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
self.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
def SetDesiredUpdateRate(self, rate):
"""Mirrors the method with the same name in
vtkRenderWindowInteractor.
"""
self._DesiredUpdateRate = rate
def GetDesiredUpdateRate(self):
"""Mirrors the method with the same name in
vtkRenderWindowInteractor.
"""
return self._DesiredUpdateRate
def SetStillUpdateRate(self, rate):
"""Mirrors the method with the same name in
vtkRenderWindowInteractor.
"""
self._StillUpdateRate = rate
def GetStillUpdateRate(self):
"""Mirrors the method with the same name in
vtkRenderWindowInteractor.
"""
return self._StillUpdateRate
def OnPaint(self, event):
"""Handles the wx.EVT_PAINT event for wxVTKRenderWindow.
"""
dc = wx.PaintDC(self)
self.Render()
def _OnSize(self, event):
"""Handles the wx.EVT_SIZE event for wxVTKRenderWindow.
"""
if wx.Platform != '__WXMSW__':
width, height = event.GetSize()
self._RenderWindow.SetSize(width, height)
self.OnSize(event)
self.Render()
def OnSize(self, event):
"""Overridable event.
"""
pass
def OnMove(self, event):
"""Overridable event.
"""
pass
def _OnEnterWindow(self, event):
"""Handles the wx.EVT_ENTER_WINDOW event for
wxVTKRenderWindow.
"""
self.UpdateRenderer(event)
self.OnEnterWindow(event)
def OnEnterWindow(self, event):
"""Overridable event.
"""
if self.__OldFocus == None:
self.__OldFocus = wx.Window.FindFocus()
self.SetFocus()
def _OnLeaveWindow(self, event):
"""Handles the wx.EVT_LEAVE_WINDOW event for
wxVTKRenderWindow.
"""
self.OnLeaveWindow(event)
def OnLeaveWindow(self, event):
"""Overridable event.
"""
if self.__OldFocus:
self.__OldFocus.SetFocus()
self.__OldFocus = None
def OnSetFocus(self, event):
"""Overridable event.
"""
pass
def OnKillFocus(self, event):
"""Overridable event.
"""
pass
def _OnButtonDown(self, event):
"""Handles the wx.EVT_LEFT/RIGHT/MIDDLE_DOWN events for
wxVTKRenderWindow.
"""
# helper function for capturing mouse until button released
self._RenderWindow.SetDesiredUpdateRate(self._DesiredUpdateRate)
if event.RightDown():
button = "Right"
elif event.LeftDown():
button = "Left"
elif event.MiddleDown():
button = "Middle"
else:
button = None
# save the button and capture mouse until the button is released
if button and not self._ActiveButton:
self._ActiveButton = button
if _useCapture:
self.CaptureMouse()
self.OnButtonDown(event)
def OnButtonDown(self, event):
"""Overridable event.
"""
if not self._Mode:
# figure out what renderer the mouse is over
self.UpdateRenderer(event)
if event.LeftDown():
self.OnLeftDown(event)
elif event.RightDown():
self.OnRightDown(event)
elif event.MiddleDown():
self.OnMiddleDown(event)
def OnLeftDown(self, event):
"""Overridable event.
"""
if not self._Mode:
if event.ControlDown():
self._Mode = "Zoom"
elif event.ShiftDown():
self._Mode = "Pan"
else:
self._Mode = "Rotate"
def OnRightDown(self, event):
"""Overridable event.
"""
if not self._Mode:
self._Mode = "Zoom"
def OnMiddleDown(self, event):
"""Overridable event.
"""
if not self._Mode:
self._Mode = "Pan"
def _OnButtonUp(self, event):
"""Handles the wx.EVT_LEFT/RIGHT/MIDDLE_UP events for
wxVTKRenderWindow.
"""
# helper function for releasing mouse capture
self._RenderWindow.SetDesiredUpdateRate(self._StillUpdateRate)
if event.RightUp():
button = "Right"
elif event.LeftUp():
button = "Left"
elif event.MiddleUp():
button = "Middle"
else:
button = None
        # if the ActiveButton is released, then release mouse capture
if self._ActiveButton and button == self._ActiveButton:
if _useCapture:
self.ReleaseMouse()
self._ActiveButton = None
self.OnButtonUp(event)
def OnButtonUp(self, event):
"""Overridable event.
"""
if event.LeftUp():
self.OnLeftUp(event)
elif event.RightUp():
self.OnRightUp(event)
elif event.MiddleUp():
self.OnMiddleUp(event)
# if not interacting, then do nothing more
if self._Mode:
if self._CurrentRenderer:
self.Render()
self._Mode = None
def OnLeftUp(self, event):
"""Overridable event.
"""
pass
def OnRightUp(self, event):
"""Overridable event.
"""
pass
def OnMiddleUp(self, event):
"""Overridable event.
"""
pass
def OnMotion(self, event):
"""Overridable event.
"""
if self._Mode == "Pan":
self.Pan(event)
elif self._Mode == "Rotate":
self.Rotate(event)
elif self._Mode == "Zoom":
self.Zoom(event)
def OnChar(self, event):
"""Overridable event.
"""
pass
def OnKeyDown(self, event):
"""Handles the wx.EVT_KEY_DOWN events for wxVTKRenderWindow.
"""
if event.GetKeyCode() == ord('r'):
self.Reset(event)
if event.GetKeyCode() == ord('w'):
self.Wireframe()
if event.GetKeyCode() == ord('s'):
self.Surface()
if event.GetKeyCode() == ord('p'):
self.PickActor(event)
if event.GetKeyCode() < 256:
self.OnChar(event)
def OnKeyUp(self, event):
"""Overridable event.
"""
pass
def GetZoomFactor(self):
"""Returns the current zoom factor.
"""
return self._CurrentZoom
def GetRenderWindow(self):
"""Returns the render window (vtkRenderWindow).
"""
return self._RenderWindow
def GetPicker(self):
"""Returns the current picker (vtkCellPicker).
"""
return self._Picker
def Render(self):
"""Actually renders the VTK scene on screen.
"""
if self._CurrentLight:
light = self._CurrentLight
light.SetPosition(self._CurrentCamera.GetPosition())
light.SetFocalPoint(self._CurrentCamera.GetFocalPoint())
if not self.GetUpdateRegion().IsEmpty() or self.__handle:
if self.__handle and self.__handle == self.GetHandle():
self._RenderWindow.Render()
elif self.GetHandle():
# this means the user has reparented us
# let's adapt to the new situation by doing the WindowRemap
# dance
self._RenderWindow.SetNextWindowInfo(str(self.GetHandle()))
self._RenderWindow.WindowRemap()
# store the new situation
self.__handle = self.GetHandle()
self._RenderWindow.Render()
def UpdateRenderer(self, event):
"""
UpdateRenderer will identify the renderer under the mouse and set
up _CurrentRenderer, _CurrentCamera, and _CurrentLight.
"""
x = event.GetX()
y = event.GetY()
windowX, windowY = self._RenderWindow.GetSize()
renderers = self._RenderWindow.GetRenderers()
numRenderers = renderers.GetNumberOfItems()
self._CurrentRenderer = None
renderers.InitTraversal()
for i in range(0,numRenderers):
renderer = renderers.GetNextItem()
vx,vy = (0,0)
if (windowX > 1):
vx = float(x)/(windowX-1)
if (windowY > 1):
vy = (windowY-float(y)-1)/(windowY-1)
(vpxmin,vpymin,vpxmax,vpymax) = renderer.GetViewport()
if (vx >= vpxmin and vx <= vpxmax and
vy >= vpymin and vy <= vpymax):
self._CurrentRenderer = renderer
self._ViewportCenterX = float(windowX)*(vpxmax-vpxmin)/2.0\
+vpxmin
self._ViewportCenterY = float(windowY)*(vpymax-vpymin)/2.0\
+vpymin
self._CurrentCamera = self._CurrentRenderer.GetActiveCamera()
lights = self._CurrentRenderer.GetLights()
lights.InitTraversal()
self._CurrentLight = lights.GetNextItem()
break
self._LastX = x
self._LastY = y
def GetCurrentRenderer(self):
"""Returns the current renderer.
"""
return self._CurrentRenderer
def Rotate(self, event):
"""Rotates the scene (camera).
"""
if self._CurrentRenderer:
x = event.GetX()
y = event.GetY()
self._CurrentCamera.Azimuth(self._LastX - x)
self._CurrentCamera.Elevation(y - self._LastY)
self._CurrentCamera.OrthogonalizeViewUp()
self._LastX = x
self._LastY = y
self._CurrentRenderer.ResetCameraClippingRange()
self.Render()
def Pan(self, event):
"""Pans the scene (camera).
"""
if self._CurrentRenderer:
x = event.GetX()
y = event.GetY()
renderer = self._CurrentRenderer
camera = self._CurrentCamera
(pPoint0,pPoint1,pPoint2) = camera.GetPosition()
(fPoint0,fPoint1,fPoint2) = camera.GetFocalPoint()
if camera.GetParallelProjection():
renderer.SetWorldPoint(fPoint0,fPoint1,fPoint2,1.0)
renderer.WorldToDisplay()
fx,fy,fz = renderer.GetDisplayPoint()
renderer.SetDisplayPoint(fx-x+self._LastX,
fy+y-self._LastY,
fz)
renderer.DisplayToWorld()
fx,fy,fz,fw = renderer.GetWorldPoint()
camera.SetFocalPoint(fx,fy,fz)
renderer.SetWorldPoint(pPoint0,pPoint1,pPoint2,1.0)
renderer.WorldToDisplay()
fx,fy,fz = renderer.GetDisplayPoint()
renderer.SetDisplayPoint(fx-x+self._LastX,
fy+y-self._LastY,
fz)
renderer.DisplayToWorld()
fx,fy,fz,fw = renderer.GetWorldPoint()
camera.SetPosition(fx,fy,fz)
else:
(fPoint0,fPoint1,fPoint2) = camera.GetFocalPoint()
# Specify a point location in world coordinates
renderer.SetWorldPoint(fPoint0,fPoint1,fPoint2,1.0)
renderer.WorldToDisplay()
# Convert world point coordinates to display coordinates
dPoint = renderer.GetDisplayPoint()
focalDepth = dPoint[2]
aPoint0 = self._ViewportCenterX + (x - self._LastX)
aPoint1 = self._ViewportCenterY - (y - self._LastY)
renderer.SetDisplayPoint(aPoint0,aPoint1,focalDepth)
renderer.DisplayToWorld()
(rPoint0,rPoint1,rPoint2,rPoint3) = renderer.GetWorldPoint()
if (rPoint3 != 0.0):
rPoint0 = rPoint0/rPoint3
rPoint1 = rPoint1/rPoint3
rPoint2 = rPoint2/rPoint3
camera.SetFocalPoint((fPoint0 - rPoint0) + fPoint0,
(fPoint1 - rPoint1) + fPoint1,
(fPoint2 - rPoint2) + fPoint2)
camera.SetPosition((fPoint0 - rPoint0) + pPoint0,
(fPoint1 - rPoint1) + pPoint1,
(fPoint2 - rPoint2) + pPoint2)
self._LastX = x
self._LastY = y
self.Render()
def Zoom(self, event):
"""Zooms the scene (camera).
"""
if self._CurrentRenderer:
x = event.GetX()
y = event.GetY()
renderer = self._CurrentRenderer
camera = self._CurrentCamera
zoomFactor = math.pow(1.02,(0.5*(self._LastY - y)))
self._CurrentZoom = self._CurrentZoom * zoomFactor
if camera.GetParallelProjection():
parallelScale = camera.GetParallelScale()/zoomFactor
camera.SetParallelScale(parallelScale)
else:
camera.Dolly(zoomFactor)
renderer.ResetCameraClippingRange()
self._LastX = x
self._LastY = y
self.Render()
def Reset(self, event=None):
"""Resets the camera.
"""
if self._CurrentRenderer:
self._CurrentRenderer.ResetCamera()
self.Render()
def Wireframe(self):
"""Sets the current actor representation as wireframe.
"""
actors = self._CurrentRenderer.GetActors()
numActors = actors.GetNumberOfItems()
actors.InitTraversal()
for i in range(0,numActors):
actor = actors.GetNextItem()
actor.GetProperty().SetRepresentationToWireframe()
self.Render()
def Surface(self):
"""Sets the current actor representation as surface.
"""
actors = self._CurrentRenderer.GetActors()
numActors = actors.GetNumberOfItems()
actors.InitTraversal()
for i in range(0,numActors):
actor = actors.GetNextItem()
actor.GetProperty().SetRepresentationToSurface()
self.Render()
def PickActor(self, event):
"""Picks an actor.
"""
if self._CurrentRenderer:
x = event.GetX()
y = event.GetY()
renderer = self._CurrentRenderer
picker = self._Picker
windowX, windowY = self._RenderWindow.GetSize()
picker.Pick(x,(windowY - y - 1),0.0,renderer)
actor = picker.GetActor()
if (self._PickedActor != None and
self._PrePickedProperty != None):
self._PickedActor.SetProperty(self._PrePickedProperty)
# release hold of the property
self._PrePickedProperty.UnRegister(self._PrePickedProperty)
self._PrePickedProperty = None
if (actor != None):
self._PickedActor = actor
self._PrePickedProperty = self._PickedActor.GetProperty()
# hold onto the property
self._PrePickedProperty.Register(self._PrePickedProperty)
self._PickedActor.SetProperty(self._PickedProperty)
self.Render()
#----------------------------------------------------------------------------
def wxVTKRenderWindowConeExample():
"""Like it says, just a simple example.
"""
# every wx app needs an app
app = wx.PySimpleApp()
# create the widget
frame = wx.Frame(None, -1, "wxVTKRenderWindow", size=(400,400))
widget = wxVTKRenderWindow(frame, -1)
ren = vtk.vtkRenderer()
widget.GetRenderWindow().AddRenderer(ren)
cone = vtk.vtkConeSource()
cone.SetResolution(8)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
ren.AddActor(coneActor)
# show the window
frame.Show()
app.MainLoop()
if __name__ == "__main__":
wxVTKRenderWindowConeExample()
| bsd-3-clause |
joaormatos/anaconda | mmfparser/player/event/expressions/application.py | 1 | 1112 | # Copyright (c) Mathias Kaerlev 2012.
# This file is part of Anaconda.
# Anaconda is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Anaconda is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Anaconda. If not, see <http://www.gnu.org/licenses/>.
from mmfparser.player.event.expressions.common import Expression
from mmfparser.player import clipboard
class DroppedFilename(Expression):
def get(self):
return ''
class DroppedFileCount(Expression):
def get(self):
return 0
class FrameRate(Expression):
def get(self):
return self.player.frameRate
class GetClipboard(Expression):
def get(self):
return clipboard.get() | gpl-3.0 |
henriquesouza/toply | TestDisplayer.py | 1 | 2569 | import gi
gi.require_version('Gtk', '3.0')
import threading
import time
from gi.repository import Gtk, Gdk
from gi.repository import GObject as GObject
from src.view import LyricsDisplayer
GObject.threads_init()
insta = LyricsDisplayer.LyricsDisplayer()
#w = Gtk.Window()
w = insta.lyrics_window()
#l = Gtk.Label()
#w.add(l)
#w.show_all()
#w.connect("destroy", lambda _: Gtk.main_quit())
#t = MyThread(l)
#t.start()
class T(threading.Thread):
def __init__(self):
super(T, self).__init__()
#insta.LyricsContent("Me", "u")
self.quit = False
#self.adicionar()
def grampear(self,counter):
line1 = [
"Somehow I know",
"That things are gonna change",
"New boundaries on the way",
"Like never before",
"Find a meaning to your life",
"Hear the whispers of the angels",
"Bring the sunrise again"
]
line2 = ["", "", "","","","","- New day shines"]
if(begin == True):
insta.lyrics_content(line1[counter], line2[counter])
#print("grampear()")
def run(self):
i = 0
s = 0
global begin
begin = False
while not self.quit:
'''
"Somehow I know",
"That things are gonna change",
"New boundaries on the way",
"Like never before",
"Find a meaning to your life",
"Hear the whispers of the angels",
"Bring the sunrise again"
'''
#38 -> 41
if(s >= 1 and s < 2):
begin = True
GObject.idle_add(self.grampear, 0)
elif(s >= 3.5 and s < 44.5):
GObject.idle_add(self.grampear, 1)
elif(s >= 44.5 and s < 46):
GObject.idle_add(self.grampear, 2)
elif(s >= 46 and s < 48.5):
GObject.idle_add(self.grampear, 3)
elif(s >= 48.5 and s < 49.5):
GObject.idle_add(self.grampear, 4)
elif(s >= 52 and s < 53):
GObject.idle_add(self.grampear, 5)
elif(s >= 54 and s < 56):
GObject.idle_add(self.grampear, 6)
if(i == 6):
i = 0
else:
i+=1
#print("sleep()")
time.sleep(0.01)
print(str(s)+"s")
s=round(s,2)+round(0.01,2)
t = T()
t.start()
Gtk.main()
t.quit = True
| gpl-3.0 |
prtx/What-I-learned-in-college | Data_Structures/binary_search_tree.py | 1 | 1159 | class BST_Node:
#initialize binary search tree
def __init__(self, item = None, left = None, right = None):
self.item = item
self.left = left
self.right = right
#traversals
    def preorder(self):
        print self.item
        if self.left:
            self.left.preorder()
        if self.right:
            self.right.preorder()
def inorder(self):
if self.left:
self.left.inorder()
print self.item
if self.right:
self.right.inorder()
    def postorder(self):
        if self.left:
            self.left.postorder()
        if self.right:
            self.right.postorder()
        print self.item
#insert node
def insert(self, item):
if self.item:
if item < self.item:
if self.left is None:
self.left = BST_Node(item)
else:
self.left.insert(item)
elif item > self.item:
if self.right is None:
self.right = BST_Node(item)
else:
self.right.insert(item)
else:
self.item = item
#search in tree
def search(self, item):
if self.item > item:
if self.left is None:
return False
return self.left.search(item)
elif self.item < item:
if self.right is None:
return False
return self.right.search(item)
else:
return True
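#example usage - a minimal sketch added for illustration; the values are arbitrary
if __name__ == '__main__':
    root = BST_Node(8)
    for value in [3, 10, 1, 6, 14]:
        root.insert(value)
    root.inorder()        #prints the items in ascending order
    print root.search(6)  #True
    print root.search(7)  #False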
| mit |
Fermi-Dirac/mathtests | pyggel/event.py | 1 | 12361 | """
pyggle.event
This library (PYGGEL) is licensed under the LGPL by Matthew Roe and PYGGEL contributors.
The event module contains classes to grab and access events.
"""
from .include import *
from . import view
import string
import time
class Keyboard(object):
"""A simple class to store keyboard events."""
def __init__(self):
"""Create the holder.
Attributes:
active -> a list of all keys hit or held
hit -> a list of all keys hit
held -> a list of all keys held"""
self.active = []
self.hook = {}
self.hit = []
self.held = []
def get_ident(self, event):
try:
return str(event)
except:
return event.str
def do_active_hit(self, event):
self.hook[event.key] = self.get_ident(event)
if not event.key in self.active:
self.active.append(event.key)
self.active.append(self.get_ident(event))
self.hit.append(event.key)
self.hit.append(self.get_ident(event))
def do_keyup(self, event):
if event.key in self.active:
self.active.remove(event.key)
self.active.remove(self.hook[event.key])
x = self.hook[event.key]
del self.hook[event.key]
return x
class Mouse(object):
"""A simple class to store mouse events."""
all_names = {1:"left", 2:"middle", 3:"right", 4:"wheel-up", 5:"wheel-down"}
def __init__(self):
"""Create the holder.
Attributes:
active -> a list of all mouse buttons that were clicked or held
hit -> a list of all mouse buttons that were clicked
held -> a list of all mouse buttons that were held"""
self.active = []
self.motion = [0,0]
self.hit = []
self.held = []
def get_pos(self):
"""Return the mouse pos."""
return view.screen.get_mouse_pos()
def get_name(self, button):
"""Return the 'name' that matches the button, ie:
1 -> left
2 -> middle
3 -> right
4 -> wheel-up
5 -> wheel-down"""
if button in self.all_names:
return self.all_names[button]
return "extra-%s"%button
def do_active_hit(self, event):
"""Add a hit event."""
if not event.button in self.active:
name = self.get_name(event.button)
self.active.append(event.button)
self.active.append(name)
self.hit.append(event.button)
self.hit.append(name)
def do_buttonup(self, event):
"""Remove a button from active list."""
if event.button in self.active:
name = self.get_name(event.button)
self.active.remove(event.button)
self.active.remove(name)
class Dispatcher(object):
"""A simple dispatcher class, that allows you to bind functions to events, and execute them all with a single command."""
def __init__(self):
"""Create the Dispatcher object."""
self.name_bindings = {}
def bind(self, name, function):
"""Bind 'function' to the event 'name'.
name can be anything that works as a python dict key (string, number, etc.)
function must be a python function or method"""
if name in self.name_bindings:
self.name_bindings[name].append(function)
else:
self.name_bindings[name] = [function]
def fire(self, name, *args, **kwargs):
"""Execute command 'name', calls any functions bound to this event with args/kwargs.
name can be anything that works as a python dict key (string, number, etc.)
*args/**kwargs are the arguments to use on any function calls bound to this event"""
if name in self.name_bindings:
for func in self.name_bindings[name]:
func(*args, **kwargs)
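# Illustrative sketch of Dispatcher usage (the event name and callback below are
# made up for this example and are not part of PYGGEL):
#     dispatch = Dispatcher()
#     dispatch.bind("player-died", lambda cause: restart_level(cause))
#     dispatch.fire("player-died", "fell off the map")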
class Handler(object):
"""A simple event handler. This object catches and stores events, as well as fire off any callbacks attached to them.
There should only ever be one Handler in use at once, as only one handler can get a specific event.
If a gui is used, it will "take control" of teh event handler, ie,
any events it catches will be suppressed here (they can still be accessed at gui_keyboard/gui_mouse)
no callbacks will be fired, no values set - the only exceptions are:
quit, update, mouseup, and keyup - these values will be set, but no callbacks will be fired."""
def __init__(self):
"""Create the handler.
Attributes:
keyboard -> a Keyboard object storing keyboard events
mouse -> a Mouse object storing mouse events
        quit -> bool - whether quit signal has been sent
dispatch -> Dispatcher object used for firing callbacks
uncaught_events -> list of all events the Handler couldn't handle"""
self.keyboard = Keyboard()
self.mouse = Mouse()
self.quit = False
self.dispatch = Dispatcher()
self.uncaught_events = []
self.all_guis = []
self.gui = None
self.gui_keyboard = Keyboard()
self.gui_mouse = Mouse()
self.gui_uncaught_events = []
def bind_to_event(self, event, function):
"""Bind a callback function to an event.
event must be the name of an input event, event names are:
keydown - when a key is pressed
keyup - when a key is released
            keyhold - when a key is held
            keyactive - when a key is active
mousedown - when a mouse button is pressed
mouseup - when a mouse button is released
mousehold - when a mouse button is held
mouseactive - when a mouse button is active
quit - when the QUIT event was fired (ie the X box on the window is hit)
uncaught-event - when an unsupported event is fired
update - called at end of grabbing events/firing callbacks.
function must be a python function or method that accepts the proper args for each event,
event args are:
keydown, keyup, keyhold: key->Pygame event key, string->the Python str of the key, or the unicode of the key
string will be the key pressed, ie, the a key is "a" (or "A" with shift/caps)
mousedown, mouseup, mousehold: button->Pygame event button, string-> the "name" of the button
string will be "left", "right", "middle", "wheel-up", "wheel-down", or "extra-N" where N is the Pygame event button
uncaught-event: event->the Pygame event
quit, update: None"""
self.dispatch.bind(event, function)
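    # Illustrative sketch of wiring a callback (the callback and variable names
    # below are made up for this example):
    #     def on_keydown(key, string):
    #         print "hit key:", string
    #     handler = Handler()
    #     handler.bind_to_event("keydown", on_keydown)
    #     handler.update()  # fires on_keydown for each key pressed this frame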
def replace_event(self, event, function):
"""This is the same as bind_to_event, except that it forces function to be the only one attached to the event,
instead of allowing several."""
self.dispatch.name_bindings[event] = []
self.bind_to_event(event, function)
def handle_event(self, event):
"""Handle an event, store in proper object, and fire callbacks."""
if event.type == KEYDOWN:
if self.gui and self.gui.handle_keydown(event.key, str(event.str)):
self.gui_keyboard.do_active_hit(event)
return None
self.keyboard.do_active_hit(event)
self.dispatch.fire("keydown", event.key,
self.keyboard.get_ident(event))
elif event.type == KEYUP:
x = self.keyboard.do_keyup(event)
xb = self.gui_keyboard.do_keyup(event)
if self.gui and self.gui.handle_keyup(event.key, xb):
return None
if x or xb:
self.dispatch.fire("keyup", event.key, x)
else:
if self.gui and self.gui.handle_uncaught_event(event):
self.gui_uncaught_events.append(event)
return None
self.uncaught_events.append(event)
self.dispatch.fire("uncaught-event", event)
elif event.type == MOUSEBUTTONDOWN:
name = self.mouse.get_name(event.button)
if self.gui and self.gui.handle_mousedown(event.button, name):
self.gui_mouse.do_active_hit(event)
return None
self.mouse.do_active_hit(event)
self.dispatch.fire("mousedown", event.button, name)
elif event.type == MOUSEBUTTONUP:
self.gui_mouse.do_buttonup(event)
self.mouse.do_buttonup(event)
name = self.mouse.get_name(event.button)
if self.gui and self.gui.handle_mouseup(event.button, name):
return None
self.dispatch.fire("mouseup", event.button, name)
elif event.type == MOUSEMOTION:
self.gui_mouse.motion[0] += event.rel[0]
self.gui_mouse.motion[1] += event.rel[1]
if self.gui and self.gui.handle_mousemotion(event.rel):
return None
self.dispatch.fire("mousemotion", event.rel)
self.mouse.motion[0] += event.rel[0]
self.mouse.motion[1] += event.rel[1]
elif event.type == QUIT:
self.quit = True
self.dispatch.fire("quit")
else:
if self.gui and self.gui.handle_uncaught_event(event):
self.gui_uncaught_events.append(event)
return None
self.uncaught_events.append(event)
self.dispatch.fire("uncaught-event", event)
def update(self):
"""Grab all events, store in proper objects, and fire callbacks where necessary."""
self.keyboard.hit = []
self.mouse.hit = []
self.mouse.motion = [0,0]
self.gui_mouse.motion = [0,0]
self.uncaught_events = []
self.gui_uncaught_events = []
self.gui_keyboard.hit = []
self.gui_mouse.hit = []
self.keyboard.held = []
self.gui_keyboard.held = []
self.mouse.held = []
self.gui_mouse.held = []
for event in pygame.event.get():
self.handle_event(event)
for i in self.keyboard.active:
if not i in self.keyboard.hit:
self.keyboard.held.append(i) #regardless of type now!
if i in self.keyboard.hook: #make sure these aren't the string names! Or else we would double fire, potentially
eventkey = i
name = self.keyboard.hook[eventkey]
self.dispatch.fire("keyhold", eventkey, name)
self.dispatch.fire("keyactive", eventkey, name)
for i in self.mouse.active:
if not i in self.mouse.hit:
self.mouse.held.append(i)
if type(i) is type(1): #same thing as keys, only slightly different test!
self.dispatch.fire("mousehold", i, self.mouse.get_name(i))
self.dispatch.fire("mouseactive", i, self.mouse.get_name(i))
for i in self.gui_keyboard.active:
if not i in self.gui_keyboard.hit:
self.gui_keyboard.held.append(i) #regardless of type now!
if i in self.gui_keyboard.hook: #make sure these aren't the string names! Or else we would double fire, potentially
eventkey = i
name = self.gui_keyboard.hook[eventkey]
if self.gui:
self.gui.handle_keyhold(eventkey, name)
for i in self.gui_mouse.active:
if not i in self.gui_mouse.hit:
self.gui_mouse.held.append(i)
if type(i) is type(1): #same thing as keys, only slightly different test!
if self.gui:
self.gui.handle_mousehold(i, self.gui_mouse.get_name(i))
self.dispatch.fire("update")
| mit |
swarna-k/MyDiary | flask/lib/python2.7/site-packages/migrate/versioning/script/base.py | 79 | 1700 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from migrate import exceptions
from migrate.versioning.config import operations
from migrate.versioning import pathed
log = logging.getLogger(__name__)
class BaseScript(pathed.Pathed):
"""Base class for other types of scripts.
All scripts have the following properties:
source (script.source())
The source code of the script
version (script.version())
The version number of the script
operations (script.operations())
The operations defined by the script: upgrade(), downgrade() or both.
Returns a tuple of operations.
Can also check for an operation with ex. script.operation(Script.ops.up)
""" # TODO: sphinxfy this and implement it correctly
def __init__(self, path):
log.debug('Loading script %s...' % path)
self.verify(path)
super(BaseScript, self).__init__(path)
log.debug('Script %s loaded successfully' % path)
@classmethod
def verify(cls, path):
"""Ensure this is a valid script
This version simply ensures the script file's existence
:raises: :exc:`InvalidScriptError <migrate.exceptions.InvalidScriptError>`
"""
try:
cls.require_found(path)
except:
raise exceptions.InvalidScriptError(path)
def source(self):
""":returns: source code of the script.
:rtype: string
"""
fd = open(self.path)
ret = fd.read()
fd.close()
return ret
def run(self, engine):
"""Core of each BaseScript subclass.
This method executes the script.
"""
raise NotImplementedError()
| bsd-3-clause |
timopulkkinen/BubbleFish | chrome/common/extensions/docs/server2/reference_resolver.py | 3 | 7449 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from file_system import FileNotFoundError
import logging
import object_store
import re
import string
def _ClassifySchemaNode(node_name, api):
"""Attempt to classify |node_name| in an API, determining whether |node_name|
refers to a type, function, event, or property in |api|.
"""
if '.' in node_name:
node_name, rest = node_name.split('.', 1)
else:
rest = None
for key, group in [('types', 'type'),
('functions', 'method'),
('events', 'event'),
('properties', 'property')]:
for item in api.get(key, []):
if item['name'] == node_name:
if rest is not None:
ret = _ClassifySchemaNode(rest, item)
if ret is not None:
return ret
else:
return group, node_name
return None
def _MakeKey(namespace, ref, title):
return '%s.%s.%s' % (namespace, ref, title)
class ReferenceResolver(object):
"""Resolves references to $ref's by searching through the APIs to find the
correct node.
$ref's have two forms:
$ref:api.node - Replaces the $ref with a link to node on the API page. The
title is set to the name of the node.
$ref:[api.node The Title] - Same as the previous form but title is set to
"The Title".
"""
# Matches after a $ref: that doesn't have []s.
_bare_ref = re.compile('\w+(\.\w+)*')
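  # Illustrative example (the API and node names are hypothetical, only meant to
  # show the intended substitution):
  #   resolver.ResolveAllLinks('See $ref:[tabs.create opens a tab] for details.')
  #   would produce '<a href="tabs.html#method-create">opens a tab</a>' when
  #   tabs.create resolves, and fall back to a '#type-...' anchor otherwise.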
class Factory(object):
def __init__(self,
api_data_source_factory,
api_list_data_source_factory,
object_store):
self._api_data_source_factory = api_data_source_factory
self._api_list_data_source_factory = api_list_data_source_factory
self._object_store = object_store
def Create(self):
return ReferenceResolver(
self._api_data_source_factory.Create(None, disable_refs=True),
self._api_list_data_source_factory.Create(),
self._object_store)
def __init__(self, api_data_source, api_list_data_source, object_store):
self._api_data_source = api_data_source
self._api_list_data_source = api_list_data_source
self._object_store = object_store
def _GetRefLink(self, ref, api_list, namespace, title):
# Check nodes within each API the ref might refer to.
parts = ref.split('.')
for i, part in enumerate(parts):
api_name = '.'.join(parts[:i])
if api_name not in api_list:
continue
try:
api = self._api_data_source.get(api_name)
except FileNotFoundError:
continue
name = '.'.join(parts[i:])
# Attempt to find |name| in the API.
node_info = _ClassifySchemaNode(name, api)
if node_info is None:
# Check to see if this ref is a property. If it is, we want the ref to
# the underlying type the property is referencing.
for prop in api.get('properties', []):
# If the name of this property is in the ref text, replace the
# property with its type, and attempt to classify it.
if prop['name'] in name and 'link' in prop:
name_as_prop_type = name.replace(prop['name'], prop['link']['name'])
node_info = _ClassifySchemaNode(name_as_prop_type, api)
if node_info is not None:
name = name_as_prop_type
text = ref.replace(prop['name'], prop['link']['name'])
break
if node_info is None:
continue
else:
text = ref
category, node_name = node_info
if namespace is not None and text.startswith('%s.' % namespace):
text = text[len('%s.' % namespace):]
return {
'href': '%s.html#%s-%s' % (api_name, category, name.replace('.', '-')),
'text': title if title else text,
'name': node_name
}
# If it's not a reference to an API node it might just be a reference to an
# API. Check this last so that links within APIs take precedence over links
# to other APIs.
if ref in api_list:
return {
'href': '%s.html' % ref,
'text': title if title else ref,
'name': ref
}
return None
def GetLink(self, ref, namespace=None, title=None):
"""Resolve $ref |ref| in namespace |namespace| if not None, returning None
if it cannot be resolved.
"""
link = self._object_store.Get(_MakeKey(namespace, ref, title),
object_store.REFERENCE_RESOLVER).Get()
if link is not None:
return link
api_list = self._api_list_data_source.GetAllNames()
link = self._GetRefLink(ref, api_list, namespace, title)
if link is None and namespace is not None:
# Try to resolve the ref in the current namespace if there is one.
link = self._GetRefLink('%s.%s' % (namespace, ref),
api_list,
namespace,
title)
if link is not None:
self._object_store.Set(_MakeKey(namespace, ref, title),
link,
object_store.REFERENCE_RESOLVER)
return link
def SafeGetLink(self, ref, namespace=None, title=None):
"""Resolve $ref |ref| in namespace |namespace|, or globally if None. If it
cannot be resolved, pretend like it is a link to a type.
"""
ref_data = self.GetLink(ref, namespace=namespace, title=title)
if ref_data is not None:
return ref_data
logging.error('$ref %s could not be resolved in namespace %s.' %
(ref, namespace))
type_name = ref.rsplit('.', 1)[-1]
return {
'href': '#type-%s' % type_name,
'text': title if title else ref,
'name': ref
}
def ResolveAllLinks(self, text, namespace=None):
"""This method will resolve all $ref links in |text| using namespace
|namespace| if not None. Any links that cannot be resolved will be replaced
using the default link format that |SafeGetLink| uses.
"""
if text is None or '$ref:' not in text:
return text
split_text = text.split('$ref:')
# |split_text| is an array of text chunks that all start with the
# argument to '$ref:'.
formatted_text = [split_text[0]]
for ref_and_rest in split_text[1:]:
title = None
if ref_and_rest.startswith('[') and ']' in ref_and_rest:
# Text was '$ref:[foo.bar maybe title] other stuff'.
ref_with_title, rest = ref_and_rest[1:].split(']', 1)
ref_with_title = ref_with_title.split(None, 1)
if len(ref_with_title) == 1:
# Text was '$ref:[foo.bar] other stuff'.
ref = ref_with_title[0]
else:
# Text was '$ref:[foo.bar title] other stuff'.
ref, title = ref_with_title
else:
# Text was '$ref:foo.bar other stuff'.
match = self._bare_ref.match(ref_and_rest)
if match is None:
ref = ''
rest = ref_and_rest
else:
ref = match.group()
rest = ref_and_rest[match.end():]
ref_dict = self.SafeGetLink(ref, namespace=namespace, title=title)
formatted_text.append('<a href="%(href)s">%(text)s</a>%(rest)s' %
{ 'href': ref_dict['href'], 'text': ref_dict['text'], 'rest': rest })
return ''.join(formatted_text)
| bsd-3-clause |
beepscore/fibonacci | logging_util.py | 1 | 1709 | # https://docs.python.org/3.6/howto/logging.html#logging-basic-tutorial
import logging
import sys
def get_logger(name):
"""
Log to stream only. Don't add a handler to log to a file.
Let program user decide if they want to pipe stream output to a file e.g.
python3 fibonacci.py >> ../fib.log
python3 -m unittest >> ../test.log
References
https://12factor.net/logs
"logging in an application"
https://docs.python-guide.org/writing/logging/
https://stackoverflow.com/questions/22807972/python-best-practice-in-terms-of-logging
https://stackoverflow.com/questions/28330317/print-timestamp-for-logging-in-python
https://docs.python.org/3/library/logging.html#formatter-objects
https://docs.python.org/3.6/howto/logging.html#logging-basic-tutorial
https://docs.python.org/3.6/howto/logging.html#logging-to-a-file
:param name: logger name
:return: a configured logger
"""
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(funcName)s line:%(lineno)s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# add one or more handlers
# log stream to terminal stdout. program user can choose to pipe stream to a file.
screen_handler = logging.StreamHandler(stream=sys.stdout)
screen_handler.setFormatter(formatter)
logger.addHandler(screen_handler)
# Don't log to file. See docstring for rationale.
# mode 'a' append, not 'w' write
# handler = logging.FileHandler('./data/output/fib.log', mode='a')
# handler.setFormatter(formatter)
# logger.addHandler(handler)
return logger
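# A minimal usage sketch (the logger name below is arbitrary):
if __name__ == "__main__":
    logger = get_logger("example")
    logger.info("logger configured; output goes to stdout")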
| mit |
red-hood/calendarserver | txweb2/auth/basic.py | 1 | 2307 | # -*- test-case-name: txweb2.test.test_httpauth -*-
##
# Copyright (c) 2006-2009 Twisted Matrix Laboratories.
# Copyright (c) 2010-2015 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
##
from twisted.cred import credentials, error
from twisted.internet.defer import succeed, fail
from txweb2.auth.interfaces import ICredentialFactory
from zope.interface import implements
class BasicCredentialFactory(object):
"""
Credential Factory for HTTP Basic Authentication
"""
implements(ICredentialFactory)
scheme = 'basic'
def __init__(self, realm):
self.realm = realm
def getChallenge(self, peer):
"""
@see L{ICredentialFactory.getChallenge}
"""
return succeed({'realm': self.realm})
def decode(self, response, request):
"""
Decode the credentials for basic auth.
@see L{ICredentialFactory.decode}
"""
try:
creds = (response + '===').decode('base64')
except:
raise error.LoginFailed('Invalid credentials')
creds = creds.split(':', 1)
if len(creds) == 2:
return succeed(credentials.UsernamePassword(*creds))
else:
return fail(error.LoginFailed('Invalid credentials'))
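# Illustrative sketch (realm and credential string are examples only, not from
# the codebase):
#     factory = BasicCredentialFactory('example.org')
#     d = factory.decode('dXNlcjpwYXNz', request)  # base64 of "user:pass"
#     # d fires with credentials.UsernamePassword('user', 'pass')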
| apache-2.0 |
Klaudit/phantomjs | src/breakpad/src/tools/gyp/pylib/gyp/__init__.py | 137 | 17502 | #!/usr/bin/python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import gyp.input
import optparse
import os.path
import re
import shlex
import sys
# Default debug modes for GYP
debug = {}
# List of "official" debug modes, but you can use anything you like.
DEBUG_GENERAL = 'general'
DEBUG_VARIABLES = 'variables'
DEBUG_INCLUDES = 'includes'
def DebugOutput(mode, message):
if mode in gyp.debug.keys():
print "%s: %s" % (mode.upper(), message)
def FindBuildFiles():
extension = '.gyp'
files = os.listdir(os.getcwd())
build_files = []
for file in files:
if file[-len(extension):] == extension:
build_files.append(file)
return build_files
def Load(build_files, format, default_variables={},
includes=[], depth='.', params={}, check=False):
"""
Loads one or more specified build files.
default_variables and includes will be copied before use.
Returns the generator for the specified format and the
data returned by loading the specified build files.
"""
default_variables = copy.copy(default_variables)
# Default variables provided by this program and its modules should be
# named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace,
# avoiding collisions with user and automatic variables.
default_variables['GENERATOR'] = format
generator_name = 'gyp.generator.' + format
# These parameters are passed in order (as opposed to by key)
# because ActivePython cannot handle key parameters to __import__.
generator = __import__(generator_name, globals(), locals(), generator_name)
default_variables.update(generator.generator_default_variables)
# Give the generator the opportunity to set additional variables based on
# the params it will receive in the output phase.
if getattr(generator, 'CalculateVariables', None):
generator.CalculateVariables(default_variables, params)
# Fetch the generator specific info that gets fed to input, we use getattr
# so we can default things and the generators only have to provide what
# they need.
generator_input_info = {
'generator_wants_absolute_build_file_paths':
getattr(generator, 'generator_wants_absolute_build_file_paths', False),
'generator_handles_variants':
getattr(generator, 'generator_handles_variants', False),
'non_configuration_keys':
getattr(generator, 'generator_additional_non_configuration_keys', []),
'path_sections':
getattr(generator, 'generator_additional_path_sections', []),
'extra_sources_for_rules':
getattr(generator, 'generator_extra_sources_for_rules', []),
'generator_supports_multiple_toolsets':
getattr(generator, 'generator_supports_multiple_toolsets', False),
}
# Process the input specific to this generator.
result = gyp.input.Load(build_files, default_variables, includes[:],
depth, generator_input_info, check)
return [generator] + result
def NameValueListToDict(name_value_list):
"""
Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
of the pairs. If a string is simply NAME, then the value in the dictionary
is set to True. If VALUE can be converted to an integer, it is.
"""
result = { }
for item in name_value_list:
tokens = item.split('=', 1)
if len(tokens) == 2:
# If we can make it an int, use that, otherwise, use the string.
try:
token_value = int(tokens[1])
except ValueError:
token_value = tokens[1]
# Set the variable to the supplied value.
result[tokens[0]] = token_value
else:
# No value supplied, treat it as a boolean and set it.
result[tokens[0]] = True
return result
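# Illustrative example (hypothetical input):
#   NameValueListToDict(['OS=linux', 'chromeos=1', 'component'])
#   returns {'OS': 'linux', 'chromeos': 1, 'component': True}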
def ShlexEnv(env_name):
flags = os.environ.get(env_name, [])
if flags:
flags = shlex.split(flags)
return flags
def FormatOpt(opt, value):
if opt.startswith('--'):
return '%s=%s' % (opt, value)
return opt + value
def RegenerateAppendFlag(flag, values, predicate, env_name, options):
"""Regenerate a list of command line flags, for an option of action='append'.
The |env_name|, if given, is checked in the environment and used to generate
an initial list of options, then the options that were specified on the
command line (given in |values|) are appended. This matches the handling of
environment variables and command line flags where command line flags override
the environment, while not requiring the environment to be set when the flags
are used again.
"""
flags = []
if options.use_environment and env_name:
for flag_value in ShlexEnv(env_name):
flags.append(FormatOpt(flag, predicate(flag_value)))
if values:
for flag_value in values:
flags.append(FormatOpt(flag, predicate(flag_value)))
return flags
def RegenerateFlags(options):
"""Given a parsed options object, and taking the environment variables into
account, returns a list of flags that should regenerate an equivalent options
object (even in the absence of the environment variables.)
Any path options will be normalized relative to depth.
The format flag is not included, as it is assumed the calling generator will
set that as appropriate.
"""
def FixPath(path):
path = gyp.common.FixIfRelativePath(path, options.depth)
if not path:
return os.path.curdir
return path
def Noop(value):
return value
# We always want to ignore the environment when regenerating, to avoid
# duplicate or changed flags in the environment at the time of regeneration.
flags = ['--ignore-environment']
for name, metadata in options._regeneration_metadata.iteritems():
opt = metadata['opt']
value = getattr(options, name)
value_predicate = metadata['type'] == 'path' and FixPath or Noop
action = metadata['action']
env_name = metadata['env_name']
if action == 'append':
flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
env_name, options))
elif action in ('store', None): # None is a synonym for 'store'.
if value:
flags.append(FormatOpt(opt, value_predicate(value)))
elif options.use_environment and env_name and os.environ.get(env_name):
flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
elif action in ('store_true', 'store_false'):
if ((action == 'store_true' and value) or
(action == 'store_false' and not value)):
flags.append(opt)
elif options.use_environment and env_name:
print >>sys.stderr, ('Warning: environment regeneration unimplemented '
'for %s flag %r env_name %r' % (action, opt,
env_name))
else:
print >>sys.stderr, ('Warning: regeneration unimplemented for action %r '
'flag %r' % (action, opt))
return flags
class RegeneratableOptionParser(optparse.OptionParser):
def __init__(self):
self.__regeneratable_options = {}
optparse.OptionParser.__init__(self)
def add_option(self, *args, **kw):
"""Add an option to the parser.
This accepts the same arguments as OptionParser.add_option, plus the
following:
regenerate: can be set to False to prevent this option from being included
in regeneration.
env_name: name of environment variable that additional values for this
option come from.
type: adds type='path', to tell the regenerator that the values of
this option need to be made relative to options.depth
"""
env_name = kw.pop('env_name', None)
if 'dest' in kw and kw.pop('regenerate', True):
dest = kw['dest']
# The path type is needed for regenerating, for optparse we can just treat
# it as a string.
type = kw.get('type')
if type == 'path':
kw['type'] = 'string'
self.__regeneratable_options[dest] = {
'action': kw.get('action'),
'type': type,
'env_name': env_name,
'opt': args[0],
}
optparse.OptionParser.add_option(self, *args, **kw)
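  # Illustrative sketch (the option below is hypothetical): a path-typed option
  # that also picks up values from an environment variable and is recorded for
  # regeneration:
  #   parser = RegeneratableOptionParser()
  #   parser.add_option('-X', '--extra-include', dest='extra_includes',
  #                     action='append', type='path', env_name='GYP_EXTRA_INCLUDES')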
def parse_args(self, *args):
values, args = optparse.OptionParser.parse_args(self, *args)
values._regeneration_metadata = self.__regeneratable_options
return values, args
def main(args):
my_name = os.path.basename(sys.argv[0])
parser = RegeneratableOptionParser()
usage = 'usage: %s [options ...] [build_file ...]'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
env_name='GYP_DEFINES',
help='sets variable VAR to value VAL')
parser.add_option('-f', '--format', dest='formats', action='append',
env_name='GYP_GENERATORS', regenerate=False,
help='output formats to generate')
parser.add_option('--msvs-version', dest='msvs_version',
regenerate=False,
help='Deprecated; use -G msvs_version=MSVS_VERSION instead')
parser.add_option('-I', '--include', dest='includes', action='append',
metavar='INCLUDE', type='path',
help='files to include in all loaded .gyp files')
parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
help='set DEPTH gyp variable to a relative path to PATH')
parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
action='append', default=[], help='turn on a debugging '
'mode for debugging GYP. Supported modes are "variables" '
'and "general"')
parser.add_option('-S', '--suffix', dest='suffix', default='',
help='suffix to add to generated files')
parser.add_option('-G', dest='generator_flags', action='append', default=[],
metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
help='sets generator flag FLAG to VAL')
parser.add_option('--generator-output', dest='generator_output',
action='store', default=None, metavar='DIR', type='path',
env_name='GYP_GENERATOR_OUTPUT',
help='puts generated build files under DIR')
parser.add_option('--ignore-environment', dest='use_environment',
action='store_false', default=True, regenerate=False,
help='do not read options from environment variables')
parser.add_option('--check', dest='check', action='store_true',
help='check format of gyp files')
# We read a few things from ~/.gyp, so set up a var for that.
home_vars = ['HOME']
if sys.platform in ('cygwin', 'win32'):
home_vars.append('USERPROFILE')
home = None
for home_var in home_vars:
home = os.getenv(home_var)
if home != None:
break
home_dot_gyp = None
if home != None:
home_dot_gyp = os.path.join(home, '.gyp')
if not os.path.exists(home_dot_gyp):
home_dot_gyp = None
# TODO(thomasvl): add support for ~/.gyp/defaults
(options, build_files_arg) = parser.parse_args(args)
build_files = build_files_arg
if not options.formats:
# If no format was given on the command line, then check the env variable.
generate_formats = []
if options.use_environment:
generate_formats = os.environ.get('GYP_GENERATORS', [])
if generate_formats:
generate_formats = re.split('[\s,]', generate_formats)
if generate_formats:
options.formats = generate_formats
else:
# Nothing in the variable, default based on platform.
options.formats = [ {'darwin': 'xcode',
'win32': 'msvs',
'cygwin': 'msvs',
'freebsd7': 'make',
'freebsd8': 'make',
'linux2': 'scons',}[sys.platform] ]
if not options.generator_output and options.use_environment:
g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
if g_o:
options.generator_output = g_o
for mode in options.debug:
gyp.debug[mode] = 1
# Do an extra check to avoid work when we're not debugging.
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, 'running with these options:')
for (option, value) in options.__dict__.items():
if option[0] == '_':
continue
if isinstance(value, basestring):
DebugOutput(DEBUG_GENERAL, " %s: '%s'" % (option, value))
else:
DebugOutput(DEBUG_GENERAL, " %s: %s" % (option, str(value)))
if not build_files:
build_files = FindBuildFiles()
if not build_files:
print >>sys.stderr, (usage + '\n\n%s: error: no build_file') % \
(my_name, my_name)
return 1
# TODO(mark): Chromium-specific hack!
# For Chromium, the gyp "depth" variable should always be a relative path
# to Chromium's top-level "src" directory. If no depth variable was set
# on the command line, try to find a "src" directory by looking at the
# absolute path to each build file's directory. The first "src" component
# found will be treated as though it were the path used for --depth.
if not options.depth:
for build_file in build_files:
build_file_dir = os.path.abspath(os.path.dirname(build_file))
build_file_dir_components = build_file_dir.split(os.path.sep)
components_len = len(build_file_dir_components)
for index in xrange(components_len - 1, -1, -1):
if build_file_dir_components[index] == 'src':
options.depth = os.path.sep.join(build_file_dir_components)
break
del build_file_dir_components[index]
# If the inner loop found something, break without advancing to another
# build file.
if options.depth:
break
if not options.depth:
raise Exception, \
'Could not automatically locate src directory. This is a ' + \
'temporary Chromium feature that will be removed. Use ' + \
'--depth as a workaround.'
# -D on the command line sets variable defaults - D isn't just for define,
# it's for default. Perhaps there should be a way to force (-F?) a
# variable's value so that it can't be overridden by anything else.
cmdline_default_variables = {}
defines = []
if options.use_environment:
defines += ShlexEnv('GYP_DEFINES')
if options.defines:
defines += options.defines
cmdline_default_variables = NameValueListToDict(defines)
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL,
"cmdline_default_variables: %s" % cmdline_default_variables)
# Set up includes.
includes = []
# If ~/.gyp/include.gypi exists, it'll be forcibly included into every
# .gyp file that's loaded, before anything else is included.
if home_dot_gyp != None:
default_include = os.path.join(home_dot_gyp, 'include.gypi')
if os.path.exists(default_include):
includes.append(default_include)
# Command-line --include files come after the default include.
if options.includes:
includes.extend(options.includes)
# Generator flags should be prefixed with the target generator since they
# are global across all generator runs.
gen_flags = []
if options.use_environment:
gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS')
if options.generator_flags:
gen_flags += options.generator_flags
generator_flags = NameValueListToDict(gen_flags)
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, "generator_flags: %s" % generator_flags)
# TODO: Remove this and the option after we've gotten folks to move to the
# generator flag.
if options.msvs_version:
print >>sys.stderr, \
'DEPRECATED: Use generator flag (-G msvs_version=' + \
options.msvs_version + ') instead of --msvs-version=' + \
options.msvs_version
generator_flags['msvs_version'] = options.msvs_version
# Generate all requested formats (use a set in case we got one format request
# twice)
for format in set(options.formats):
params = {'options': options,
'build_files': build_files,
'generator_flags': generator_flags,
'cwd': os.getcwd(),
'build_files_arg': build_files_arg,
'gyp_binary': sys.argv[0],
'home_dot_gyp': home_dot_gyp}
# Start with the default variables from the command line.
[generator, flat_list, targets, data] = Load(build_files, format,
cmdline_default_variables,
includes, options.depth,
params, options.check)
# TODO(mark): Pass |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
# NOTE: flat_list is the flattened dependency graph specifying the order
# that targets may be built. Build systems that operate serially or that
# need to have dependencies defined before dependents reference them should
# generate targets in the order specified in flat_list.
generator.GenerateOutput(flat_list, targets, data, params)
# Done
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause |
peterjoel/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/executors/executorselenium.py | 2 | 14864 | import json
import os
import socket
import threading
import time
import traceback
import urlparse
import uuid
from .base import (CallbackHandler,
RefTestExecutor,
RefTestImplementation,
TestharnessExecutor,
extra_timeout,
strip_server)
from .protocol import (BaseProtocolPart,
TestharnessProtocolPart,
Protocol,
SelectorProtocolPart,
ClickProtocolPart,
SendKeysProtocolPart,
ActionSequenceProtocolPart,
TestDriverProtocolPart)
from ..testrunner import Stop
here = os.path.join(os.path.split(__file__)[0])
webdriver = None
exceptions = None
RemoteConnection = None
Command = None
def do_delayed_imports():
global webdriver
global exceptions
global RemoteConnection
global Command
from selenium import webdriver
from selenium.common import exceptions
from selenium.webdriver.remote.remote_connection import RemoteConnection
from selenium.webdriver.remote.command import Command
class SeleniumBaseProtocolPart(BaseProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def execute_script(self, script, async=False):
method = self.webdriver.execute_async_script if async else self.webdriver.execute_script
return method(script)
def set_timeout(self, timeout):
self.webdriver.set_script_timeout(timeout * 1000)
@property
def current_window(self):
return self.webdriver.current_window_handle
def set_window(self, handle):
self.webdriver.switch_to_window(handle)
def wait(self):
while True:
try:
self.webdriver.execute_async_script("")
except exceptions.TimeoutException:
pass
except (socket.timeout, exceptions.NoSuchWindowException,
exceptions.ErrorInResponseException, IOError):
break
except Exception as e:
self.logger.error(traceback.format_exc(e))
break
class SeleniumTestharnessProtocolPart(TestharnessProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
self.runner_handle = None
with open(os.path.join(here, "runner.js")) as f:
self.runner_script = f.read()
def load_runner(self, url_protocol):
if self.runner_handle:
self.webdriver.switch_to_window(self.runner_handle)
url = urlparse.urljoin(self.parent.executor.server_url(url_protocol),
"/testharness_runner.html")
self.logger.debug("Loading %s" % url)
self.webdriver.get(url)
self.runner_handle = self.webdriver.current_window_handle
format_map = {"title": threading.current_thread().name.replace("'", '"')}
self.parent.base.execute_script(self.runner_script % format_map)
def close_old_windows(self):
handles = [item for item in self.webdriver.window_handles if item != self.runner_handle]
for handle in handles:
try:
self.webdriver.switch_to_window(handle)
self.webdriver.close()
except exceptions.NoSuchWindowException:
pass
self.webdriver.switch_to_window(self.runner_handle)
return self.runner_handle
def get_test_window(self, window_id, parent, timeout=5):
"""Find the test window amongst all the open windows.
This is assumed to be either the named window or the one after the parent in the list of
window handles
:param window_id: The DOM name of the Window
:param parent: The handle of the runner window
:param timeout: The time in seconds to wait for the window to appear. This is because in
some implementations there's a race between calling window.open and the
window being added to the list of WebDriver accessible windows."""
test_window = None
end_time = time.time() + timeout
while time.time() < end_time:
try:
# Try using the JSON serialization of the WindowProxy object,
# it's in Level 1 but nothing supports it yet
win_s = self.webdriver.execute_script("return window['%s'];" % window_id)
win_obj = json.loads(win_s)
test_window = win_obj["window-fcc6-11e5-b4f8-330a88ab9d7f"]
except Exception:
pass
if test_window is None:
after = self.webdriver.window_handles
if len(after) == 2:
test_window = next(iter(set(after) - set([parent])))
elif after[0] == parent and len(after) > 2:
# Hope the first one here is the test window
test_window = after[1]
if test_window is not None:
assert test_window != parent
return test_window
time.sleep(0.1)
raise Exception("unable to find test window")
class SeleniumSelectorProtocolPart(SelectorProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def elements_by_selector(self, selector):
return self.webdriver.find_elements_by_css_selector(selector)
class SeleniumClickProtocolPart(ClickProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def element(self, element):
return element.click()
class SeleniumSendKeysProtocolPart(SendKeysProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def send_keys(self, element, keys):
return element.send_keys(keys)
class SeleniumActionSequenceProtocolPart(ActionSequenceProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def send_actions(self, actions):
self.webdriver.execute(Command.W3C_ACTIONS, {"actions": actions})
class SeleniumTestDriverProtocolPart(TestDriverProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def send_message(self, message_type, status, message=None):
obj = {
"type": "testdriver-%s" % str(message_type),
"status": str(status)
}
if message:
obj["message"] = str(message)
self.webdriver.execute_script("window.postMessage(%s, '*')" % json.dumps(obj))
class SeleniumProtocol(Protocol):
implements = [SeleniumBaseProtocolPart,
SeleniumTestharnessProtocolPart,
SeleniumSelectorProtocolPart,
SeleniumClickProtocolPart,
SeleniumSendKeysProtocolPart,
SeleniumTestDriverProtocolPart,
SeleniumActionSequenceProtocolPart]
def __init__(self, executor, browser, capabilities, **kwargs):
do_delayed_imports()
super(SeleniumProtocol, self).__init__(executor, browser)
self.capabilities = capabilities
self.url = browser.webdriver_url
self.webdriver = None
def connect(self):
"""Connect to browser via Selenium's WebDriver implementation."""
self.logger.debug("Connecting to Selenium on URL: %s" % self.url)
self.webdriver = webdriver.Remote(command_executor=RemoteConnection(self.url.strip("/"),
resolve_ip=False),
desired_capabilities=self.capabilities)
def teardown(self):
self.logger.debug("Hanging up on Selenium session")
try:
self.webdriver.quit()
except Exception:
pass
del self.webdriver
def is_alive(self):
try:
# Get a simple property over the connection
self.webdriver.current_window_handle
# TODO what exception?
except (socket.timeout, exceptions.ErrorInResponseException):
return False
return True
def after_connect(self):
self.testharness.load_runner(self.executor.last_environment["protocol"])
class SeleniumRun(object):
def __init__(self, func, protocol, url, timeout):
self.func = func
self.result = None
self.protocol = protocol
self.url = url
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
timeout = self.timeout
try:
self.protocol.base.set_timeout((timeout + extra_timeout))
except exceptions.ErrorInResponseException:
self.logger.error("Lost WebDriver connection")
return Stop
executor = threading.Thread(target=self._run)
executor.start()
flag = self.result_flag.wait(timeout + 2 * extra_timeout)
if self.result is None:
if flag:
# flag is True unless we timeout; this *shouldn't* happen, but
# it can if self._run fails to set self.result due to raising
self.result = False, ("INTERNAL-ERROR", "self._run didn't set a result")
else:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.protocol, self.url, self.timeout)
except exceptions.TimeoutException:
self.result = False, ("EXTERNAL-TIMEOUT", None)
except (socket.timeout, exceptions.ErrorInResponseException):
self.result = False, ("CRASH", None)
except Exception as e:
message = str(getattr(e, "message", ""))
if message:
message += "\n"
message += traceback.format_exc(e)
self.result = False, ("INTERNAL-ERROR", message)
finally:
self.result_flag.set()
class SeleniumTestharnessExecutor(TestharnessExecutor):
supports_testdriver = True
def __init__(self, browser, server_config, timeout_multiplier=1,
close_after_done=True, capabilities=None, debug_info=None,
**kwargs):
"""Selenium-based executor for testharness.js tests"""
TestharnessExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = SeleniumProtocol(self, browser, capabilities)
with open(os.path.join(here, "testharness_webdriver_resume.js")) as f:
self.script_resume = f.read()
self.close_after_done = close_after_done
self.window_id = str(uuid.uuid4())
def is_alive(self):
return self.protocol.is_alive()
def on_environment_change(self, new_environment):
if new_environment["protocol"] != self.last_environment["protocol"]:
self.protocol.testharness.load_runner(new_environment["protocol"])
def do_test(self, test):
url = self.test_url(test)
success, data = SeleniumRun(self.do_testharness,
self.protocol,
url,
test.timeout * self.timeout_multiplier).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_testharness(self, protocol, url, timeout):
format_map = {"url": strip_server(url)}
parent_window = protocol.testharness.close_old_windows()
# Now start the test harness
protocol.base.execute_script("window.open('about:blank', '%s', 'noopener')" % self.window_id)
test_window = protocol.testharness.get_test_window(self.window_id, parent_window,
timeout=5*self.timeout_multiplier)
self.protocol.base.set_window(test_window)
protocol.webdriver.get(url)
handler = CallbackHandler(self.logger, protocol, test_window)
while True:
result = protocol.base.execute_script(
self.script_resume % format_map, async=True)
done, rv = handler(result)
if done:
break
return rv
class SeleniumRefTestExecutor(RefTestExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
screenshot_cache=None, close_after_done=True,
debug_info=None, capabilities=None, **kwargs):
"""Selenium WebDriver-based executor for reftests"""
RefTestExecutor.__init__(self,
browser,
server_config,
screenshot_cache=screenshot_cache,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = SeleniumProtocol(self, browser,
capabilities=capabilities)
self.implementation = RefTestImplementation(self)
self.close_after_done = close_after_done
self.has_window = False
with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
self.wait_script = f.read()
def reset(self):
self.implementation.reset()
def is_alive(self):
return self.protocol.is_alive()
def do_test(self, test):
self.logger.info("Test requires OS-level window focus")
width_offset, height_offset = self.protocol.webdriver.execute_script(
"""return [window.outerWidth - window.innerWidth,
window.outerHeight - window.innerHeight];"""
)
self.protocol.webdriver.set_window_size(0, 0)
self.protocol.webdriver.set_window_position(800 + width_offset, 600 + height_offset)
result = self.implementation.run_test(test)
return self.convert_result(test, result)
def screenshot(self, test, viewport_size, dpi):
# https://github.com/w3c/wptrunner/issues/166
assert viewport_size is None
assert dpi is None
return SeleniumRun(self._screenshot,
self.protocol,
self.test_url(test),
test.timeout).run()
def _screenshot(self, protocol, url, timeout):
webdriver = protocol.webdriver
webdriver.get(url)
webdriver.execute_async_script(self.wait_script)
screenshot = webdriver.get_screenshot_as_base64()
# strip off the data:img/png, part of the url
if screenshot.startswith("data:image/png;base64,"):
screenshot = screenshot.split(",", 1)[1]
return screenshot
| mpl-2.0 |
chaen/DIRAC | DataManagementSystem/scripts/dirac-dms-create-removal-request.py | 4 | 3446 | #!/usr/bin/env python
""" Create a DIRAC RemoveReplica|RemoveFile request to be executed by the RMS
"""
from __future__ import print_function
__RCSID__ = "ea64b42 (2012-07-29 16:45:05 +0200) ricardo <[email protected]>"
import os
from hashlib import md5
import time
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.List import breakListIntoChunks
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[0],
__doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... SE LFN ...' % Script.scriptName,
'Arguments:',
' SE: StorageElement|All',
' LFN: LFN or file containing a List of LFNs' ] ) )
Script.parseCommandLine( ignoreErrors = False )
args = Script.getPositionalArgs()
if len( args ) < 2:
Script.showHelp()
targetSE = args.pop( 0 )
lfns = []
for inputFileName in args:
if os.path.exists( inputFileName ):
inputFile = open( inputFileName, 'r' )
string = inputFile.read()
inputFile.close()
lfns.extend( [ lfn.strip() for lfn in string.splitlines() ] )
else:
lfns.append( inputFileName )
from DIRAC.Resources.Storage.StorageElement import StorageElement
import DIRAC
# Check if the provided SE is OK
if targetSE != 'All':
se = StorageElement( targetSE )
if not se.valid:
print(se.errorReason)
print()
Script.showHelp()
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
reqClient = ReqClient()
fc = FileCatalog()
requestOperation = 'RemoveReplica'
if targetSE == 'All':
requestOperation = 'RemoveFile'
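# Build one RMS Request per chunk of at most 100 LFNs: each Request carries a
# single RemoveReplica/RemoveFile Operation, and every LFN (together with its
# catalog metadata) is attached to that Operation as a File object.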
for lfnList in breakListIntoChunks( lfns, 100 ):
oRequest = Request()
requestName = "%s_%s" % ( md5( repr( time.time() ) ).hexdigest()[:16], md5( repr( time.time() ) ).hexdigest()[:16] )
oRequest.RequestName = requestName
oOperation = Operation()
oOperation.Type = requestOperation
oOperation.TargetSE = targetSE
res = fc.getFileMetadata( lfnList )
if not res['OK']:
print("Can't get file metadata: %s" % res['Message'])
DIRAC.exit( 1 )
if res['Value']['Failed']:
print("Could not get the file metadata of the following, so skipping them:")
for fFile in res['Value']['Failed']:
print(fFile)
lfnMetadata = res['Value']['Successful']
for lfn in lfnMetadata:
rarFile = File()
rarFile.LFN = lfn
rarFile.Size = lfnMetadata[lfn]['Size']
rarFile.Checksum = lfnMetadata[lfn]['Checksum']
rarFile.GUID = lfnMetadata[lfn]['GUID']
rarFile.ChecksumType = 'ADLER32'
oOperation.addFile( rarFile )
oRequest.addOperation( oOperation )
isValid = RequestValidator().validate( oRequest )
if not isValid['OK']:
print("Request is not valid: ", isValid['Message'])
DIRAC.exit( 1 )
result = reqClient.putRequest( oRequest )
if result['OK']:
print('Request %d Submitted' % result['Value'])
else:
print('Failed to submit Request: ', result['Message'])
| gpl-3.0 |
jackkiej/SickRage | lib/guessit/rules/properties/mimetype.py | 32 | 1032 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
mimetype property
"""
import mimetypes
from rebulk import Rebulk, CustomRule, POST_PROCESS
from rebulk.match import Match
from ...rules.processors import Processors
def mimetype():
"""
Builder for rebulk object.
:return: Created Rebulk object
:rtype: Rebulk
"""
return Rebulk().rules(Mimetype)
class Mimetype(CustomRule):
"""
Mimetype post processor
:param matches:
:type matches:
:return:
:rtype:
"""
priority = POST_PROCESS
dependency = Processors
def when(self, matches, context):
mime, _ = mimetypes.guess_type(matches.input_string, strict=False)
return mime
def then(self, matches, when_response, context):
mime = when_response
matches.append(Match(len(matches.input_string), len(matches.input_string), name='mimetype', value=mime))
@property
def properties(self):
"""
Properties for this rule.
"""
return {'mimetype': [None]}
| gpl-3.0 |
jarshwah/django | django/utils/dateformat.py | 57 | 11885 | """
PHP date() style date formatting
See http://www.php.net/date for format strings
Usage:
>>> import datetime
>>> d = datetime.datetime.now()
>>> df = DateFormat(d)
>>> print(df.format('jS F Y H:i'))
7th October 2003 11:39
>>>
"""
from __future__ import unicode_literals
import calendar
import datetime
import re
import time
from django.utils import six
from django.utils.dates import (
MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR,
)
from django.utils.encoding import force_text
from django.utils.timezone import get_default_timezone, is_aware, is_naive
from django.utils.translation import ugettext as _
re_formatchars = re.compile(r'(?<!\\)([aAbBcdDeEfFgGhHiIjlLmMnNoOPrsStTUuwWyYzZ])')
re_escaped = re.compile(r'\\(.)')
class Formatter(object):
def format(self, formatstr):
pieces = []
for i, piece in enumerate(re_formatchars.split(force_text(formatstr))):
if i % 2:
if type(self.data) is datetime.date and hasattr(TimeFormat, piece):
raise TypeError(
"The format for date objects may not contain "
"time-related format specifiers (found '%s')." % piece
)
pieces.append(force_text(getattr(self, piece)()))
elif piece:
pieces.append(re_escaped.sub(r'\1', piece))
return ''.join(pieces)
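# Characters listed in re_formatchars are expanded through the method of the
# same name; a backslash-escaped character is emitted literally. Illustrative
# example (using the same date as the module docstring):
#   DateFormat(datetime.datetime(2003, 10, 7)).format(r'jS \o\f F')
#   -> '7th of October'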
class TimeFormat(Formatter):
def __init__(self, obj):
self.data = obj
self.timezone = None
# We only support timezone when formatting datetime objects,
# not date objects (timezone information not appropriate),
# or time objects (against established django policy).
if isinstance(obj, datetime.datetime):
if is_naive(obj):
self.timezone = get_default_timezone()
else:
self.timezone = obj.tzinfo
def a(self):
"'a.m.' or 'p.m.'"
if self.data.hour > 11:
return _('p.m.')
return _('a.m.')
def A(self):
"'AM' or 'PM'"
if self.data.hour > 11:
return _('PM')
return _('AM')
def B(self):
"Swatch Internet time"
raise NotImplementedError('may be implemented in a future release')
def e(self):
"""
Timezone name.
If timezone information is not available, this method returns
an empty string.
"""
if not self.timezone:
return ""
try:
if hasattr(self.data, 'tzinfo') and self.data.tzinfo:
# Have to use tzinfo.tzname and not datetime.tzname
# because datetime.tzname does not expect Unicode
return self.data.tzinfo.tzname(self.data) or ""
except NotImplementedError:
pass
return ""
def f(self):
"""
Time, in 12-hour hours and minutes, with minutes left off if they're
zero.
Examples: '1', '1:30', '2:05', '2'
Proprietary extension.
"""
if self.data.minute == 0:
return self.g()
return '%s:%s' % (self.g(), self.i())
def g(self):
"Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
if self.data.hour == 0:
return 12
if self.data.hour > 12:
return self.data.hour - 12
return self.data.hour
def G(self):
"Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
return self.data.hour
def h(self):
"Hour, 12-hour format; i.e. '01' to '12'"
return '%02d' % self.g()
def H(self):
"Hour, 24-hour format; i.e. '00' to '23'"
return '%02d' % self.G()
def i(self):
"Minutes; i.e. '00' to '59'"
return '%02d' % self.data.minute
def O(self):
"""
Difference to Greenwich time in hours; e.g. '+0200', '-0430'.
If timezone information is not available, this method returns
an empty string.
"""
if not self.timezone:
return ""
seconds = self.Z()
if seconds == "":
return ""
sign = '-' if seconds < 0 else '+'
seconds = abs(seconds)
return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)
def P(self):
"""
Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off
if they're zero and the strings 'midnight' and 'noon' if appropriate.
Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
Proprietary extension.
"""
if self.data.minute == 0 and self.data.hour == 0:
return _('midnight')
if self.data.minute == 0 and self.data.hour == 12:
return _('noon')
return '%s %s' % (self.f(), self.a())
def s(self):
"Seconds; i.e. '00' to '59'"
return '%02d' % self.data.second
def T(self):
"""
Time zone of this machine; e.g. 'EST' or 'MDT'.
If timezone information is not available, this method returns
an empty string.
"""
if not self.timezone:
return ""
name = None
try:
name = self.timezone.tzname(self.data)
except Exception:
# pytz raises AmbiguousTimeError during the autumn DST change.
# This happens mainly when __init__ receives a naive datetime
# and sets self.timezone = get_default_timezone().
pass
if name is None:
name = self.format('O')
return six.text_type(name)
def u(self):
"Microseconds; i.e. '000000' to '999999'"
return '%06d' % self.data.microsecond
def Z(self):
"""
Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
timezones west of UTC is always negative, and for those east of UTC is
always positive.
If timezone information is not available, this method returns
an empty string.
"""
if not self.timezone:
return ""
try:
offset = self.timezone.utcoffset(self.data)
except Exception:
# pytz raises AmbiguousTimeError during the autumn DST change.
# This happens mainly when __init__ receives a naive datetime
# and sets self.timezone = get_default_timezone().
return ""
# `offset` is a datetime.timedelta. For negative values (to the west of
# UTC) only days can be negative (days=-1) and seconds are always
# positive. e.g. UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0)
# Positive offsets have days=0
return offset.days * 86400 + offset.seconds
class DateFormat(TimeFormat):
year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]
def b(self):
"Month, textual, 3 letters, lowercase; e.g. 'jan'"
return MONTHS_3[self.data.month]
def c(self):
"""
ISO 8601 Format
Example : '2008-01-02T10:30:00.000123'
"""
return self.data.isoformat()
def d(self):
"Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
return '%02d' % self.data.day
def D(self):
"Day of the week, textual, 3 letters; e.g. 'Fri'"
return WEEKDAYS_ABBR[self.data.weekday()]
def E(self):
"Alternative month names as required by some locales. Proprietary extension."
return MONTHS_ALT[self.data.month]
def F(self):
"Month, textual, long; e.g. 'January'"
return MONTHS[self.data.month]
def I(self):
"'1' if Daylight Savings Time, '0' otherwise."
try:
if self.timezone and self.timezone.dst(self.data):
return '1'
else:
return '0'
except Exception:
# pytz raises AmbiguousTimeError during the autumn DST change.
# This happens mainly when __init__ receives a naive datetime
# and sets self.timezone = get_default_timezone().
return ''
def j(self):
"Day of the month without leading zeros; i.e. '1' to '31'"
return self.data.day
def l(self):
"Day of the week, textual, long; e.g. 'Friday'"
return WEEKDAYS[self.data.weekday()]
def L(self):
"Boolean for whether it is a leap year; i.e. True or False"
return calendar.isleap(self.data.year)
def m(self):
"Month; i.e. '01' to '12'"
return '%02d' % self.data.month
def M(self):
"Month, textual, 3 letters; e.g. 'Jan'"
return MONTHS_3[self.data.month].title()
def n(self):
"Month without leading zeros; i.e. '1' to '12'"
return self.data.month
def N(self):
"Month abbreviation in Associated Press style. Proprietary extension."
return MONTHS_AP[self.data.month]
def o(self):
"ISO 8601 year number matching the ISO week number (W)"
return self.data.isocalendar()[0]
def r(self):
"RFC 5322 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
return self.format('D, j M Y H:i:s O')
def S(self):
"English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"
if self.data.day in (11, 12, 13): # Special case
return 'th'
last = self.data.day % 10
if last == 1:
return 'st'
if last == 2:
return 'nd'
if last == 3:
return 'rd'
return 'th'
def t(self):
"Number of days in the given month; i.e. '28' to '31'"
return '%02d' % calendar.monthrange(self.data.year, self.data.month)[1]
def U(self):
"Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
if isinstance(self.data, datetime.datetime) and is_aware(self.data):
return int(calendar.timegm(self.data.utctimetuple()))
else:
return int(time.mktime(self.data.timetuple()))
def w(self):
"Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
return (self.data.weekday() + 1) % 7
def W(self):
"ISO-8601 week number of year, weeks starting on Monday"
# Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt
week_number = None
jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1
weekday = self.data.weekday() + 1
day_of_year = self.z()
if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4:
if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year - 1)):
week_number = 53
else:
week_number = 52
else:
if calendar.isleap(self.data.year):
i = 366
else:
i = 365
if (i - day_of_year) < (4 - weekday):
week_number = 1
else:
j = day_of_year + (7 - weekday) + (jan1_weekday - 1)
week_number = j // 7
if jan1_weekday > 4:
week_number -= 1
return week_number
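# Worked example: 2003-10-07 (a Tuesday, day_of_year == 280) with Jan 1st 2003
# falling on a Wednesday (jan1_weekday == 3) gives j = 280 + 5 + 2 = 287, so
# W() returns 287 // 7 == 41, matching datetime.date(2003, 10, 7).isocalendar()[1].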
def y(self):
"Year, 2 digits; e.g. '99'"
return six.text_type(self.data.year)[2:]
def Y(self):
"Year, 4 digits; e.g. '1999'"
return self.data.year
def z(self):
"Day of the year; i.e. '0' to '365'"
doy = self.year_days[self.data.month] + self.data.day
if self.L() and self.data.month > 2:
doy += 1
return doy
def format(value, format_string):
"Convenience function"
df = DateFormat(value)
return df.format(format_string)
def time_format(value, format_string):
"Convenience function"
tf = TimeFormat(value)
return tf.format(format_string)
| bsd-3-clause |
JuniorCru/coastline | coastline/config.py | 1 | 1643 | import json
BASE_ENV = 'common'
ENV_OPTIONS = ['development', 'staging', 'production']
# Currently Config class is just a dict sub-class.
#
# A dict-like interface may be all we need, but we use a sub-class so we
# can change construction later, possibly use an IoC container or type
# checking, etc.
class Config(dict):
def __init__(self, *args, file_path=None, env_name=None, **kwargs):
self.file_path = file_path
self.env_name = env_name
super().__init__(*args, **kwargs)
# Add a nicer repr which includes the class name and extra attrs
def __repr__(self):
return ('{cls_name}('
'{parent_repr}, '
'file_path={file_path!r}, '
'env_name={env_name!r})'
).format(
cls_name=self.__class__.__name__,
parent_repr=super().__repr__(),
file_path=self.file_path,
env_name=self.env_name)
# Even an "empty" Config object is considered "true"
def __bool__(self):
return True
def fold_dicts(*dicts):
if not dicts:
return None
result = dicts[0]
for d in dicts[1:]:
result.update(d)
return result
def fold_tree_by_env_name(json_tree, env_name):
base_tree = json_tree.get(BASE_ENV, {})
env_tree = json_tree.get(env_name, {})
return fold_dicts(base_tree, env_tree)
def config_from_path(config_path, env_name):
json_tree = json.load(open(config_path))
folded_tree = fold_tree_by_env_name(json_tree, env_name)
return Config(folded_tree, file_path=config_path, env_name=env_name)
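# Illustrative sketch (not part of the original module): the JSON layout below
# is an assumption inferred from fold_tree_by_env_name(), i.e. a top-level
# "common" section plus one section per environment name.
if __name__ == '__main__':
    sample_tree = {
        'common': {'debug': False, 'database': 'app.sqlite'},
        'development': {'debug': True},
    }
    merged = fold_tree_by_env_name(sample_tree, 'development')
    # Environment-specific values override the "common" defaults.
    print(Config(merged, file_path=None, env_name='development'))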
| mit |
2013Commons/hue | desktop/core/ext-py/PyYAML-3.09/tests/lib3/test_input_output.py | 57 | 6057 |
import yaml
import codecs, io, tempfile, os, os.path
def test_unicode_input(unicode_filename, verbose=False):
data = open(unicode_filename, 'rb').read().decode('utf-8')
value = ' '.join(data.split())
output = yaml.load(data)
assert output == value, (output, value)
output = yaml.load(io.StringIO(data))
assert output == value, (output, value)
for input in [data.encode('utf-8'),
codecs.BOM_UTF8+data.encode('utf-8'),
codecs.BOM_UTF16_BE+data.encode('utf-16-be'),
codecs.BOM_UTF16_LE+data.encode('utf-16-le')]:
if verbose:
print("INPUT:", repr(input[:10]), "...")
output = yaml.load(input)
assert output == value, (output, value)
output = yaml.load(io.BytesIO(input))
assert output == value, (output, value)
test_unicode_input.unittest = ['.unicode']
def test_unicode_input_errors(unicode_filename, verbose=False):
data = open(unicode_filename, 'rb').read().decode('utf-8')
for input in [data.encode('latin1', 'ignore'),
data.encode('utf-16-be'), data.encode('utf-16-le'),
codecs.BOM_UTF8+data.encode('utf-16-be'),
codecs.BOM_UTF16_BE+data.encode('utf-16-le'),
codecs.BOM_UTF16_LE+data.encode('utf-8')+b'!']:
try:
yaml.load(input)
except yaml.YAMLError as exc:
if verbose:
print(exc)
else:
raise AssertionError("expected an exception")
try:
yaml.load(io.BytesIO(input))
except yaml.YAMLError as exc:
if verbose:
print(exc)
else:
raise AssertionError("expected an exception")
test_unicode_input_errors.unittest = ['.unicode']
def test_unicode_output(unicode_filename, verbose=False):
data = open(unicode_filename, 'rb').read().decode('utf-8')
value = ' '.join(data.split())
for allow_unicode in [False, True]:
data1 = yaml.dump(value, allow_unicode=allow_unicode)
for encoding in [None, 'utf-8', 'utf-16-be', 'utf-16-le']:
stream = io.StringIO()
yaml.dump(value, stream, encoding=encoding, allow_unicode=allow_unicode)
data2 = stream.getvalue()
data3 = yaml.dump(value, encoding=encoding, allow_unicode=allow_unicode)
if encoding is not None:
assert isinstance(data3, bytes)
data3 = data3.decode(encoding)
stream = io.BytesIO()
if encoding is None:
try:
yaml.dump(value, stream, encoding=encoding, allow_unicode=allow_unicode)
except TypeError as exc:
if verbose:
print(exc)
data4 = None
else:
raise AssertionError("expected an exception")
else:
yaml.dump(value, stream, encoding=encoding, allow_unicode=allow_unicode)
data4 = stream.getvalue()
if verbose:
print("BYTES:", data4[:50])
data4 = data4.decode(encoding)
for copy in [data1, data2, data3, data4]:
if copy is None:
continue
assert isinstance(copy, str)
if allow_unicode:
try:
copy[4:].encode('ascii')
except UnicodeEncodeError as exc:
if verbose:
print(exc)
else:
raise AssertionError("expected an exception")
else:
copy[4:].encode('ascii')
assert isinstance(data1, str), (type(data1), encoding)
assert isinstance(data2, str), (type(data2), encoding)
test_unicode_output.unittest = ['.unicode']
def test_file_output(unicode_filename, verbose=False):
data = open(unicode_filename, 'rb').read().decode('utf-8')
handle, filename = tempfile.mkstemp()
os.close(handle)
try:
stream = io.StringIO()
yaml.dump(data, stream, allow_unicode=True)
data1 = stream.getvalue()
stream = io.BytesIO()
yaml.dump(data, stream, encoding='utf-16-le', allow_unicode=True)
data2 = stream.getvalue().decode('utf-16-le')[1:]
stream = open(filename, 'w', encoding='utf-16-le')
yaml.dump(data, stream, allow_unicode=True)
stream.close()
data3 = open(filename, 'r', encoding='utf-16-le').read()
stream = open(filename, 'wb')
yaml.dump(data, stream, encoding='utf-8', allow_unicode=True)
stream.close()
data4 = open(filename, 'r', encoding='utf-8').read()
assert data1 == data2, (data1, data2)
assert data1 == data3, (data1, data3)
assert data1 == data4, (data1, data4)
finally:
if os.path.exists(filename):
os.unlink(filename)
test_file_output.unittest = ['.unicode']
def test_unicode_transfer(unicode_filename, verbose=False):
data = open(unicode_filename, 'rb').read().decode('utf-8')
for encoding in [None, 'utf-8', 'utf-16-be', 'utf-16-le']:
input = data
if encoding is not None:
input = ('\ufeff'+input).encode(encoding)
output1 = yaml.emit(yaml.parse(input), allow_unicode=True)
if encoding is None:
stream = io.StringIO()
else:
stream = io.BytesIO()
yaml.emit(yaml.parse(input), stream, allow_unicode=True)
output2 = stream.getvalue()
assert isinstance(output1, str), (type(output1), encoding)
if encoding is None:
assert isinstance(output2, str), (type(output1), encoding)
else:
assert isinstance(output2, bytes), (type(output1), encoding)
output2.decode(encoding)
test_unicode_transfer.unittest = ['.unicode']
if __name__ == '__main__':
import test_appliance
test_appliance.run(globals())
| apache-2.0 |
jctanner/scrapers | scommerce_com.py | 1 | 4085 | #!/usr/bin/env python
# http://sccommerce.com/sc-industrial-directory
import requests
import requests_cache
from pprint import pprint
from bs4 import BeautifulSoup
from lib.csvtools import dict_to_csv
def main():
# This page has a search form that must be submitted to get the list of companies.
# To post data to a form in python, a dictionary of parameters should be created
# and passed into the post url. The parameters and values for this form were found
# by opening the developer tools in Firefox and inspecting the parameters sent
# by pressing the 'search' button.
companies = {}
requests_cache.install_cache('scommerce_cache')
# Use sessions to persist cookies and formdata
baseurl = 'http://sccommerce.com'
s = requests.Session()
r = s.get('%s/sc-industrial-directory' % baseurl)
rsoup = BeautifulSoup(r.text, 'html.parser')
souplines = [x for x in rsoup.prettify().split('\n')]
# Grab the unique form ID specific to this session ...
# <input type="hidden" name="form_build_id" value="form-ucL4nG9DvogNwbCLlTuXeHfME05gn4KrK1AA1mPmW0M" />
iform = rsoup.find('input', {'name': 'form_build_id', 'type': 'hidden'})
params = {'keywords': '',
'name': '',
'operation_type': '',
'employee_count': 0,
'parent_company': '',
'op': 'Search',
'form_build_id': iform.attrs['value'],
'form_id': 'scapi_search_form' }
# Keep all the result page(s) soups
result_soups = []
# Keep all the company pages
company_pages = []
# Post the parameters
pr = s.post('http://sccommerce.com/sc-industrial-directory', data=params)
prsoup = BeautifulSoup(pr.text, 'html.parser')
result_soups.append(prsoup)
# Iterate through every page of results by following the 'next' href ...
next_page = prsoup.find('a', {'class': 'page-next active'}).attrs['href']
print next_page
while next_page:
try:
nr = s.get('%s/%s' % (baseurl, next_page))
nrsoup = BeautifulSoup(nr.text, 'html.parser')
result_soups.append(nrsoup)
next_page = nrsoup.find('a', {'class': 'page-next active'}).attrs['href']
print next_page
except Exception as e:
print e
next_page = None
# Results are in <table class="results-table">
for rs in result_soups:
rtable = rs.find('table', {'class': 'results-table'})
#for th in rtable.findAll('th'):
# print th
for tr in rtable.findAll('tr'):
#print tr
link = tr.find('a').attrs['href']
link = baseurl + link
if '/company/' in link:
#print link
if link not in company_pages:
company_pages.append(link)
'''
<h1 class="title">680 Screened Tees</h1>
<div class="details">
<p>
<b>Address:</b> 680 Violet St</p>
<p>
<b>City:</b> West Columbia</p>
<p>
<b>Zip:</b> 29169</p>
'''
# sort the company pages
company_pages = sorted(set(company_pages))
total_companies = len(company_pages)
# iterate through each and get details
for idx,cp in enumerate(company_pages):
cdata = {}
print idx,total_companies,cp
cr = s.get(cp)  # cp is already a full URL; next_page is None once pagination is exhausted
csoup = BeautifulSoup(cr.text, 'html.parser')
cname = csoup.find('h1', {'class': 'title'}).text.strip().encode('ascii', 'ignore')
cdata['name'] = cname
ddiv = csoup.find('div', {'class': 'details'})
for par in ddiv.findAll('p'):
#print par
parts = par.text.strip().split(':', 1)
key = parts[0].strip().encode('ascii', 'ignore')
cdata[key] = parts[1].strip().encode('ascii', 'ignore')
companies[cname] = cdata
pprint(cdata)
#import pdb; pdb.set_trace()
dict_to_csv(companies, 'scommerce.csv')
#import pdb; pdb.set_trace()
if __name__ == "__main__":
main()
| apache-2.0 |
mcmaxwell/idea_digital_agency | idea/feincms/module/mixins.py | 2 | 7981 | from __future__ import absolute_import, unicode_literals
from collections import OrderedDict
from django.http import Http404
from django.template import Template
from django.utils.decorators import method_decorator
from django.views import generic
from django.views.generic.base import TemplateResponseMixin
from feincms import settings
from feincms.apps import standalone
class ContentModelMixin(object):
"""
Mixin for ``feincms.models.Base`` subclasses which need some degree of
additional control over the request-response cycle.
"""
#: Collection of request processors
request_processors = None
#: Collection of response processors
response_processors = None
@classmethod
def register_request_processor(cls, fn, key=None):
"""
Registers the passed callable as request processor. A request processor
always receives two arguments, the current object and the request.
"""
if cls.request_processors is None:
cls.request_processors = OrderedDict()
cls.request_processors[fn if key is None else key] = fn
@classmethod
def register_response_processor(cls, fn, key=None):
"""
Registers the passed callable as response processor. A response
processor always receives three arguments, the current object, the
request and the response.
"""
if cls.response_processors is None:
cls.response_processors = OrderedDict()
cls.response_processors[fn if key is None else key] = fn
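# Illustrative sketch (not part of this module): a concrete ``Base`` subclass
# such as a page model could hook these processors as shown below; the
# function names and the ``redirect_to`` field are assumptions, not feincms API.
#
#     def redirect_request_processor(page, request):
#         if page.redirect_to:
#             return HttpResponseRedirect(page.redirect_to)  # short-circuits rendering
#
#     def cache_response_processor(page, request, response):
#         response['Cache-Control'] = 'max-age=300'
#
#     Page.register_request_processor(redirect_request_processor, key='redirect')
#     Page.register_response_processor(cache_response_processor, key='cache')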
# TODO Implement admin_urlname templatetag protocol
@property
def app_label(self):
"""
Implement the admin_urlname templatetag protocol, so one can easily
generate an admin link using ::
{% url page|admin_urlname:'change' page.id %}
"""
return self._meta.app_label
@property
def model_name(self):
"See app_label"
return self.__class__.__name__.lower()
class ContentObjectMixin(TemplateResponseMixin):
"""
Mixin for Django's class based views which knows how to handle
``ContentModelMixin`` detail pages.
This is a mixture of Django's ``SingleObjectMixin`` and
``TemplateResponseMixin`` conceptually to support FeinCMS'
``ApplicationContent`` inheritance. It does not inherit
``SingleObjectMixin`` however, because that would set a
precedence for the way how detail objects are determined
(and would f.e. make the page and blog module implementation
harder).
"""
context_object_name = None
def handler(self, request, *args, **kwargs):
if not hasattr(self.request, '_feincms_extra_context'):
self.request._feincms_extra_context = {}
r = self.run_request_processors()
if r:
return r
r = self.process_content_types()
if r:
return r
response = self.render_to_response(self.get_context_data())
r = self.finalize_content_types(response)
if r:
return r
r = self.run_response_processors(response)
if r:
return r
return response
def get_template_names(self):
# According to the documentation this method is supposed to return
# a list. However, we can also return a Template instance...
if isinstance(self.template_name, (Template, list, tuple)):
return self.template_name
if self.template_name:
return [self.template_name]
self.object._needs_templates()
if self.object.template.path:
return [self.object.template.path]
# Hopefully someone else has a usable get_template_names()
# implementation...
return super(ContentObjectMixin, self).get_template_names()
def get_context_data(self, **kwargs):
context = self.request._feincms_extra_context
context[self.context_object_name or 'feincms_object'] = self.object
context.update(kwargs)
return super(ContentObjectMixin, self).get_context_data(**context)
@property
def __name__(self):
"""
Dummy property to make this handler behave like a normal function.
This property is used by django-debug-toolbar
"""
return self.__class__.__name__
def run_request_processors(self):
"""
Before rendering an object, run all registered request processors. A
request processor may peruse and modify the page or the request. It can
also return a ``HttpResponse`` for shortcutting the rendering and
returning that response immediately to the client.
"""
if not getattr(self.object, 'request_processors', None):
return
for fn in reversed(list(self.object.request_processors.values())):
r = fn(self.object, self.request)
if r:
return r
def run_response_processors(self, response):
"""
After rendering an object to a response, the registered response
processors are called to modify the response, eg. for setting cache or
expiration headers, keeping statistics, etc.
"""
if not getattr(self.object, 'response_processors', None):
return
for fn in self.object.response_processors.values():
r = fn(self.object, self.request, response)
if r:
return r
def process_content_types(self):
"""
Run the ``process`` method of all content types sporting one
"""
# store any Http404 exception for re-raising,
# if no content type wants to handle the current self.request
http404 = None
# did any content type successfully end processing?
successful = False
for content in self.object.content.all_of_type(tuple(
self.object._feincms_content_types_with_process)):
try:
r = content.process(self.request, view=self)
if r in (True, False):
successful = r
elif r:
return r
except Http404 as e:
http404 = e
if not successful:
if http404:
# re-raise stored Http404 exception
raise http404
extra_context = self.request._feincms_extra_context
if (not settings.FEINCMS_ALLOW_EXTRA_PATH and
extra_context.get('extra_path', '/') != '/' and
# XXX Already inside application content. I'm not sure
# whether this fix is really correct...
not extra_context.get('app_config')):
raise Http404(str('Not found (extra_path %r on %r)') % (
extra_context.get('extra_path', '/'),
self.object,
))
def finalize_content_types(self, response):
"""
Runs finalize() on content types having such a method, adds headers and
returns the final response.
"""
for content in self.object.content.all_of_type(tuple(
self.object._feincms_content_types_with_finalize)):
r = content.finalize(self.request, response)
if r:
return r
class ContentView(ContentObjectMixin, generic.DetailView):
def dispatch(self, request, *args, **kwargs):
if request.method.lower() not in self.http_method_names:
return self.http_method_not_allowed(request, *args, **kwargs)
self.request = request
self.args = args
self.kwargs = kwargs
self.object = self.get_object()
return self.handler(request, *args, **kwargs)
class StandaloneView(generic.View):
@method_decorator(standalone)
def dispatch(self, request, *args, **kwargs):
return super(StandaloneView, self).dispatch(request, *args, **kwargs)
| mit |
QuLogic/vispy | codegen/annotations.py | 18 | 17242 | # -*- coding: utf-8 -*-
# Copyright (c) 2013, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
This module contains manual annotations for the gl backends. Together
with the header files, we can generate the full ES 2.0 API.
Every function annotation consists of sections that apply to one or
more backends. If no backends are specified in the first section, it
applies to all backends.
"""
import ctypes
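# Illustrative example of the annotation format used below (a hypothetical
# function, not one of the real annotations): lines before the first "# ---"
# marker apply to every backend, each "# --- <backends>" line starts a
# backend-specific section, and a bare "()" line is replaced by the generated
# GL call at that position.
#
#     def clearColor(red, green, blue, alpha):
#         # --- gl es
#         ()
#         # --- pyopengl
#         GL.glClearColor(red, green, blue, alpha)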
## bind / gen / delete stuff
def deleteBuffer(buffer):
# --- gl es
n = 1
buffers = (ctypes.c_uint*n)(buffer)
()
# --- pyopengl
GL.glDeleteBuffers(1, [buffer])
def deleteFramebuffer(framebuffer):
# --- gl es
n = 1
framebuffers = (ctypes.c_uint*n)(framebuffer)
()
# --- pyopengl
FBO.glDeleteFramebuffers(1, [framebuffer])
def deleteRenderbuffer(renderbuffer):
# --- gl es
n = 1
renderbuffers = (ctypes.c_uint*n)(renderbuffer)
()
# --- pyopengl
FBO.glDeleteRenderbuffers(1, [renderbuffer])
def deleteTexture(texture):
# --- gl es
n = 1
textures = (ctypes.c_uint*n)(texture)
()
# --- pyopengl
GL.glDeleteTextures([texture])
def createBuffer():
# --- gl es
n = 1
buffers = (ctypes.c_uint*n)()
()
return buffers[0]
# --- pyopengl
return GL.glGenBuffers(1)
# --- mock
return 1
def createFramebuffer():
# --- gl es
n = 1
framebuffers = (ctypes.c_uint*n)()
()
return framebuffers[0]
# --- pyopengl
return FBO.glGenFramebuffers(1)
# --- mock
return 1
def createRenderbuffer():
# --- gl es
n = 1
renderbuffers = (ctypes.c_uint*n)()
()
return renderbuffers[0]
# --- pyopengl
return FBO.glGenRenderbuffers(1)
# --- mock
return 1
def createTexture():
# --- gl es
n = 1
textures = (ctypes.c_uint*n)()
()
return textures[0]
# --- pyopengl
return GL.glGenTextures(1)
# --- mock
return 1
## Image stuff
def texImage2D(target, level, internalformat, format, type, pixels):
border = 0
# --- gl es
if isinstance(pixels, (tuple, list)):
height, width = pixels
pixels = ctypes.c_void_p(0)
pixels = None
else:
if not pixels.flags['C_CONTIGUOUS']:
pixels = pixels.copy('C')
pixels_ = pixels
pixels = pixels_.ctypes.data
height, width = pixels_.shape[:2]
()
# --- pyopengl
if isinstance(pixels, (tuple, list)):
height, width = pixels
pixels = None
else:
height, width = pixels.shape[:2]
GL.glTexImage2D(target, level, internalformat, width, height, border, format, type, pixels)
def texSubImage2D(target, level, xoffset, yoffset, format, type, pixels):
# --- gl es
if not pixels.flags['C_CONTIGUOUS']:
pixels = pixels.copy('C')
pixels_ = pixels
pixels = pixels_.ctypes.data
height, width = pixels_.shape[:2]
()
# --- pyopengl
height, width = pixels.shape[:2]
GL.glTexSubImage2D(target, level, xoffset, yoffset, width, height, format, type, pixels)
def readPixels(x, y, width, height, format, type):
# --- gl es mock
# GL_ALPHA, GL_RGB, GL_RGBA
t = {6406:1, 6407:3, 6408:4}[format]
# GL_UNSIGNED_BYTE, GL_FLOAT
nb = {5121:1, 5126:4}[type]
size = int(width*height*t*nb)
# --- gl es
pixels = ctypes.create_string_buffer(size)
()
return pixels[:]
# --- mock
return size * b'\x00'
def compressedTexImage2D(target, level, internalformat, width, height, border=0, data=None):
# border = 0 # set in args
# --- gl es
if not data.flags['C_CONTIGUOUS']:
data = data.copy('C')
data_ = data
size = data_.size
data = data_.ctypes.data
()
# --- pyopengl
size = data.size
GL.glCompressedTexImage2D(target, level, internalformat, width, height, border, size, data)
def compressedTexSubImage2D(target, level, xoffset, yoffset, width, height, format, data):
# --- gl es
if not data.flags['C_CONTIGUOUS']:
data = data.copy('C')
data_ = data
size = data_.size
data = data_.ctypes.data
()
# --- pyopengl
size = data.size
GL.glCompressedTexSubImage2D(target, level, xoffset, yoffset, width, height, format, size, data)
## Buffer data
def bufferData(target, data, usage):
""" Data can be numpy array or the size of data to allocate.
"""
# --- gl es
if isinstance(data, int):
size = data
data = ctypes.c_voidp(0)
else:
if not data.flags['C_CONTIGUOUS'] or not data.flags['ALIGNED']:
data = data.copy('C')
data_ = data
size = data_.nbytes
data = data_.ctypes.data
()
# --- pyopengl
if isinstance(data, int):
size = data
data = None
else:
size = data.nbytes
GL.glBufferData(target, size, data, usage)
def bufferSubData(target, offset, data):
# --- gl es
if not data.flags['C_CONTIGUOUS']:
data = data.copy('C')
data_ = data
size = data_.nbytes
data = data_.ctypes.data
()
# --- pyopengl
size = data.nbytes
GL.glBufferSubData(target, offset, size, data)
def drawElements(mode, count, type, offset):
# --- gl es
if offset is None:
offset = ctypes.c_void_p(0)
elif isinstance(offset, ctypes.c_void_p):
pass
elif isinstance(offset, (int, ctypes.c_int)):
offset = ctypes.c_void_p(int(offset))
else:
if not offset.flags['C_CONTIGUOUS']:
offset = offset.copy('C')
offset_ = offset
offset = offset.ctypes.data
indices = offset
()
# --- pyopengl
if offset is None:
offset = ctypes.c_void_p(0)
elif isinstance(offset, (int, ctypes.c_int)):
offset = ctypes.c_void_p(int(offset))
()
def vertexAttribPointer(indx, size, type, normalized, stride, offset):
# --- gl es
if offset is None:
offset = ctypes.c_void_p(0)
elif isinstance(offset, ctypes.c_void_p):
pass
elif isinstance(offset, (int, ctypes.c_int)):
offset = ctypes.c_void_p(int(offset))
else:
if not offset.flags['C_CONTIGUOUS']:
offset = offset.copy('C')
offset_ = offset
offset = offset.ctypes.data
# We need to ensure that the data exists at draw time :(
# PyOpenGL does this too
key = '_vert_attr_'+str(indx)
setattr(glVertexAttribPointer, key, offset_)
ptr = offset
()
# --- pyopengl
if offset is None:
offset = ctypes.c_void_p(0)
elif isinstance(offset, (int, ctypes.c_int)):
offset = ctypes.c_void_p(int(offset))
()
def bindAttribLocation(program, index, name):
# --- gl es
name = ctypes.c_char_p(name.encode('utf-8'))
()
# --- pyopengl
name = name.encode('utf-8')
()
## Setters
def shaderSource(shader, source):
# Some implementations do not like getting a list of single chars
if isinstance(source, (tuple, list)):
strings = [s for s in source]
else:
strings = [source]
# --- gl es
count = len(strings)
string = (ctypes.c_char_p*count)(*[s.encode('utf-8') for s in strings])
length = (ctypes.c_int*count)(*[len(s) for s in strings])
()
# --- pyopengl
GL.glShaderSource(shader, strings)
## Getters
def _getBooleanv(pname):
# --- gl es
params = (ctypes.c_bool*1)()
()
return params[0]
def _getIntegerv(pname):
# --- gl es
n = 16
d = -2**31 # smallest 32bit integer
params = (ctypes.c_int*n)(*[d for i in range(n)])
()
params = [p for p in params if p!=d]
if len(params) == 1:
return params[0]
else:
return tuple(params)
def _getFloatv(pname):
# --- gl es
n = 16
d = float('Inf')
params = (ctypes.c_float*n)(*[d for i in range(n)])
()
params = [p for p in params if p!=d]
if len(params) == 1:
return params[0]
else:
return tuple(params)
# def _getString(pname):
# # --- gl es
# ()
# return res.value
# # --- mock
# return ''
def getParameter(pname):
if pname in [33902, 33901, 32773, 3106, 2931, 2928,
2849, 32824, 10752, 32938]:
# GL_ALIASED_LINE_WIDTH_RANGE GL_ALIASED_POINT_SIZE_RANGE
# GL_BLEND_COLOR GL_COLOR_CLEAR_VALUE GL_DEPTH_CLEAR_VALUE
# GL_DEPTH_RANGE GL_LINE_WIDTH GL_POLYGON_OFFSET_FACTOR
# GL_POLYGON_OFFSET_UNITS GL_SAMPLE_COVERAGE_VALUE
return _glGetFloatv(pname)
elif pname in [7936, 7937, 7938, 35724, 7939]:
# GL_VENDOR, GL_RENDERER, GL_VERSION, GL_SHADING_LANGUAGE_VERSION,
# GL_EXTENSIONS are strings
pass # string handled below
else:
return _glGetIntegerv(pname)
name = pname
# --- gl es
()
return ctypes.string_at(res).decode('utf-8') if res else ''
# --- pyopengl
res = GL.glGetString(pname)
return res.decode('utf-8')
def getUniform(program, location):
# --- gl es
n = 16
d = float('Inf')
params = (ctypes.c_float*n)(*[d for i in range(n)])
()
params = [p for p in params if p!=d]
if len(params) == 1:
return params[0]
else:
return tuple(params)
# --- pyopengl
n = 16
d = float('Inf')
params = (ctypes.c_float*n)(*[d for i in range(n)])
GL.glGetUniformfv(program, location, params)
params = [p for p in params if p!=d]
if len(params) == 1:
return params[0]
else:
return tuple(params)
def getVertexAttrib(index, pname):
# --- gl es
n = 4
d = float('Inf')
params = (ctypes.c_float*n)(*[d for i in range(n)])
()
params = [p for p in params if p!=d]
if len(params) == 1:
return params[0]
else:
return tuple(params)
# --- pyopengl
# From PyOpenGL v3.1.0 the glGetVertexAttribfv(index, pname) does
# work, but it always returns 4 values, with zeros in the empty
# spaces. We have no way to tell whether they are empty or genuine
# zeros. Fortunately, pyopengl also supports the old syntax.
n = 4
d = float('Inf')
params = (ctypes.c_float*n)(*[d for i in range(n)])
GL.glGetVertexAttribfv(index, pname, params)
params = [p for p in params if p!=d]
if len(params) == 1:
return params[0]
else:
return tuple(params)
def getTexParameter(target, pname):
# --- gl es
d = float('Inf')
params = (ctypes.c_float*1)(d)
()
return params[0]
def getActiveAttrib(program, index):
# --- gl es pyopengl
bufsize = 256
length = (ctypes.c_int*1)()
size = (ctypes.c_int*1)()
type = (ctypes.c_uint*1)()
name = ctypes.create_string_buffer(bufsize)
# --- gl es
()
name = name[:length[0]].decode('utf-8')
return name, size[0], type[0]
# --- pyopengl
# pyopengl has a bug, this is a patch
GL.glGetActiveAttrib(program, index, bufsize, length, size, type, name)
name = name[:length[0]].decode('utf-8')
return name, size[0], type[0]
# --- mock
return 'mock_val', 1, 5126
def getVertexAttribOffset(index, pname):
# --- gl es
pointer = (ctypes.c_void_p*1)()
()
return pointer[0] or 0
# --- pyopengl
try: # maybe the fixed it
()
except TypeError:
pointer = (ctypes.c_void_p*1)()
GL.glGetVertexAttribPointerv(index, pname, pointer)
return pointer[0] or 0
# --- mock
return 0
def getActiveUniform(program, index):
# --- gl es
bufsize = 256
length = (ctypes.c_int*1)()
size = (ctypes.c_int*1)()
type = (ctypes.c_uint*1)()
name = ctypes.create_string_buffer(bufsize)
()
name = name[:length[0]].decode('utf-8')
return name, size[0], type[0]
# --- pyopengl
name, size, type = GL.glGetActiveUniform(program, index)
return name.decode('utf-8'), size, type
def getAttachedShaders(program):
# --- gl es
maxcount = 256
count = (ctypes.c_int*1)()
shaders = (ctypes.c_uint*maxcount)()
()
return tuple(shaders[:count[0]])
def getAttribLocation(program, name):
# --- gl es
name = ctypes.c_char_p(name.encode('utf-8'))
()
return res
# --- pyopengl
name = name.encode('utf-8')
()
def getUniformLocation(program, name):
# --- gl es
name = ctypes.c_char_p(name.encode('utf-8'))
()
return res
# --- pyopengl
name = name.encode('utf-8')
()
def getProgramInfoLog(program):
# --- gl es
bufsize = 1024
length = (ctypes.c_int*1)()
infolog = ctypes.create_string_buffer(bufsize)
()
return infolog[:length[0]].decode('utf-8')
# --- pyopengl
res = GL.glGetProgramInfoLog(program)
return res.decode('utf-8') if isinstance(res, bytes) else res
def getShaderInfoLog(shader):
# --- gl es
bufsize = 1024
length = (ctypes.c_int*1)()
infolog = ctypes.create_string_buffer(bufsize)
()
return infolog[:length[0]].decode('utf-8')
# --- pyopengl
res = GL.glGetShaderInfoLog(shader)
return res.decode('utf-8') if isinstance(res, bytes) else res
def getProgramParameter(program, pname):
# --- gl es
params = (ctypes.c_int*1)()
()
return params[0]
def getShaderParameter(shader, pname):
# --- gl es
params = (ctypes.c_int*1)()
()
return params[0]
def getShaderPrecisionFormat(shadertype, precisiontype):
# --- gl es
range = (ctypes.c_int*1)()
precision = (ctypes.c_int*1)()
()
return range[0], precision[0]
def getShaderSource(shader):
# --- gl es
bufsize = 1024*1024
length = (ctypes.c_int*1)()
source = (ctypes.c_char*bufsize)()
()
return source.value[:length[0]].decode('utf-8')
# --- pyopengl
res = GL.glGetShaderSource(shader)
return res.decode('utf-8')
def getBufferParameter(target, pname):
# --- gl es
d = -2**31 # smallest 32bit integer
params = (ctypes.c_int*1)(d)
()
return params[0]
def getFramebufferAttachmentParameter(target, attachment, pname):
# --- gl es
d = -2**31 # smallest 32bit integer
params = (ctypes.c_int*1)(d)
()
return params[0]
# --- pyopengl
d = -2**31 # smallest 32bit integer
params = (ctypes.c_int*1)(d)
FBO.glGetFramebufferAttachmentParameteriv(target, attachment, pname, params)
return params[0]
def getRenderbufferParameter(target, pname):
# --- gl es
d = -2**31 # smallest 32bit integer
params = (ctypes.c_int*1)(d)
()
return params[0]
# --- pyopengl
d = -2**31 # smallest 32bit integer
params = (ctypes.c_int*1)(d)
FBO.glGetRenderbufferParameteriv(target, pname, params)
return params[0]
## ============================================================================
class FunctionAnnotation:
def __init__(self, name, args, output):
self.name = name
self.args = args
self.output = output
self.lines = []  # annotation body lines (plain strings)
def __repr__(self):
return '<FunctionAnnotation for %s>' % self.name
def get_lines(self, call, backend):
""" Get the lines for this function based on the given backend.
The given API call is inserted at the correct location.
"""
backend_selector = (backend, ) # first lines are for all backends
lines = []
for line in self.lines:
if line.lstrip().startswith('# ---'):
backend_selector = line.strip().split(' ')
continue
if backend in backend_selector:
if line.strip() == '()':
indent = line.split('(')[0][4:]
line = indent + call
lines.append(line)
return lines
def is_arg_set(self, name):
""" Get whether a given variable name is set.
This allows checking whether a variable that is an input to the C
function is not an input for the Python function, and may be an output.
"""
needle = '%s =' % name
for line in self.lines:
if line.lstrip().startswith(needle):
return True
else:
return False
def parse_anotations():
""" Parse this annotations file and produce a dictionary of
FunctionAnnotation objects.
"""
functions = {}
function = None
for line in open(__file__, 'rt').readlines():
# Stop?
if '='*40 in line:
break
if line.startswith('def '):
name = line.split(' ')[1].split('(')[0]
args = line.split('(')[1].split(')')[0].split(', ')
args = [arg for arg in args if arg]
out = line.partition('->')[2].strip()
function = FunctionAnnotation(name, args, out)
functions[name] = function
continue
elif not function:
continue
# Add line
line = line.rstrip()
indent = len(line) - len(line.strip())
if line.strip() and indent >=4:
function.lines.append(line)
return functions
if __name__ == '__main__':
print(parse_anotations().keys())
| bsd-3-clause |
vvovo/vvo | model/follow.py | 1 | 2334 | #!/usr/bin/env python
# coding=utf-8
#
# Copyright 2014 vvovo.com
# Very way to victory.
# Let the dream set sail.
import time
from lib.query import Query
class FollowModel(Query):
def __init__(self, db):
self.db = db
self.table_name = "follow"
super(FollowModel, self).__init__()
def add_new_follow(self, follow_info):
return self.data(follow_info).add()
def get_follow_info_by_user_id_and_follow_user_id(self, user_id, follow_user_id):
where = "user_id = %s AND follow_user_id = %s" % (user_id, follow_user_id)
return self.where(where).find()
def delete_follow_info_by_user_id_and_follow_user_id(self, user_id, follow_user_id):
where = "user_id = %s AND follow_user_id = %s" % (user_id, follow_user_id)
return self.where(where).delete()
def get_user_follow_count(self, user_id):
where = "user_id = %s" % user_id
return self.where(where).count()
def get_user_all_follow_topics(self, user_id, num = 16, current_page = 1):
where = "follow.user_id = %s" % user_id
join = "RIGHT JOIN topic ON follow.follow_user_id = topic.author_id \
LEFT JOIN user AS author_user ON topic.author_id = author_user.uid \
LEFT JOIN college ON topic.college_id = college.id \
LEFT JOIN node ON topic.node_id = node.id \
LEFT JOIN user AS last_replied_user ON topic.last_replied_by = last_replied_user.uid"
order = "topic.last_touched DESC, topic.created DESC, topic.last_replied_time DESC, topic.id DESC"
field = "topic.*, \
author_user.username as author_username, \
author_user.nickname as author_nickname, \
author_user.avatar as author_avatar, \
author_user.uid as author_uid, \
author_user.reputation as author_reputation, \
node.name as node_name, \
node.slug as node_slug, \
college.name as college_name, \
college.id as college_id, \
last_replied_user.username as last_replied_username, \
last_replied_user.nickname as last_replied_nickname"
return self.where(where).order(order).join(join).field(field).pages(current_page = current_page, list_rows = num)
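# Illustrative usage sketch (not part of the original model; the handler
# wiring and the exact column names passed to add_new_follow() are
# assumptions based on the methods above):
#
#     follow_model = FollowModel(db)
#     if not follow_model.get_follow_info_by_user_id_and_follow_user_id(uid, author_uid):
#         follow_model.add_new_follow({"user_id": uid, "follow_user_id": author_uid})
#     topics = follow_model.get_user_all_follow_topics(uid, num=16, current_page=1)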
| bsd-3-clause |
michaelroland/wdnas-dl2100-hwtools | wdhwdaemon/client.py | 1 | 21754 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Western Digital Hardware Controller Client.
Copyright (c) 2017 Michael Roland <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import logging
from threadedsockets.packets import BasicPacket
from threadedsockets.packetclient import BasicPacketClient
from threadedsockets.unixsockets import UnixSocketFactory
from wdhwdaemon.daemon import ConfigFile
from wdhwdaemon.server import CommandPacket, ResponsePacket
from wdhwdaemon.server import CloseConnectionWarning
from wdhwdaemon.server import LEDStatus
import wdhwdaemon
_logger = logging.getLogger(__name__)
WDHWC_EXIT_SUCCESS = 0
class WdHwConnector(BasicPacketClient):
"""WD Hardware Controller Client Connector.
"""
def __init__(self, socket_path):
"""Initializes a new hardware controller client connector.
Args:
socket_path (str): File path of the named UNIX domain socket.
"""
socket_factory = UnixSocketFactory(socket_path)
client_socket = socket_factory.connectSocket()
super().__init__(client_socket, packet_class=ResponsePacket)
def _executeCommand(self, command_code, parameter=None, keep_alive=True, more_flags=0):
flags = more_flags
if keep_alive:
flags |= CommandPacket.FLAG_KEEP_ALIVE
command = CommandPacket(command_code, parameter=parameter, flags=flags)
_logger.debug("%s: Sending command '%02X' (%s)",
type(self).__name__,
command_code, repr(parameter))
self.sendPacket(command)
response = self.receivePacket()
if response.identifier != command_code:
# unexpected response
_logger.error("%s: Received unexpected response '%02X' for command '%02X'",
type(self).__name__,
response.identifier, command_code)
raise CloseConnectionWarning("Unexpected response '{:02X}' received".format(response.identifier))
elif response.is_error:
# error
_logger.error("%s: Received error '%02X'",
type(self).__name__,
response.error_code)
raise CloseConnectionWarning("Error '{:02X}' received".format(response.error_code))
else:
# success
_logger.debug("%s: Received successful response (%s)",
type(self).__name__,
repr(response.parameter))
return response.parameter
def getVersion(self):
response = self._executeCommand(CommandPacket.CMD_VERSION_GET)
return response.decode('utf-8', 'ignore')
def daemonShutdown(self):
response = self._executeCommand(CommandPacket.CMD_DAEMON_SHUTDOWN)
def getPMCVersion(self):
response = self._executeCommand(CommandPacket.CMD_PMC_VERSION_GET)
return response.decode('utf-8', 'ignore')
def getPMCStatus(self):
response = self._executeCommand(CommandPacket.CMD_PMC_STATUS_GET)
if len(response) > 0:
return response[0]
else:
raise ValueError("Invalid response format")
def setPMCConfiguration(self, config):
response = self._executeCommand(CommandPacket.CMD_PMC_CONFIGURATION_SET,
parameter=bytearray([config]))
def getPMCConfiguration(self):
response = self._executeCommand(CommandPacket.CMD_PMC_CONFIGURATION_GET)
if len(response) > 0:
return response[0]
else:
raise ValueError("Invalid response format")
def getPMCDLB(self):
response = self._executeCommand(CommandPacket.CMD_PMC_DLB_GET)
if len(response) > 0:
return response[0]
else:
raise ValueError("Invalid response format")
def setPowerLED(self, led_status):
response = self._executeCommand(CommandPacket.CMD_POWER_LED_SET,
led_status.serialize())
def getPowerLED(self):
response = self._executeCommand(CommandPacket.CMD_POWER_LED_GET)
return LEDStatus(response)
def setUSBLED(self, led_status):
response = self._executeCommand(CommandPacket.CMD_USB_LED_SET,
led_status.serialize())
def getUSBLED(self):
response = self._executeCommand(CommandPacket.CMD_USB_LED_GET)
return LEDStatus(response)
def setLCDBacklightIntensity(self, intensity):
response = self._executeCommand(CommandPacket.CMD_LCD_BACKLIGHT_INTENSITY_SET,
parameter=bytearray([intensity]))
def getLCDBacklightIntensity(self):
response = self._executeCommand(CommandPacket.CMD_LCD_BACKLIGHT_INTENSITY_GET)
if len(response) > 0:
return response[0]
else:
raise ValueError("Invalid response format")
def setLCDText(self, line, text):
parameter = bytearray([line])
parameter.extend(text.encode('ascii', 'ignore'))
response = self._executeCommand(CommandPacket.CMD_LCD_TEXT_SET,
parameter=parameter)
def getPMCTemperature(self):
response = self._executeCommand(CommandPacket.CMD_PMC_TEMPERATURE_GET)
if len(response) > 1:
return ((response[0] << 8) & 0x0FF00) | (response[1] & 0x0FF)
else:
raise ValueError("Invalid response format")
def getFanRPM(self):
response = self._executeCommand(CommandPacket.CMD_FAN_RPM_GET)
if len(response) > 1:
return ((response[0] << 8) & 0x0FF00) | (response[1] & 0x0FF)
else:
raise ValueError("Invalid response format")
def setFanSpeed(self, speed):
response = self._executeCommand(CommandPacket.CMD_FAN_SPEED_SET,
parameter=bytearray([speed]))
def getFanSpeed(self):
response = self._executeCommand(CommandPacket.CMD_FAN_SPEED_GET)
if len(response) > 0:
return response[0]
else:
raise ValueError("Invalid response format")
def getDrivePresentMask(self):
response = self._executeCommand(CommandPacket.CMD_DRIVE_PRESENT_GET)
if len(response) > 0:
return response[0]
else:
raise ValueError("Invalid response format")
def setDriveEnabled(self, drive_bay, enable):
enable_val = 0
if enable:
enable_val = 1
response = self._executeCommand(CommandPacket.CMD_DRIVE_ENABLED_SET,
parameter=bytearray([drive_bay, enable_val]))
def getDriveEnabledMask(self):
response = self._executeCommand(CommandPacket.CMD_DRIVE_ENABLED_GET)
if len(response) > 0:
return response[0]
else:
raise ValueError("Invalid response format")
class WdHwClient(object):
"""WD Hardware Controller Client.
"""
def __init__(self):
"""Initializes a new hardware controller client."""
super().__init__()
def main(self, argv):
"""Main loop of the hardware controller client.
Args:
argv (List(str)): List of command line arguments.
Returns:
int: Exit status code.
"""
cmdparser = argparse.ArgumentParser(
description=wdhwdaemon.WDHWC_DESCRIPTION,
epilog=wdhwdaemon.WDHWD_EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter)
cmdparser.add_argument(
'-C', '--config', action='store', nargs='?', metavar='CONFIG_FILE',
default=wdhwdaemon.WDHWD_CONFIG_FILE_DEFAULT,
help='configuration file (default: %(default)s)')
cmdparser.add_argument(
'-v', '--verbose', action='count',
default=0,
help='sets the console logging verbosity level')
cmdparser.add_argument(
'-q', '--quiet', action='store_true',
help='disables console logging output')
cmdparser.add_argument(
'-V', '--version', action='version',
version=wdhwdaemon.WDHWD_VERSION,
help='show version information and exit')
subparsers = cmdparser.add_subparsers(
dest='command', metavar='COMMAND', title='available subcommands')
cmd_version = subparsers.add_parser('version', help='get system version command',
description="{}\nversion: get system version command".format(wdhwdaemon.WDHWC_DESCRIPTION),
epilog=wdhwdaemon.WDHWD_EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter)
cmd_led = subparsers.add_parser('led', help='LED control command',
description="{}\nled: LED control command".format(wdhwdaemon.WDHWC_DESCRIPTION),
epilog=wdhwdaemon.WDHWD_EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter)
cmd_led_type = cmd_led.add_argument_group(title='LED type to control')
cmd_led_type = cmd_led_type.add_mutually_exclusive_group(required=True)
cmd_led_type.add_argument(
'--power', '-P', dest='led_type', action='store_const',
const="power",
help='power LED')
cmd_led_type.add_argument(
'--usb', '-U', dest='led_type', action='store_const',
const="usb",
help='USB LED')
cmd_led_action = cmd_led.add_argument_group(title='LED action mode')
cmd_led_action = cmd_led_action.add_mutually_exclusive_group()
cmd_led_action.add_argument(
'-g', '--get', action='store_true',
help='get current status (also the default if no mode is given)')
cmd_led_action.add_argument(
'-s', '--steady', action='store_true',
help='set steady mode')
cmd_led_action.add_argument(
'-b', '--blink', action='store_true',
help='set blinking mode')
cmd_led_action.add_argument(
'-p', '--pulse', action='store_true',
help='set pulsing mode')
cmd_led_color = cmd_led.add_argument_group(title='LED color')
cmd_led_color.add_argument(
'-R', '--red', action='store_true',
help='red on (defaults to off when option is absent)')
cmd_led_color.add_argument(
'-G', '--green', action='store_true',
help='green on (defaults to off when option is absent)')
cmd_led_color.add_argument(
'-B', '--blue', action='store_true',
help='blue on (defaults to off when option is absent)')
cmd_fan = subparsers.add_parser('fan', help='fan control command',
description="{}\nfan: fan control command".format(wdhwdaemon.WDHWC_DESCRIPTION),
epilog=wdhwdaemon.WDHWD_EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter)
cmd_fan_action = cmd_fan.add_argument_group(title='fan action mode')
cmd_fan_action = cmd_fan_action.add_mutually_exclusive_group()
cmd_fan_action.add_argument(
'-g', '--get', action='store_true',
help='get current status (also the default if no mode is given)')
cmd_fan_action.add_argument(
'-s', '--set', action='store', type=int, dest='speed', metavar="SPEED",
default=None,
help='set fan speed in percent')
cmd_temperature = subparsers.add_parser('temperature', help='get system temperature command',
description="{}\ntemperature: get system temperature command".format(wdhwdaemon.WDHWC_DESCRIPTION),
epilog=wdhwdaemon.WDHWD_EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter)
cmd_drive = subparsers.add_parser('drive', help='drive bay control command',
description="{}\ndrive: drive bay control command".format(wdhwdaemon.WDHWC_DESCRIPTION),
epilog=wdhwdaemon.WDHWD_EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter)
cmd_drive_action = cmd_drive.add_argument_group(title='drive bay action mode')
cmd_drive_action = cmd_drive_action.add_mutually_exclusive_group()
cmd_drive_action.add_argument(
'-g', '--get', action='store_true',
help='get current status (also the default if no mode is given)')
cmd_drive_action.add_argument(
'-e', '--enable', action='store', type=int, dest='drivebay_enable', metavar="DRIVE_BAY",
default=None,
help='set drive bay number %(metavar)s enabled')
cmd_drive_action.add_argument(
'-d', '--disable', action='store', type=int, dest='drivebay_disable', metavar="DRIVE_BAY",
default=None,
help='set drive bay number %(metavar)s disabled')
cmd_shutdown = subparsers.add_parser('shutdown', help='daemon shutdown command',
description="{}\nshutdown: daemon shutdown command".format(wdhwdaemon.WDHWC_DESCRIPTION),
epilog=wdhwdaemon.WDHWD_EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter)
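        # Parse the command line and map the verbosity count to a log level (default: ERROR)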
args = cmdparser.parse_args(argv[1:])
log_level = logging.ERROR
if args.verbose > 3:
log_level = logging.NOTSET
elif args.verbose > 2:
log_level = logging.DEBUG
elif args.verbose > 1:
log_level = logging.INFO
elif args.verbose > 0:
log_level = logging.WARNING
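        # Configure the root logger; console output is suppressed in quiet mode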
logger = logging.getLogger("")
logger.setLevel(log_level)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
if not args.quiet:
consolelog = logging.StreamHandler()
consolelog.setLevel(log_level)
consolelog.setFormatter(formatter)
logger.addHandler(consolelog)
_logger.debug("%s: Loading configuration file '%s'",
type(self).__name__,
args.config)
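        # Load the daemon configuration and connect to the daemon's command socket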
cfg = ConfigFile(args.config)
conn = WdHwConnector(cfg.socket_path)
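        # Dispatch the selected subcommand over the daemon connection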
if args.command == "version":
daemon_version = conn.getVersion()
pmc_version = conn.getPMCVersion()
print("Daemon version: {0}".format(daemon_version))
print("PMC version: {0}".format(pmc_version))
elif args.command == "led":
if args.get or ((not args.steady) and (not args.blink) and (not args.pulse)):
if args.led_type == "power":
led_status = conn.getPowerLED()
print("Power LED\t{0:5}\t{1:5}\t{2:5}".format(
"red", "green", "blue"))
print("----------------------------------------")
if led_status.mask_const:
print("steady: \t{0:5}\t{1:5}\t{2:5}".format(
"on" if led_status.red_const else "off",
"on" if led_status.green_const else "off",
"on" if led_status.blue_const else "off"))
if led_status.mask_blink:
print("blink: \t{0:5}\t{1:5}\t{2:5}".format(
"on" if led_status.red_blink else "off",
"on" if led_status.green_blink else "off",
"on" if led_status.blue_blink else "off"))
if led_status.mask_pulse:
print("pulse: \t{0:5}\t{1:5}\t{2:5}".format(
"on" if led_status.red_pulse else "---",
"on" if led_status.green_pulse else "---",
"on" if led_status.blue_pulse else "off"))
elif args.led_type == "usb":
led_status = conn.getUSBLED()
print("USB LED \t{0:5}\t{1:5}\t{2:5}".format(
"red", "green", "blue"))
print("----------------------------------------")
if led_status.mask_const:
print("steady: \t{0:5}\t{1:5}\t{2:5}".format(
"on " if led_status.red_const else "off",
"on " if led_status.green_const else "---",
"on " if led_status.blue_const else "off"))
if led_status.mask_blink:
print("blink: \t{0:5}\t{1:5}\t{2:5}".format(
"on " if led_status.red_blink else "off",
"on " if led_status.green_blink else "---",
"on " if led_status.blue_blink else "off"))
if led_status.mask_pulse:
print("pulse: \t{0:5}\t{1:5}\t{2:5}".format(
"on " if led_status.red_pulse else "---",
"on " if led_status.green_pulse else "---",
"on " if led_status.blue_pulse else "---"))
else:
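                # Otherwise assemble a new LEDStatus from the mode and color flags and send it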
led_status = LEDStatus()
if args.steady:
led_status.mask_const = True
led_status.red_const = args.red
led_status.green_const = args.green
led_status.blue_const = args.blue
elif args.blink:
led_status.mask_blink = True
led_status.red_blink = args.red
led_status.green_blink = args.green
led_status.blue_blink = args.blue
elif args.pulse:
led_status.mask_pulse = True
led_status.red_pulse = args.red
led_status.green_pulse = args.green
led_status.blue_pulse = args.blue
if args.led_type == "power":
conn.setPowerLED(led_status)
elif args.led_type == "usb":
conn.setUSBLED(led_status)
elif args.command == "fan":
if args.get or (args.speed is None):
fan_rpm = conn.getFanRPM()
fan_speed = conn.getFanSpeed()
print("Fan speed: {0} RPM at {1} %".format(fan_rpm, fan_speed))
else:
if (args.speed < 0) or (args.speed > 100):
cmdparser.error("Parameter SPEED is out of valid range (0 <= SPEED <= 100)")
else:
conn.setFanSpeed(args.speed)
elif args.command == "drive":
if args.get or ((args.drivebay_enable is None) and (args.drivebay_disable is None)):
present_mask = conn.getDrivePresentMask()
enabled_mask = conn.getDriveEnabledMask()
config_register = conn.getPMCConfiguration()
status_register = conn.getPMCStatus()
dlb = conn.getPMCDLB()
print("Automatic HDD power-up on presence detection: {0}".format(
"on" if (config_register & 0x001) != 0 else "off"))
print("Drive bay\tDrive present\tDrive enabled")
for drive_bay in range(0, len(cfg.disk_drives)):
print("{0:9d}\t{1:13}\t{2:13}".format(
drive_bay,
"no" if (present_mask & (1<<drive_bay)) != 0 else "yes",
"yes" if (enabled_mask & (1<<drive_bay)) != 0 else "no"))
else:
drive_bay = None
enabled = True
if args.drivebay_enable is not None:
enabled = True
drive_bay = args.drivebay_enable
elif args.drivebay_disable is not None:
enabled = False
drive_bay = args.drivebay_disable
else:
cmdparser.error("Must specify at least one drive command")
if drive_bay is not None:
conn.setDriveEnabled(drive_bay, enabled)
else:
cmdparser.error("Must specify at least one drive command")
elif args.command == "temperature":
pmc_temperature = conn.getPMCTemperature()
print("PMC temperature: {0} °C".format(pmc_temperature))
elif args.command == "shutdown":
conn.daemonShutdown()
conn.close()
return WDHWC_EXIT_SUCCESS
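
# Example invocations (sketch only; the client command name and a running wdhwd
# daemon with the default configuration are assumed):
#     wdhwc led --usb --steady -B    # switch the USB LED to steady blue
#     wdhwc fan --set 60             # set the fan duty cycle to 60 %
#     wdhwc drive --disable 1        # power down drive bay 1
#     wdhwc temperature              # print the PMC temperature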
if __name__ == "__main__":
import sys
c = WdHwClient()
ret = c.main(sys.argv)
sys.exit(ret)
| gpl-3.0 |
fyookball/electrum | plugins/ledger/auth2fa.py | 1 | 6760 | from PyQt5.QtWidgets import *
from electroncash.i18n import _
from electroncash_gui.qt.util import *
from electroncash.util import print_error
from electroncash.address import Address
from electroncash import networks
import copy
from btchip.btchip import BTChipException
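# Help text shown in the dialog, indexed by authentication mode
# (0 = summary-text PIN, 1 = security-card challenge).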
helpTxt = [_("Your Ledger Wallet wants to tell you a one-time PIN code.<br><br>" \
"For best security you should unplug your device, open a text editor on another computer, " \
"put your cursor into it, and plug your device into that computer. " \
"It will output a summary of the transaction being signed and a one-time PIN.<br><br>" \
"Verify the transaction summary and type the PIN code here.<br><br>" \
"Before pressing enter, plug the device back into this computer.<br>" ),
_("Verify the address below.<br>Type the character from your security card corresponding to the <u><b>BOLD</b></u> character.")
]
class LedgerAuthDialog(QDialog):
def __init__(self, handler, data):
'''Ask user for 2nd factor authentication. Support text and security card methods.
Use last method from settings, but support downgrade.
'''
QDialog.__init__(self, handler.top_level_window())
self.handler = handler
self.txdata = data
self.idxs = self.txdata['keycardData'] if self.txdata['confirmationType'] > 1 else ''
self.setMinimumWidth(1000)
self.setWindowTitle(_("Ledger Wallet Authentication"))
self.cfg = copy.deepcopy(self.handler.win.wallet.get_keystore().cfg)
self.dongle = self.handler.win.wallet.get_keystore().get_client().dongle
self.pin = ''
self.devmode = self.getDevice2FAMode()
if self.devmode == 0x11 or self.txdata['confirmationType'] == 1:
self.cfg['mode'] = 0
vbox = QVBoxLayout()
self.setLayout(vbox)
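        # Persist a newly selected authentication method into the keystore configuration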
def on_change_mode(idx):
self.cfg['mode'] = 0 if self.devmode == 0x11 else idx if idx > 0 else 1
if self.cfg['mode'] > 0:
self.handler.win.wallet.get_keystore().cfg = self.cfg
self.handler.win.wallet.save_keystore()
self.update_dlg()
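        # Read the PIN from the active input; in security-card mode each entered hex
        # digit is converted to the ASCII character the dongle expects.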
def return_pin():
self.pin = self.pintxt.text() if self.txdata['confirmationType'] == 1 else self.cardtxt.text()
if self.cfg['mode'] == 1:
self.pin = ''.join(chr(int(str(i),16)) for i in self.pin)
self.accept()
self.modebox = QWidget()
modelayout = QHBoxLayout()
self.modebox.setLayout(modelayout)
modelayout.addWidget(QLabel(_("Method:")))
self.modes = QComboBox()
modelayout.addWidget(self.modes, 2)
modelayout.addStretch(1)
self.modebox.setMaximumHeight(50)
vbox.addWidget(self.modebox)
self.populate_modes()
self.modes.currentIndexChanged.connect(on_change_mode)
self.helpmsg = QTextEdit()
self.helpmsg.setStyleSheet("QTextEdit { background-color: lightgray; }")
self.helpmsg.setReadOnly(True)
vbox.addWidget(self.helpmsg)
self.pinbox = QWidget()
pinlayout = QHBoxLayout()
self.pinbox.setLayout(pinlayout)
self.pintxt = QLineEdit()
self.pintxt.setEchoMode(2)
self.pintxt.setMaxLength(4)
self.pintxt.returnPressed.connect(return_pin)
pinlayout.addWidget(QLabel(_("Enter PIN:")))
pinlayout.addWidget(self.pintxt)
pinlayout.addWidget(QLabel(_("NOT DEVICE PIN - see above")))
pinlayout.addStretch(1)
self.pinbox.setVisible(self.cfg['mode'] == 0)
vbox.addWidget(self.pinbox)
self.cardbox = QWidget()
card = QVBoxLayout()
self.cardbox.setLayout(card)
self.addrtext = QTextEdit()
self.addrtext.setStyleSheet("QTextEdit { color:blue; background-color:lightgray; padding:15px 10px; border:none; font-size:20pt; }")
self.addrtext.setReadOnly(True)
self.addrtext.setMaximumHeight(120)
card.addWidget(self.addrtext)
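        # Bold/underline the address character whose security-card value is requested next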
def pin_changed(s):
if len(s) < len(self.idxs):
i = self.idxs[len(s)]
address = self.txdata['address']
                # Always derive the challenge from the mainnet legacy address, since the
                # security card code is computed from that form of the address
addressstr = address.to_string(Address.FMT_LEGACY, net=networks.MainNet)
addressstr = addressstr[:i] + '<u><b>' + addressstr[i:i+1] + '</u></b>' + addressstr[i+1:]
# We also show the UI address if it is different
if networks.net.TESTNET or not Address.FMT_UI == Address.FMT_LEGACY:
addressstr = address.to_ui_string() + '\n' + addressstr
self.addrtext.setHtml(str(addressstr))
else:
self.addrtext.setHtml(_("Press Enter"))
pin_changed('')
cardpin = QHBoxLayout()
cardpin.addWidget(QLabel(_("Enter PIN:")))
self.cardtxt = QLineEdit()
self.cardtxt.setEchoMode(2)
self.cardtxt.setMaxLength(len(self.idxs))
self.cardtxt.textChanged.connect(pin_changed)
self.cardtxt.returnPressed.connect(return_pin)
cardpin.addWidget(self.cardtxt)
cardpin.addWidget(QLabel(_("NOT DEVICE PIN - see above")))
cardpin.addStretch(1)
card.addLayout(cardpin)
self.cardbox.setVisible(self.cfg['mode'] == 1)
vbox.addWidget(self.cardbox)
self.update_dlg()
def populate_modes(self):
self.modes.blockSignals(True)
self.modes.clear()
self.modes.addItem(_("Summary Text PIN (requires dongle replugging)") if self.txdata['confirmationType'] == 1 else _("Summary Text PIN is Disabled"))
if self.txdata['confirmationType'] > 1:
self.modes.addItem(_("Security Card Challenge"))
self.modes.blockSignals(False)
def update_dlg(self):
self.modes.setCurrentIndex(self.cfg['mode'])
self.modebox.setVisible(True)
self.helpmsg.setText(helpTxt[self.cfg['mode']])
self.helpmsg.setMinimumHeight(180 if self.txdata['confirmationType'] == 1 else 100)
self.helpmsg.setVisible(True)
self.pinbox.setVisible(self.cfg['mode'] == 0)
self.cardbox.setVisible(self.cfg['mode'] == 1)
self.pintxt.setFocus(True) if self.cfg['mode'] == 0 else self.cardtxt.setFocus(True)
self.setMaximumHeight(200)
def getDevice2FAMode(self):
apdu = [0xe0, 0x24, 0x01, 0x00, 0x00, 0x01] # get 2fa mode
try:
mode = self.dongle.exchange( bytearray(apdu) )
return mode
except BTChipException as e:
            print_error('Device getMode Failed: {}'.format(e))
return 0x11
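
# Typical use from the Qt handler (sketch; `handler` and `txdata` come from the
# Ledger signing flow that shows this dialog):
#     dialog = LedgerAuthDialog(handler, txdata)
#     dialog.exec_()
#     pin = dialog.pin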
| mit |