repo_name (string, length 5-100) | path (string, length 4-375) | copies (991 distinct values) | size (string, length 4-7) | content (string, length 666-1M) | license (15 distinct values)
---|---|---|---|---|---
Dreizan/csci1200OnlineCourse | modules/course_explorer/course_explorer.py | 19 | 2859 |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Course explorer module."""
__author__ = 'Rahul Singal ([email protected])'
from common import safe_dom
from controllers import utils
from models import custom_modules
from models.config import ConfigProperty
from models.models import StudentProfileDAO
from modules.course_explorer import student
from google.appengine.api import users
GCB_ENABLE_COURSE_EXPLORER_PAGE = ConfigProperty(
'gcb_enable_course_explorer_page', bool,
safe_dom.NodeList().append(
safe_dom.Element('p').add_text("""
If this option is selected, "/" redirects to the course explorer page.
Otherwise, it redirects to the preview page for the default course.""")
), False, multiline=False, validator=None)
custom_module = None
class ExplorerPageInitializer(utils.PageInitializer):
"""Page initializer for explorer page.
Allow links to the course explorer to be added
to the navbars of all course pages.
"""
@classmethod
def initialize(cls, template_values):
template_values.update(
{'show_course_explorer_tab': GCB_ENABLE_COURSE_EXPLORER_PAGE.value})
user = users.get_current_user()
if user:
profile = StudentProfileDAO.get_profile_by_user_id(
users.get_current_user().user_id())
template_values.update({'has_global_profile': profile is not None})
def register_module():
"""Registers this module in the registry."""
# set the page initializer
utils.PageInitializerService.set(ExplorerPageInitializer)
# setup routes
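    # Each route below pairs a URL pattern with the handler class that serves it;
    # the assets route uses a regex capture group so the matched asset path is
    # handed to student.AssetsHandler.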
explorer_routes = [
('/', student.IndexPageHandler),
('/explorer', student.AllCoursesHandler),
(r'/explorer/assets/(.*)', student.AssetsHandler),
('/explorer/courses', student.RegisteredCoursesHandler),
('/explorer/profile', student.ProfileHandler)]
global custom_module
custom_module = custom_modules.Module(
'Course Explorer',
'A set of pages for delivering an online course.',
explorer_routes, [])
return custom_module
def unregister_module():
"""Unregisters this module in the registry."""
    # set the page initializer to default.
    utils.PageInitializerService.set(utils.DefaultPageInitializer)
    return custom_module
| apache-2.0 |
Alwnikrotikz/open-hea | src/openhea/xlrd/compdoc.py | 64 | 14974 |
# -*- coding: cp1252 -*-
##
# Implements the minimal functionality required
# to extract a "Workbook" or "Book" stream (as one big string)
# from an OLE2 Compound Document file.
# <p>Copyright © 2005-2008 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under a BSD-style licence.</p>
##
# No part of the content of this file was derived from the works of David Giffin.
# 2008-11-04 SJM Avoid assertion error when -1 used instead of -2 for first_SID of empty SCSS [Frank Hoffsuemmer]
# 2007-09-08 SJM Warning message if sector sizes are extremely large.
# 2007-05-07 SJM Meaningful exception instead of IndexError if a SAT (sector allocation table) is corrupted.
# 2007-04-22 SJM Missing "<" in a struct.unpack call => can't open files on bigendian platforms.
import sys
from struct import unpack
from timemachine import *
##
# Magic cookie that should appear in the first 8 bytes of the file.
SIGNATURE = "\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1"
EOCSID = -2
FREESID = -1
SATSID = -3
MSATSID = -4
class CompDocError(Exception):
pass
class DirNode(object):
def __init__(self, DID, dent, DEBUG=0):
# dent is the 128-byte directory entry
self.DID = DID
# (cbufsize, self.etype, self.colour, self.left_DID, self.right_DID,
# self.root_DID,
# self.first_SID,
# self.tot_size) = \
# unpack('<HBBiii16x4x8x8xii4x', dent[64:128])
(cbufsize, self.etype, self.colour, self.left_DID, self.right_DID,
self.root_DID) = \
unpack('<HBBiii', dent[64:80])
(self.first_SID, self.tot_size) = \
unpack('<ii', dent[116:124])
if cbufsize == 0:
self.name = u''
else:
self.name = unicode(dent[0:cbufsize-2], 'utf_16_le') # omit the trailing U+0000
self.children = [] # filled in later
self.parent = -1 # indicates orphan; fixed up later
self.tsinfo = unpack('<IIII', dent[100:116])
if DEBUG:
self.dump(DEBUG)
def dump(self, DEBUG=1):
print "DID=%d name=%r etype=%d DIDs(left=%d right=%d root=%d parent=%d kids=%r) first_SID=%d tot_size=%d" \
% (self.DID, self.name, self.etype, self.left_DID,
self.right_DID, self.root_DID, self.parent, self.children, self.first_SID, self.tot_size)
if DEBUG == 2:
# cre_lo, cre_hi, mod_lo, mod_hi = tsinfo
print "timestamp info", self.tsinfo
def _build_family_tree(dirlist, parent_DID, child_DID):
if child_DID < 0: return
_build_family_tree(dirlist, parent_DID, dirlist[child_DID].left_DID)
dirlist[parent_DID].children.append(child_DID)
dirlist[child_DID].parent = parent_DID
_build_family_tree(dirlist, parent_DID, dirlist[child_DID].right_DID)
if dirlist[child_DID].etype == 1: # storage
_build_family_tree(dirlist, child_DID, dirlist[child_DID].root_DID)
##
# Compound document handler.
# @param mem The raw contents of the file, as a string, or as an mmap.mmap() object. The
# only operation it needs to support is slicing.
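# <p>Usage sketch (the file name below is hypothetical, not part of this module):</p>
# <pre>
#     data = open('example.xls', 'rb').read()
#     doc = CompDoc(data)
#     workbook_stream = doc.get_named_stream(u'Workbook')
# </pre>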
class CompDoc(object):
def __init__(self, mem, logfile=sys.stdout, DEBUG=0):
self.logfile = logfile
if mem[0:8] != SIGNATURE:
raise CompDocError('Not an OLE2 compound document')
if mem[28:30] != '\xFE\xFF':
raise CompDocError('Expected "little-endian" marker, found %r' % mem[28:30])
revision, version = unpack('<HH', mem[24:28])
if DEBUG:
print >> logfile, "\nCompDoc format: version=0x%04x revision=0x%04x" % (version, revision)
self.mem = mem
ssz, sssz = unpack('<HH', mem[30:34])
if ssz > 20: # allows for 2**20 bytes i.e. 1MB
print >> logfile, \
"WARNING: sector size (2**%d) is preposterous; assuming 512 and continuing ..." \
% ssz
ssz = 9
if sssz > ssz:
print >> logfile, \
"WARNING: short stream sector size (2**%d) is preposterous; assuming 64 and continuing ..." \
% sssz
sssz = 6
self.sec_size = sec_size = 1 << ssz
self.short_sec_size = 1 << sssz
(
SAT_tot_secs, self.dir_first_sec_sid, _unused, self.min_size_std_stream,
SSAT_first_sec_sid, SSAT_tot_secs,
MSAT_first_sec_sid, MSAT_tot_secs,
# ) = unpack('<ii4xiiiii', mem[44:76])
) = unpack('<iiiiiiii', mem[44:76])
mem_data_len = len(mem) - 512
mem_data_secs, left_over = divmod(mem_data_len, sec_size)
if left_over:
#### raise CompDocError("Not a whole number of sectors")
print >> logfile, \
"WARNING *** file size (%d) not 512 + multiple of sector size (%d)" \
% (len(mem), sec_size)
if DEBUG:
print >> logfile, 'sec sizes', ssz, sssz, sec_size, self.short_sec_size
print >> logfile, "mem data: %d bytes == %d sectors" % (mem_data_len, mem_data_secs)
print >> logfile, "SAT_tot_secs=%d, dir_first_sec_sid=%d, min_size_std_stream=%d" \
% (SAT_tot_secs, self.dir_first_sec_sid, self.min_size_std_stream,)
print >> logfile, "SSAT_first_sec_sid=%d, SSAT_tot_secs=%d" % (SSAT_first_sec_sid, SSAT_tot_secs,)
print >> logfile, "MSAT_first_sec_sid=%d, MSAT_tot_secs=%d" % (MSAT_first_sec_sid, MSAT_tot_secs,)
nent = int_floor_div(sec_size, 4) # number of SID entries in a sector
fmt = "<%di" % nent
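        # e.g. with the default 512-byte sector size, nent == 128 and fmt == "<128i"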
trunc_warned = 0
#
# === build the MSAT ===
#
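        # The 512-byte header holds the first 109 MSAT entries (bytes 76..511);
        # additional MSAT sectors are chained from MSAT_first_sec_sid, with the
        # last 4-byte slot of each such sector (news.pop() below) giving the SID
        # of the next MSAT sector; a negative SID (e.g. EOCSID) ends the chain.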
MSAT = list(unpack('<109i', mem[76:512]))
sid = MSAT_first_sec_sid
while sid >= 0:
if sid >= mem_data_secs:
raise CompDocError(
"MSAT extension: accessing sector %d but only %d in file" % (sid, mem_data_secs)
)
offset = 512 + sec_size * sid
news = list(unpack(fmt, mem[offset:offset+sec_size]))
sid = news.pop()
MSAT.extend(news)
if DEBUG:
print >> logfile, "MSAT: len =", len(MSAT)
print >> logfile, MSAT
#
# === build the SAT ===
#
self.SAT = []
for msid in MSAT:
if msid == FREESID: continue
if msid >= mem_data_secs:
if not trunc_warned:
print >> logfile, "WARNING *** File is truncated, or OLE2 MSAT is corrupt!!"
print >> logfile, \
"INFO: Trying to access sector %d but only %d available" \
% (msid, mem_data_secs)
trunc_warned = 1
continue
offset = 512 + sec_size * msid
news = list(unpack(fmt, mem[offset:offset+sec_size]))
self.SAT.extend(news)
if DEBUG:
print >> logfile, "SAT: len =", len(self.SAT)
print >> logfile, self.SAT
# print >> logfile, "SAT ",
# for i, s in enumerate(self.SAT):
# print >> logfile, "entry: %4d offset: %6d, next entry: %4d" % (i, 512 + sec_size * i, s)
# print >> logfile, "%d:%d " % (i, s),
        # print
# === build the directory ===
#
dbytes = self._get_stream(
self.mem, 512, self.SAT, self.sec_size, self.dir_first_sec_sid,
name="directory")
dirlist = []
did = -1
for pos in xrange(0, len(dbytes), 128):
did += 1
dirlist.append(DirNode(did, dbytes[pos:pos+128], 0))
self.dirlist = dirlist
_build_family_tree(dirlist, 0, dirlist[0].root_DID) # and stand well back ...
if DEBUG:
for d in dirlist:
d.dump(DEBUG)
#
# === get the SSCS ===
#
sscs_dir = self.dirlist[0]
assert sscs_dir.etype == 5 # root entry
if sscs_dir.first_SID < 0 and sscs_dir.tot_size == 0:
# Problem reported by Frank Hoffsuemmer: some software was
# writing -1 instead of -2 (EOCSID) for the first_SID
# when the SCCS was empty. Not having EOCSID caused assertion
# failure in _get_stream.
# Solution: avoid calling _get_stream in any case when the
# SCSS appears to be empty.
self.SSCS = ""
else:
self.SSCS = self._get_stream(
self.mem, 512, self.SAT, sec_size, sscs_dir.first_SID,
sscs_dir.tot_size, name="SSCS")
# if DEBUG: print >> logfile, "SSCS", repr(self.SSCS)
#
# === build the SSAT ===
#
self.SSAT = []
if SSAT_tot_secs > 0 and sscs_dir.tot_size == 0:
print >> logfile, \
"WARNING *** OLE2 inconsistency: SSCS size is 0 but SSAT size is non-zero"
if sscs_dir.tot_size > 0:
sid = SSAT_first_sec_sid
nsecs = SSAT_tot_secs
while sid >= 0 and nsecs > 0:
nsecs -= 1
start_pos = 512 + sid * sec_size
news = list(unpack(fmt, mem[start_pos:start_pos+sec_size]))
self.SSAT.extend(news)
sid = self.SAT[sid]
# assert SSAT_tot_secs == 0 or sid == EOCSID
if DEBUG: print >> logfile, "SSAT last sid %d; remaining sectors %d" % (sid, nsecs)
assert nsecs == 0 and sid == EOCSID
if DEBUG: print >> logfile, "SSAT", self.SSAT
def _get_stream(self, mem, base, sat, sec_size, start_sid, size=None, name=''):
# print >> self.logfile, "_get_stream", base, sec_size, start_sid, size
sectors = []
s = start_sid
if size is None:
# nothing to check against
while s >= 0:
start_pos = base + s * sec_size
sectors.append(mem[start_pos:start_pos+sec_size])
try:
s = sat[s]
except IndexError:
raise CompDocError(
"OLE2 stream %r: sector allocation table invalid entry (%d)" %
(name, s)
)
assert s == EOCSID
else:
todo = size
while s >= 0:
start_pos = base + s * sec_size
grab = sec_size
if grab > todo:
grab = todo
todo -= grab
sectors.append(mem[start_pos:start_pos+grab])
try:
s = sat[s]
except IndexError:
raise CompDocError(
"OLE2 stream %r: sector allocation table invalid entry (%d)" %
(name, s)
)
assert s == EOCSID
if todo != 0:
print >> self.logfile, \
"WARNING *** OLE2 stream %r: expected size %d, actual size %d" \
% (name, size, size - todo)
return ''.join(sectors)
def _dir_search(self, path, storage_DID=0):
# Return matching DirNode instance, or None
head = path[0]
tail = path[1:]
dl = self.dirlist
for child in dl[storage_DID].children:
if dl[child].name.lower() == head.lower():
et = dl[child].etype
if et == 2:
return dl[child]
if et == 1:
if not tail:
raise CompDocError("Requested component is a 'storage'")
return self._dir_search(tail, child)
dl[child].dump(1)
raise CompDocError("Requested stream is not a 'user stream'")
return None
##
# Interrogate the compound document's directory; return the stream as a string if found, otherwise
# return None.
# @param qname Name of the desired stream e.g. u'Workbook'. Should be in Unicode or convertible thereto.
def get_named_stream(self, qname):
d = self._dir_search(qname.split("/"))
if d is None:
return None
if d.tot_size >= self.min_size_std_stream:
return self._get_stream(
self.mem, 512, self.SAT, self.sec_size, d.first_SID,
d.tot_size, name=qname)
else:
return self._get_stream(
self.SSCS, 0, self.SSAT, self.short_sec_size, d.first_SID,
d.tot_size, name=qname + " (from SSCS)")
##
# Interrogate the compound document's directory.
# If the named stream is not found, (None, 0, 0) will be returned.
# If the named stream is found and is contiguous within the original byte sequence ("mem")
# used when the document was opened,
# then (mem, offset_to_start_of_stream, length_of_stream) is returned.
# Otherwise a new string is built from the fragments and (new_string, 0, length_of_stream) is returned.
# @param qname Name of the desired stream e.g. u'Workbook'. Should be in Unicode or convertible thereto.
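    # <p>Example of consuming the return value (variable names are illustrative only):</p>
    # <pre>
    #     mem, offset, length = cd.locate_named_stream(u'Workbook')
    #     if mem is not None:
    #         stream_bytes = mem[offset:offset+length]
    # </pre>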
def locate_named_stream(self, qname):
d = self._dir_search(qname.split("/"))
if d is None:
return (None, 0, 0)
if d.tot_size >= self.min_size_std_stream:
return self._locate_stream(self.mem, 512, self.SAT, self.sec_size, d.first_SID, d.tot_size)
else:
return (
self._get_stream(
self.SSCS, 0, self.SSAT, self.short_sec_size, d.first_SID,
d.tot_size, qname + " (from SSCS)"),
0,
d.tot_size
)
return (None, 0, 0) # not found
def _locate_stream(self, mem, base, sat, sec_size, start_sid, size):
# print >> self.logfile, "_locate_stream", base, sec_size, start_sid, size
s = start_sid
if s < 0:
raise CompDocError("_locate_stream: start_sid (%d) is -ve" % start_sid)
p = -99 # dummy previous SID
start_pos = -9999
end_pos = -8888
slices = []
while s >= 0:
if s == p+1:
# contiguous sectors
end_pos += sec_size
else:
# start new slice
if p >= 0:
# not first time
slices.append((start_pos, end_pos))
start_pos = base + s * sec_size
end_pos = start_pos + sec_size
p = s
s = sat[s]
assert s == EOCSID
# print >> self.logfile, len(slices) + 1, "slices"
if not slices:
# The stream is contiguous ... just what we like!
return (mem, start_pos, size)
slices.append((start_pos, end_pos))
return (''.join([mem[start_pos:end_pos] for start_pos, end_pos in slices]), 0, size)
# ==========================================================================================
| lgpl-2.1 |
mpSchrader/gym-sokoban | examples/Human_Playing_Commandline.py | 1 | 3725 |
import gym
import gym_sokoban
import time
from PIL import Image
import numpy as np
import argparse
import os
parser = argparse.ArgumentParser(description='Run environment with random selected actions.')
parser.add_argument('--rounds', '-r', metavar='rounds', type=int,
help='number of rounds to play (default: 1)', default=1)
parser.add_argument('--steps', '-s', metavar='steps', type=int,
help='maximum number of steps to be played each round (default: 300)', default=300)
parser.add_argument('--env', '-e', metavar='env',
help='Environment to load (default: Sokoban-v0)', default='Sokoban-v0')
parser.add_argument('--save', action='store_true',
help='Save images of single steps')
parser.add_argument('--gifs', action='store_true',
help='Generate Gif files from images')
parser.add_argument('--render_mode', '-m', metavar='render_mode',
help='Render Mode (default: human)', default='human')
args = parser.parse_args()
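# Example invocation (flags as defined above; the script name is this file's name in the examples directory):
#   python Human_Playing_Commandline.py --rounds 2 --steps 100 --env Sokoban-v0 --save --gifs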
env_name = args.env
n_rounds = args.rounds
n_steps = args.steps
save_images = args.save or args.gifs
generate_gifs = args.gifs
render_mode = args.render_mode
observation_mode = 'tiny_rgb_array' if 'tiny' in render_mode else 'rgb_array'
scale_image = 16
# Creating target directory if images are to be stored
if save_images and not os.path.exists('images'):
try:
os.makedirs('images')
except OSError:
        print('Error: could not create the images target directory.')
ts = time.time()
env = gym.make(env_name)
ACTION_LOOKUP = env.unwrapped.get_action_lookup()
print("Created environment: {}".format(env_name))
def print_available_actions():
"""
    Prints all available actions, nicely formatted.
:return:
"""
available_actions_list = []
for i in range(len(ACTION_LOOKUP)):
available_actions_list.append(
'Key: {} - Action: {}'.format(i, ACTION_LOOKUP[i])
)
display_actions = '\n'.join(available_actions_list)
print()
print('Action out of Range!')
print('Available Actions:\n{}'.format(display_actions))
print()
for i_episode in range(n_rounds):
print('Starting new game!')
observation = env.reset()
for t in range(n_steps):
env.render(render_mode, scale=scale_image)
action = input('Select action: ')
try:
action = int(action)
if not action in range(len(ACTION_LOOKUP)):
raise ValueError
except ValueError:
print_available_actions()
continue
observation, reward, done, info = env.step(action, observation_mode=observation_mode)
print(ACTION_LOOKUP[action], reward, done, info)
print(len(observation), len(observation[0]), len(observation[0][0]))
if save_images:
img = Image.fromarray(np.array(env.render(render_mode, scale=scale_image)), 'RGB')
img.save(os.path.join('images', 'observation_{}_{}.png'.format(i_episode, t)))
if done:
print("Episode finished after {} timesteps".format(t+1))
env.render(render_mode, scale=scale_image)
break
if generate_gifs:
print('')
import imageio
with imageio.get_writer(os.path.join('images', 'round_{}.gif'.format(i_episode)), mode='I', fps=1) as writer:
for t in range(n_steps):
try:
filename = os.path.join('images', 'observation_{}_{}.png'.format(i_episode, t))
image = imageio.imread(filename)
writer.append_data(image)
except:
pass
env.close()
time.sleep(10)
| mit |
popazerty/enigma2-4.3 | lib/python/Components/Sources/EventInfo.py | 85 | 1245 |
from Components.PerServiceDisplay import PerServiceBase
from Components.Element import cached
from enigma import iPlayableService, iServiceInformation, eServiceReference, eEPGCache
from Source import Source
class EventInfo(PerServiceBase, Source, object):
NOW = 0
NEXT = 1
def __init__(self, navcore, now_or_next):
Source.__init__(self)
PerServiceBase.__init__(self, navcore,
{
iPlayableService.evStart: self.gotEvent,
iPlayableService.evUpdatedEventInfo: self.gotEvent,
iPlayableService.evEnd: self.gotEvent
}, with_event=True)
self.now_or_next = now_or_next
self.epgQuery = eEPGCache.getInstance().lookupEventTime
@cached
def getEvent(self):
service = self.navcore.getCurrentService()
info = service and service.info()
ret = info and info.getEvent(self.now_or_next)
if not ret and info:
refstr = info.getInfoString(iServiceInformation.sServiceref)
ret = self.epgQuery(eServiceReference(refstr), -1, self.now_or_next and 1 or 0)
return ret
event = property(getEvent)
def gotEvent(self, what):
if what == iPlayableService.evEnd:
self.changed((self.CHANGED_CLEAR,))
else:
self.changed((self.CHANGED_ALL,))
def destroy(self):
PerServiceBase.destroy(self)
Source.destroy(self)
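# Illustrative construction only (assumes a navigation core such as session.nav,
# as provided by the Enigma2 screens that instantiate this source):
#   now_event_source = EventInfo(session.nav, EventInfo.NOW)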
| gpl-2.0 |
xaviercobain88/framework-python | build/lib.linux-i686-2.7/openerp/addons/stock/stock.py | 2 | 157623 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
import time
from operator import itemgetter
from itertools import groupby
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import netsvc
from openerp import tools
from openerp.tools import float_compare, DEFAULT_SERVER_DATETIME_FORMAT
import openerp.addons.decimal_precision as dp
import logging
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# Incoterms
#----------------------------------------------------------
class stock_incoterms(osv.osv):
_name = "stock.incoterms"
_description = "Incoterms"
_columns = {
        'name': fields.char('Name', size=64, required=True, help="Incoterms are a series of sales terms. They are used to divide transaction costs and responsibilities between buyer and seller and reflect state-of-the-art transportation practices."),
'code': fields.char('Code', size=3, required=True, help="Code for Incoterms"),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide an INCOTERM without deleting it."),
}
_defaults = {
'active': True,
}
stock_incoterms()
class stock_journal(osv.osv):
_name = "stock.journal"
_description = "Stock Journal"
_columns = {
'name': fields.char('Stock Journal', size=32, required=True),
'user_id': fields.many2one('res.users', 'Responsible'),
}
_defaults = {
'user_id': lambda s, c, u, ctx: u
}
stock_journal()
#----------------------------------------------------------
# Stock Location
#----------------------------------------------------------
class stock_location(osv.osv):
_name = "stock.location"
_description = "Location"
_parent_name = "location_id"
_parent_store = True
_parent_order = 'posz,name'
_order = 'parent_left'
def name_get(self, cr, uid, ids, context=None):
# always return the full hierarchical name
res = self._complete_name(cr, uid, ids, 'complete_name', None, context=context)
return res.items()
def _complete_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for m in self.browse(cr, uid, ids, context=context):
names = [m.name]
parent = m.location_id
while parent:
names.append(parent.name)
parent = parent.location_id
res[m.id] = ' / '.join(reversed(names))
return res
def _get_sublocations(self, cr, uid, ids, context=None):
""" return all sublocations of the given stock locations (included) """
return self.search(cr, uid, [('id', 'child_of', ids)], context=context)
def _product_value(self, cr, uid, ids, field_names, arg, context=None):
"""Computes stock value (real and virtual) for a product, as well as stock qty (real and virtual).
@param field_names: Name of field
@return: Dictionary of values
"""
prod_id = context and context.get('product_id', False)
if not prod_id:
return dict([(i, {}.fromkeys(field_names, 0.0)) for i in ids])
product_product_obj = self.pool.get('product.product')
cr.execute('select distinct product_id, location_id from stock_move where location_id in %s', (tuple(ids), ))
dict1 = cr.dictfetchall()
cr.execute('select distinct product_id, location_dest_id as location_id from stock_move where location_dest_id in %s', (tuple(ids), ))
dict2 = cr.dictfetchall()
res_products_by_location = sorted(dict1+dict2, key=itemgetter('location_id'))
products_by_location = dict((k, [v['product_id'] for v in itr]) for k, itr in groupby(res_products_by_location, itemgetter('location_id')))
result = dict([(i, {}.fromkeys(field_names, 0.0)) for i in ids])
result.update(dict([(i, {}.fromkeys(field_names, 0.0)) for i in list(set([aaa['location_id'] for aaa in res_products_by_location]))]))
currency_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id.id
currency_obj = self.pool.get('res.currency')
currency = currency_obj.browse(cr, uid, currency_id, context=context)
for loc_id, product_ids in products_by_location.items():
if prod_id:
product_ids = [prod_id]
c = (context or {}).copy()
c['location'] = loc_id
for prod in product_product_obj.browse(cr, uid, product_ids, context=c):
for f in field_names:
if f == 'stock_real':
if loc_id not in result:
result[loc_id] = {}
result[loc_id][f] += prod.qty_available
elif f == 'stock_virtual':
result[loc_id][f] += prod.virtual_available
elif f == 'stock_real_value':
amount = prod.qty_available * prod.standard_price
amount = currency_obj.round(cr, uid, currency, amount)
result[loc_id][f] += amount
elif f == 'stock_virtual_value':
amount = prod.virtual_available * prod.standard_price
amount = currency_obj.round(cr, uid, currency, amount)
result[loc_id][f] += amount
return result
_columns = {
'name': fields.char('Location Name', size=64, required=True, translate=True),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide a location without deleting it."),
'usage': fields.selection([('supplier', 'Supplier Location'), ('view', 'View'), ('internal', 'Internal Location'), ('customer', 'Customer Location'), ('inventory', 'Inventory'), ('procurement', 'Procurement'), ('production', 'Production'), ('transit', 'Transit Location for Inter-Companies Transfers')], 'Location Type', required=True,
help="""* Supplier Location: Virtual location representing the source location for products coming from your suppliers
\n* View: Virtual location used to create a hierarchical structures for your warehouse, aggregating its child locations ; can't directly contain products
\n* Internal Location: Physical locations inside your own warehouses,
\n* Customer Location: Virtual location representing the destination location for products sent to your customers
\n* Inventory: Virtual location serving as counterpart for inventory operations used to correct stock levels (Physical inventories)
\n* Procurement: Virtual location serving as temporary counterpart for procurement operations when the source (supplier or production) is not known yet. This location should be empty when the procurement scheduler has finished running.
\n* Production: Virtual counterpart location for production operations: this location consumes the raw material and produces finished products
""", select = True),
# temporarily removed, as it's unused: 'allocation_method': fields.selection([('fifo', 'FIFO'), ('lifo', 'LIFO'), ('nearest', 'Nearest')], 'Allocation Method', required=True),
'complete_name': fields.function(_complete_name, type='char', size=256, string="Location Name",
store={'stock.location': (_get_sublocations, ['name', 'location_id'], 10)}),
'stock_real': fields.function(_product_value, type='float', string='Real Stock', multi="stock"),
'stock_virtual': fields.function(_product_value, type='float', string='Virtual Stock', multi="stock"),
'location_id': fields.many2one('stock.location', 'Parent Location', select=True, ondelete='cascade'),
'child_ids': fields.one2many('stock.location', 'location_id', 'Contains'),
'chained_journal_id': fields.many2one('stock.journal', 'Chaining Journal',help="Inventory Journal in which the chained move will be written, if the Chaining Type is not Transparent (no journal is used if left empty)"),
'chained_location_id': fields.many2one('stock.location', 'Chained Location If Fixed'),
'chained_location_type': fields.selection([('none', 'None'), ('customer', 'Customer'), ('fixed', 'Fixed Location')],
'Chained Location Type', required=True,
help="Determines whether this location is chained to another location, i.e. any incoming product in this location \n" \
"should next go to the chained location. The chained location is determined according to the type :"\
"\n* None: No chaining at all"\
"\n* Customer: The chained location will be taken from the Customer Location field on the Partner form of the Partner that is specified in the Picking list of the incoming products." \
"\n* Fixed Location: The chained location is taken from the next field: Chained Location if Fixed." \
),
'chained_auto_packing': fields.selection(
[('auto', 'Automatic Move'), ('manual', 'Manual Operation'), ('transparent', 'Automatic No Step Added')],
'Chaining Type',
required=True,
help="This is used only if you select a chained location type.\n" \
"The 'Automatic Move' value will create a stock move after the current one that will be "\
"validated automatically. With 'Manual Operation', the stock move has to be validated "\
"by a worker. With 'Automatic No Step Added', the location is replaced in the original move."
),
'chained_picking_type': fields.selection([('out', 'Sending Goods'), ('in', 'Getting Goods'), ('internal', 'Internal')], 'Shipping Type', help="Shipping Type of the Picking List that will contain the chained move (leave empty to automatically detect the type based on the source and destination locations)."),
        'chained_company_id': fields.many2one('res.company', 'Chained Company', help='The company the Picking List containing the chained move will belong to (leave empty to use the default company determination rules)'),
'chained_delay': fields.integer('Chaining Lead Time',help="Delay between original move and chained move in days"),
'partner_id': fields.many2one('res.partner', 'Location Address',help="Address of customer or supplier."),
'icon': fields.selection(tools.icons, 'Icon', size=64,help="Icon show in hierarchical tree view"),
'comment': fields.text('Additional Information'),
'posx': fields.integer('Corridor (X)',help="Optional localization details, for information purpose only"),
'posy': fields.integer('Shelves (Y)', help="Optional localization details, for information purpose only"),
'posz': fields.integer('Height (Z)', help="Optional localization details, for information purpose only"),
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
'stock_real_value': fields.function(_product_value, type='float', string='Real Stock Value', multi="stock", digits_compute=dp.get_precision('Account')),
'stock_virtual_value': fields.function(_product_value, type='float', string='Virtual Stock Value', multi="stock", digits_compute=dp.get_precision('Account')),
        'company_id': fields.many2one('res.company', 'Company', select=1, help='Leave this field empty if this location is shared between all companies'),
'scrap_location': fields.boolean('Scrap Location', help='Check this box to allow using this location to put scrapped/damaged goods.'),
'valuation_in_account_id': fields.many2one('account.account', 'Stock Valuation Account (Incoming)', domain = [('type','=','other')],
help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
"this account will be used to hold the value of products being moved from an internal location "
"into this location, instead of the generic Stock Output Account set on the product. "
"This has no effect for internal locations."),
'valuation_out_account_id': fields.many2one('account.account', 'Stock Valuation Account (Outgoing)', domain = [('type','=','other')],
help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
"this account will be used to hold the value of products being moved out of this location "
"and into an internal location, instead of the generic Stock Output Account set on the product. "
"This has no effect for internal locations."),
}
_defaults = {
'active': True,
'usage': 'internal',
'chained_location_type': 'none',
'chained_auto_packing': 'manual',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location', context=c),
'posx': 0,
'posy': 0,
'posz': 0,
'icon': False,
'scrap_location': False,
}
def chained_location_get(self, cr, uid, location, partner=None, product=None, context=None):
""" Finds chained location
@param location: Location id
@param partner: Partner id
@param product: Product id
@return: List of values
"""
result = None
if location.chained_location_type == 'customer':
if partner:
result = partner.property_stock_customer
elif location.chained_location_type == 'fixed':
result = location.chained_location_id
if result:
return result, location.chained_auto_packing, location.chained_delay, location.chained_journal_id and location.chained_journal_id.id or False, location.chained_company_id and location.chained_company_id.id or False, location.chained_picking_type, False
return result
def picking_type_get(self, cr, uid, from_location, to_location, context=None):
""" Gets type of picking.
@param from_location: Source location
@param to_location: Destination location
@return: Location type
"""
result = 'internal'
if (from_location.usage=='internal') and (to_location and to_location.usage in ('customer', 'supplier')):
result = 'out'
elif (from_location.usage in ('supplier', 'customer')) and (to_location.usage == 'internal'):
result = 'in'
return result
def _product_get_all_report(self, cr, uid, ids, product_ids=False, context=None):
return self._product_get_report(cr, uid, ids, product_ids, context, recursive=True)
def _product_get_report(self, cr, uid, ids, product_ids=False,
context=None, recursive=False):
""" Finds the product quantity and price for particular location.
@param product_ids: Ids of product
@param recursive: True or False
@return: Dictionary of values
"""
if context is None:
context = {}
product_obj = self.pool.get('product.product')
# Take the user company and pricetype
context['currency_id'] = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
# To be able to offer recursive or non-recursive reports we need to prevent recursive quantities by default
context['compute_child'] = False
if not product_ids:
product_ids = product_obj.search(cr, uid, [], context={'active_test': False})
products = product_obj.browse(cr, uid, product_ids, context=context)
products_by_uom = {}
products_by_id = {}
for product in products:
products_by_uom.setdefault(product.uom_id.id, [])
products_by_uom[product.uom_id.id].append(product)
products_by_id.setdefault(product.id, [])
products_by_id[product.id] = product
result = {}
result['product'] = []
for id in ids:
quantity_total = 0.0
total_price = 0.0
for uom_id in products_by_uom.keys():
fnc = self._product_get
if recursive:
fnc = self._product_all_get
ctx = context.copy()
ctx['uom'] = uom_id
qty = fnc(cr, uid, id, [x.id for x in products_by_uom[uom_id]],
context=ctx)
for product_id in qty.keys():
if not qty[product_id]:
continue
product = products_by_id[product_id]
quantity_total += qty[product_id]
# Compute based on pricetype
                    # Choose the right field, standard_price, to read
amount_unit = product.price_get('standard_price', context=context)[product.id]
price = qty[product_id] * amount_unit
total_price += price
result['product'].append({
'price': amount_unit,
'prod_name': product.name,
'code': product.default_code, # used by lot_overview_all report!
'variants': product.variants or '',
'uom': product.uom_id.name,
'prod_qty': qty[product_id],
'price_value': price,
})
result['total'] = quantity_total
result['total_price'] = total_price
return result
def _product_get_multi_location(self, cr, uid, ids, product_ids=False, context=None,
states=['done'], what=('in', 'out')):
"""
@param product_ids: Ids of product
@param states: List of states
        @param what: Tuple of move directions to consider, e.g. ('in', 'out')
        @return: Result of product.product.get_product_available() for the given locations
"""
product_obj = self.pool.get('product.product')
if context is None:
context = {}
context.update({
'states': states,
'what': what,
'location': ids
})
return product_obj.get_product_available(cr, uid, product_ids, context=context)
def _product_get(self, cr, uid, id, product_ids=False, context=None, states=None):
"""
@param product_ids:
@param states:
@return:
"""
if states is None:
states = ['done']
ids = id and [id] or []
return self._product_get_multi_location(cr, uid, ids, product_ids, context=context, states=states)
def _product_all_get(self, cr, uid, id, product_ids=False, context=None, states=None):
if states is None:
states = ['done']
# build the list of ids of children of the location given by id
ids = id and [id] or []
location_ids = self.search(cr, uid, [('location_id', 'child_of', ids)])
return self._product_get_multi_location(cr, uid, location_ids, product_ids, context, states)
def _product_virtual_get(self, cr, uid, id, product_ids=False, context=None, states=None):
if states is None:
states = ['done']
return self._product_all_get(cr, uid, id, product_ids, context, ['confirmed', 'waiting', 'assigned', 'done'])
def _product_reserve(self, cr, uid, ids, product_id, product_qty, context=None, lock=False):
"""
Attempt to find a quantity ``product_qty`` (in the product's default uom or the uom passed in ``context``) of product ``product_id``
in locations with id ``ids`` and their child locations. If ``lock`` is True, the stock.move lines
of product with id ``product_id`` in the searched location will be write-locked using Postgres's
"FOR UPDATE NOWAIT" option until the transaction is committed or rolled back, to prevent reservin
twice the same products.
If ``lock`` is True and the lock cannot be obtained (because another transaction has locked some of
the same stock.move lines), a log line will be output and False will be returned, as if there was
not enough stock.
:param product_id: Id of product to reserve
:param product_qty: Quantity of product to reserve (in the product's default uom or the uom passed in ``context``)
:param lock: if True, the stock.move lines of product with id ``product_id`` in all locations (and children locations) with ``ids`` will
be write-locked using postgres's "FOR UPDATE NOWAIT" option until the transaction is committed or rolled back. This is
to prevent reserving twice the same products.
:param context: optional context dictionary: if a 'uom' key is present it will be used instead of the default product uom to
compute the ``product_qty`` and in the return value.
:return: List of tuples in the form (qty, location_id) with the (partial) quantities that can be taken in each location to
reach the requested product_qty (``qty`` is expressed in the default uom of the product), of False if enough
products could not be found, or the lock could not be obtained (and ``lock`` was True).
"""
result = []
amount = 0.0
if context is None:
context = {}
uom_obj = self.pool.get('product.uom')
uom_rounding = self.pool.get('product.product').browse(cr, uid, product_id, context=context).uom_id.rounding
if context.get('uom'):
uom_rounding = uom_obj.browse(cr, uid, context.get('uom'), context=context).rounding
for id in self.search(cr, uid, [('location_id', 'child_of', ids)]):
if lock:
try:
# Must lock with a separate select query because FOR UPDATE can't be used with
# aggregation/group by's (when individual rows aren't identifiable).
# We use a SAVEPOINT to be able to rollback this part of the transaction without
# failing the whole transaction in case the LOCK cannot be acquired.
cr.execute("SAVEPOINT stock_location_product_reserve")
cr.execute("""SELECT id FROM stock_move
WHERE product_id=%s AND
(
(location_dest_id=%s AND
location_id<>%s AND
state='done')
OR
(location_id=%s AND
location_dest_id<>%s AND
state in ('done', 'assigned'))
)
FOR UPDATE of stock_move NOWAIT""", (product_id, id, id, id, id), log_exceptions=False)
except Exception:
# Here it's likely that the FOR UPDATE NOWAIT failed to get the LOCK,
# so we ROLLBACK to the SAVEPOINT to restore the transaction to its earlier
# state, we return False as if the products were not available, and log it:
cr.execute("ROLLBACK TO stock_location_product_reserve")
_logger.warning("Failed attempt to reserve %s x product %s, likely due to another transaction already in progress. Next attempt is likely to work. Detailed error available at DEBUG level.", product_qty, product_id)
_logger.debug("Trace of the failed product reservation attempt: ", exc_info=True)
return False
# XXX TODO: rewrite this with one single query, possibly even the quantity conversion
cr.execute("""SELECT product_uom, sum(product_qty) AS product_qty
FROM stock_move
WHERE location_dest_id=%s AND
location_id<>%s AND
product_id=%s AND
state='done'
GROUP BY product_uom
""",
(id, id, product_id))
results = cr.dictfetchall()
cr.execute("""SELECT product_uom,-sum(product_qty) AS product_qty
FROM stock_move
WHERE location_id=%s AND
location_dest_id<>%s AND
product_id=%s AND
state in ('done', 'assigned')
GROUP BY product_uom
""",
(id, id, product_id))
results += cr.dictfetchall()
total = 0.0
results2 = 0.0
for r in results:
amount = uom_obj._compute_qty(cr, uid, r['product_uom'], r['product_qty'], context.get('uom', False))
results2 += amount
total += amount
if total <= 0.0:
continue
amount = results2
compare_qty = float_compare(amount, 0, precision_rounding=uom_rounding)
if compare_qty == 1:
if amount > min(total, product_qty):
amount = min(product_qty, total)
result.append((amount, id))
product_qty -= amount
total -= amount
if product_qty <= 0.0:
return result
if total <= 0.0:
continue
return False
stock_location()
class stock_tracking(osv.osv):
_name = "stock.tracking"
_description = "Packs"
def checksum(sscc):
salt = '31' * 8 + '3'
sum = 0
for sscc_part, salt_part in zip(sscc, salt):
sum += int(sscc_part) * int(salt_part)
return (10 - (sum % 10)) % 10
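    # Worked example (hypothetical 17-digit serial): for '00000000000000001'
    # the weighted sum is 1*3 == 3, so the check digit is (10 - (3 % 10)) % 10 == 7.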
checksum = staticmethod(checksum)
def make_sscc(self, cr, uid, context=None):
sequence = self.pool.get('ir.sequence').get(cr, uid, 'stock.lot.tracking')
try:
return sequence + str(self.checksum(sequence))
except Exception:
return sequence
_columns = {
'name': fields.char('Pack Reference', size=64, required=True, select=True, help="By default, the pack reference is generated following the sscc standard. (Serial number + 1 check digit)"),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide a pack without deleting it."),
'serial': fields.char('Additional Reference', size=64, select=True, help="Other reference or serial number"),
'move_ids': fields.one2many('stock.move', 'tracking_id', 'Moves for this pack', readonly=True),
'date': fields.datetime('Creation Date', required=True),
}
_defaults = {
'active': 1,
'name': make_sscc,
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
}
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
ids = self.search(cr, user, [('serial', '=', name)]+ args, limit=limit, context=context)
ids += self.search(cr, user, [('name', operator, name)]+ args, limit=limit, context=context)
return self.name_get(cr, user, ids, context)
def name_get(self, cr, uid, ids, context=None):
"""Append the serial to the name"""
if not len(ids):
return []
res = [ (r['id'], r['serial'] and '%s [%s]' % (r['name'], r['serial'])
or r['name'] )
for r in self.read(cr, uid, ids, ['name', 'serial'],
context=context) ]
return res
def unlink(self, cr, uid, ids, context=None):
raise osv.except_osv(_('Error!'), _('You cannot remove a lot line.'))
def action_traceability(self, cr, uid, ids, context=None):
""" It traces the information of a product
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return: A dictionary of values
"""
return self.pool.get('action.traceability').action_traceability(cr,uid,ids,context)
stock_tracking()
#----------------------------------------------------------
# Stock Picking
#----------------------------------------------------------
class stock_picking(osv.osv):
_name = "stock.picking"
_inherit = ['mail.thread']
_description = "Picking List"
_order = "id desc"
def _set_maximum_date(self, cr, uid, ids, name, value, arg, context=None):
""" Calculates planned date if it is greater than 'value'.
@param name: Name of field
@param value: Value of field
@param arg: User defined argument
@return: True or False
"""
if not value:
return False
if isinstance(ids, (int, long)):
ids = [ids]
for pick in self.browse(cr, uid, ids, context=context):
sql_str = """update stock_move set
date_expected='%s'
where
picking_id=%d """ % (value, pick.id)
if pick.max_date:
sql_str += " and (date_expected='" + pick.max_date + "')"
cr.execute(sql_str)
return True
def _set_minimum_date(self, cr, uid, ids, name, value, arg, context=None):
""" Calculates planned date if it is less than 'value'.
@param name: Name of field
@param value: Value of field
@param arg: User defined argument
@return: True or False
"""
if not value:
return False
if isinstance(ids, (int, long)):
ids = [ids]
for pick in self.browse(cr, uid, ids, context=context):
sql_str = """update stock_move set
date_expected='%s'
where
picking_id=%s """ % (value, pick.id)
if pick.min_date:
sql_str += " and (date_expected='" + pick.min_date + "')"
cr.execute(sql_str)
return True
def get_min_max_date(self, cr, uid, ids, field_name, arg, context=None):
""" Finds minimum and maximum dates for picking.
@return: Dictionary of values
"""
res = {}
for id in ids:
res[id] = {'min_date': False, 'max_date': False}
if not ids:
return res
cr.execute("""select
picking_id,
min(date_expected),
max(date_expected)
from
stock_move
where
picking_id IN %s
group by
picking_id""",(tuple(ids),))
for pick, dt1, dt2 in cr.fetchall():
res[pick]['min_date'] = dt1
res[pick]['max_date'] = dt2
return res
def create(self, cr, user, vals, context=None):
if ('name' not in vals) or (vals.get('name')=='/'):
seq_obj_name = self._name
vals['name'] = self.pool.get('ir.sequence').get(cr, user, seq_obj_name)
new_id = super(stock_picking, self).create(cr, user, vals, context)
return new_id
_columns = {
'name': fields.char('Reference', size=64, select=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'origin': fields.char('Source Document', size=64, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="Reference of the document", select=True),
'backorder_id': fields.many2one('stock.picking', 'Back Order of', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True),
        'type': fields.selection([('out', 'Sending Goods'), ('in', 'Getting Goods'), ('internal', 'Internal')], 'Shipping Type', required=True, select=True, help="Shipping type specifies whether goods are coming in or going out."),
'note': fields.text('Notes', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'stock_journal_id': fields.many2one('stock.journal','Stock Journal', select=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'location_id': fields.many2one('stock.location', 'Location', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="Keep empty if you produce at the location where the finished products are needed." \
"Set a location if you produce at a fixed location. This can be a partner location " \
"if you subcontract the manufacturing operations.", select=True),
'location_dest_id': fields.many2one('stock.location', 'Dest. Location', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="Location where the system will stock the finished products.", select=True),
        'move_type': fields.selection([('direct', 'Partial'), ('one', 'All at once')], 'Delivery Method', required=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="It specifies whether goods are to be delivered partially or all at once"),
'state': fields.selection([
('draft', 'Draft'),
('cancel', 'Cancelled'),
('auto', 'Waiting Another Operation'),
('confirmed', 'Waiting Availability'),
('assigned', 'Ready to Transfer'),
('done', 'Transferred'),
], 'Status', readonly=True, select=True, track_visibility='onchange', help="""
* Draft: not confirmed yet and will not be scheduled until confirmed\n
* Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
* Waiting Availability: still waiting for the availability of products\n
* Ready to Transfer: products reserved, simply waiting for confirmation.\n
* Transferred: has been processed, can't be modified or cancelled anymore\n
* Cancelled: has been cancelled, can't be confirmed anymore"""
),
'min_date': fields.function(get_min_max_date, fnct_inv=_set_minimum_date, multi="min_max_date",
store=True, type='datetime', string='Scheduled Time', select=1, help="Scheduled time for the shipment to be processed"),
'date': fields.datetime('Creation Date', help="Creation date, usually the time of the order.", select=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'date_done': fields.datetime('Date of Transfer', help="Date of Completion", states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'max_date': fields.function(get_min_max_date, fnct_inv=_set_maximum_date, multi="min_max_date",
store=True, type='datetime', string='Max. Expected Date', select=2),
'move_lines': fields.one2many('stock.move', 'picking_id', 'Internal Moves', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
'product_id': fields.related('move_lines', 'product_id', type='many2one', relation='product.product', string='Product'),
'auto_picking': fields.boolean('Auto-Picking', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'partner_id': fields.many2one('res.partner', 'Partner', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'invoice_state': fields.selection([
("invoiced", "Invoiced"),
("2binvoiced", "To Be Invoiced"),
("none", "Not Applicable")], "Invoice Control",
select=True, required=True, readonly=True, track_visibility='onchange', states={'draft': [('readonly', False)]}),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
}
_defaults = {
'name': lambda self, cr, uid, context: '/',
'state': 'draft',
'move_type': 'direct',
'type': 'internal',
'invoice_state': 'none',
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.picking', context=c)
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Reference must be unique per Company!'),
]
def action_process(self, cr, uid, ids, context=None):
        """Open the partial picking wizard"""
        if context is None:
            context = {}
context.update({
'active_model': self._name,
'active_ids': ids,
'active_id': len(ids) and ids[0] or False
})
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.partial.picking',
'type': 'ir.actions.act_window',
'target': 'new',
'context': context,
'nodestroy': True,
}
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default = default.copy()
picking_obj = self.browse(cr, uid, id, context=context)
move_obj = self.pool.get('stock.move')
if ('name' not in default) or (picking_obj.name == '/'):
seq_obj_name = 'stock.picking.' + picking_obj.type
default['name'] = self.pool.get('ir.sequence').get(cr, uid, seq_obj_name)
default['origin'] = ''
default['backorder_id'] = False
if 'invoice_state' not in default and picking_obj.invoice_state == 'invoiced':
default['invoice_state'] = '2binvoiced'
res = super(stock_picking, self).copy(cr, uid, id, default, context)
if res:
picking_obj = self.browse(cr, uid, res, context=context)
for move in picking_obj.move_lines:
move_obj.write(cr, uid, [move.id], {'tracking_id': False, 'prodlot_id': False, 'move_history_ids2': [(6, 0, [])], 'move_history_ids': [(6, 0, [])]})
return res
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
if view_type == 'form' and not view_id:
mod_obj = self.pool.get('ir.model.data')
if self._name == "stock.picking.in":
model, view_id = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_in_form')
if self._name == "stock.picking.out":
model, view_id = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_out_form')
return super(stock_picking, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
def onchange_partner_in(self, cr, uid, ids, partner_id=None, context=None):
return {}
def action_explode(self, cr, uid, moves, context=None):
"""Hook to allow other modules to split the moves of a picking."""
return moves
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms picking.
@return: True
"""
pickings = self.browse(cr, uid, ids, context=context)
self.write(cr, uid, ids, {'state': 'confirmed'})
todo = []
for picking in pickings:
for r in picking.move_lines:
if r.state == 'draft':
todo.append(r.id)
todo = self.action_explode(cr, uid, todo, context)
if len(todo):
self.pool.get('stock.move').action_confirm(cr, uid, todo, context=context)
return True
def test_auto_picking(self, cr, uid, ids):
# TODO: Check locations to see if in the same location ?
return True
def action_assign(self, cr, uid, ids, *args):
""" Changes state of picking to available if all moves are confirmed.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids):
if pick.state == 'draft':
wf_service.trg_validate(uid, 'stock.picking', pick.id, 'button_confirm', cr)
move_ids = [x.id for x in pick.move_lines if x.state == 'confirmed']
if not move_ids:
raise osv.except_osv(_('Warning!'),_('Not enough stock, unable to reserve the products.'))
self.pool.get('stock.move').action_assign(cr, uid, move_ids)
return True
def force_assign(self, cr, uid, ids, *args):
""" Changes state of picking to available if moves are confirmed or waiting.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids):
move_ids = [x.id for x in pick.move_lines if x.state in ['confirmed','waiting']]
self.pool.get('stock.move').force_assign(cr, uid, move_ids)
wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
return True
def draft_force_assign(self, cr, uid, ids, *args):
""" Confirms picking directly from draft state.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids):
if not pick.move_lines:
raise osv.except_osv(_('Error!'),_('You cannot process picking without stock moves.'))
wf_service.trg_validate(uid, 'stock.picking', pick.id,
'button_confirm', cr)
return True
def draft_validate(self, cr, uid, ids, context=None):
""" Validates picking directly from draft state.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
self.draft_force_assign(cr, uid, ids)
for pick in self.browse(cr, uid, ids, context=context):
move_ids = [x.id for x in pick.move_lines]
self.pool.get('stock.move').force_assign(cr, uid, move_ids)
wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
return self.action_process(
cr, uid, ids, context=context)
def cancel_assign(self, cr, uid, ids, *args):
""" Cancels picking and moves.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids):
move_ids = [x.id for x in pick.move_lines]
self.pool.get('stock.move').cancel_assign(cr, uid, move_ids)
wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
return True
def action_assign_wkf(self, cr, uid, ids, context=None):
""" Changes picking state to assigned.
@return: True
"""
self.write(cr, uid, ids, {'state': 'assigned'})
return True
def test_finished(self, cr, uid, ids):
""" Tests whether the move is in done or cancel state or not.
@return: True or False
"""
move_ids = self.pool.get('stock.move').search(cr, uid, [('picking_id', 'in', ids)])
for move in self.pool.get('stock.move').browse(cr, uid, move_ids):
if move.state not in ('done', 'cancel'):
if move.product_qty != 0.0:
return False
else:
move.write({'state': 'done'})
return True
def test_assigned(self, cr, uid, ids):
""" Tests whether the move is in assigned state or not.
@return: True or False
"""
        #TOFIX: assignment of move lines should be called before testing assignment, otherwise the picking never reaches the assigned state
ok = True
for pick in self.browse(cr, uid, ids):
mt = pick.move_type
            # incoming shipments are always set as available if they aren't chained
if pick.type == 'in':
if all([x.state != 'waiting' for x in pick.move_lines]):
return True
for move in pick.move_lines:
if (move.state in ('confirmed', 'draft')) and (mt == 'one'):
return False
if (mt == 'direct') and (move.state == 'assigned') and (move.product_qty):
return True
ok = ok and (move.state in ('cancel', 'done', 'assigned'))
return ok
def action_cancel(self, cr, uid, ids, context=None):
""" Changes picking state to cancel.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
ids2 = [move.id for move in pick.move_lines]
self.pool.get('stock.move').action_cancel(cr, uid, ids2, context)
self.write(cr, uid, ids, {'state': 'cancel', 'invoice_state': 'none'})
return True
#
# TODO: change and create a move if not parents
#
def action_done(self, cr, uid, ids, context=None):
"""Changes picking state to done.
This method is called at the end of the workflow by the activity "done".
@return: True
"""
self.write(cr, uid, ids, {'state': 'done', 'date_done': time.strftime('%Y-%m-%d %H:%M:%S')})
return True
def action_move(self, cr, uid, ids, context=None):
"""Process the Stock Moves of the Picking
This method is called by the workflow by the activity "move".
Normally that happens when the signal button_done is received (button
"Done" pressed on a Picking view).
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
todo = []
for move in pick.move_lines:
if move.state == 'draft':
self.pool.get('stock.move').action_confirm(cr, uid, [move.id],
context=context)
todo.append(move.id)
elif move.state in ('assigned','confirmed'):
todo.append(move.id)
if len(todo):
self.pool.get('stock.move').action_done(cr, uid, todo,
context=context)
return True
def get_currency_id(self, cr, uid, picking):
return False
def _get_partner_to_invoice(self, cr, uid, picking, context=None):
""" Gets the partner that will be invoiced
Note that this function is inherited in the sale and purchase modules
@param picking: object of the picking for which we are selecting the partner to invoice
@return: object of the partner to invoice
"""
return picking.partner_id and picking.partner_id.id
def _get_comment_invoice(self, cr, uid, picking):
"""
@return: comment string for invoice
"""
return picking.note or ''
def _get_price_unit_invoice(self, cr, uid, move_line, type, context=None):
""" Gets price unit for invoice
@param move_line: Stock move lines
@param type: Type of invoice
@return: The price unit for the move line
"""
if context is None:
context = {}
if type in ('in_invoice', 'in_refund'):
# Take the user company and pricetype
context['currency_id'] = move_line.company_id.currency_id.id
amount_unit = move_line.product_id.price_get('standard_price', context=context)[move_line.product_id.id]
return amount_unit
else:
return move_line.product_id.list_price
def _get_discount_invoice(self, cr, uid, move_line):
'''Return the discount for the move line'''
return 0.0
def _get_taxes_invoice(self, cr, uid, move_line, type):
""" Gets taxes on invoice
@param move_line: Stock move lines
@param type: Type of invoice
@return: Taxes Ids for the move line
"""
if type in ('in_invoice', 'in_refund'):
taxes = move_line.product_id.supplier_taxes_id
else:
taxes = move_line.product_id.taxes_id
if move_line.picking_id and move_line.picking_id.partner_id and move_line.picking_id.partner_id.id:
return self.pool.get('account.fiscal.position').map_tax(
cr,
uid,
move_line.picking_id.partner_id.property_account_position,
taxes
)
else:
return map(lambda x: x.id, taxes)
def _get_account_analytic_invoice(self, cr, uid, picking, move_line):
return False
def _invoice_line_hook(self, cr, uid, move_line, invoice_line_id):
'''Call after the creation of the invoice line'''
return
def _invoice_hook(self, cr, uid, picking, invoice_id):
'''Call after the creation of the invoice'''
return
def _get_invoice_type(self, pick):
src_usage = dest_usage = None
inv_type = None
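# Summary of the mapping applied below, derived from the conditions that follow
# (picking type, counterpart location usage) -> invoice type:
#   ('out', 'supplier') -> 'in_refund'    ('out', 'customer') -> 'out_invoice'
#   ('in',  'supplier') -> 'in_invoice'   ('in',  'customer') -> 'out_refund'
#   any other combination -> 'out_invoice'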
if pick.invoice_state == '2binvoiced':
if pick.move_lines:
src_usage = pick.move_lines[0].location_id.usage
dest_usage = pick.move_lines[0].location_dest_id.usage
if pick.type == 'out' and dest_usage == 'supplier':
inv_type = 'in_refund'
elif pick.type == 'out' and dest_usage == 'customer':
inv_type = 'out_invoice'
elif pick.type == 'in' and src_usage == 'supplier':
inv_type = 'in_invoice'
elif pick.type == 'in' and src_usage == 'customer':
inv_type = 'out_refund'
else:
inv_type = 'out_invoice'
return inv_type
def _prepare_invoice_group(self, cr, uid, picking, partner, invoice, context=None):
""" Builds the dict for grouped invoices
@param picking: picking object
@param partner: object of the partner to invoice (not used here, but may be useful when this function is inherited)
@param invoice: object of the invoice that we are updating
@return: dict that will be used to update the invoice
"""
comment = self._get_comment_invoice(cr, uid, picking)
return {
'name': (invoice.name or '') + ', ' + (picking.name or ''),
'origin': (invoice.origin or '') + ', ' + (picking.name or '') + (picking.origin and (':' + picking.origin) or ''),
'comment': (comment and (invoice.comment and invoice.comment + "\n" + comment or comment)) or (invoice.comment and invoice.comment or ''),
'date_invoice': context.get('date_inv', False),
'user_id': uid,
}
def _prepare_invoice(self, cr, uid, picking, partner, inv_type, journal_id, context=None):
""" Builds the dict containing the values for the invoice
@param picking: picking object
@param partner: object of the partner to invoice
@param inv_type: type of the invoice ('out_invoice', 'in_invoice', ...)
@param journal_id: ID of the accounting journal
@return: dict that will be used to create the invoice object
"""
if isinstance(partner, int):
partner = self.pool.get('res.partner').browse(cr, uid, partner, context=context)
if inv_type in ('out_invoice', 'out_refund'):
account_id = partner.property_account_receivable.id
payment_term = partner.property_payment_term.id or False
else:
account_id = partner.property_account_payable.id
payment_term = partner.property_supplier_payment_term.id or False
comment = self._get_comment_invoice(cr, uid, picking)
invoice_vals = {
'name': picking.name,
'origin': (picking.name or '') + (picking.origin and (':' + picking.origin) or ''),
'type': inv_type,
'account_id': account_id,
'partner_id': partner.id,
'comment': comment,
'payment_term': payment_term,
'fiscal_position': partner.property_account_position.id,
'date_invoice': context.get('date_inv', False),
'company_id': picking.company_id.id,
'user_id': uid,
}
cur_id = self.get_currency_id(cr, uid, picking)
if cur_id:
invoice_vals['currency_id'] = cur_id
if journal_id:
invoice_vals['journal_id'] = journal_id
return invoice_vals
def _prepare_invoice_line(self, cr, uid, group, picking, move_line, invoice_id,
invoice_vals, context=None):
""" Builds the dict containing the values for the invoice line
@param group: True or False
@param picking: picking object
@param move_line: move_line object
@param invoice_id: ID of the related invoice
@param invoice_vals: dict used to create the invoice
@return: dict that will be used to create the invoice line
"""
if group:
name = (picking.name or '') + '-' + move_line.name
else:
name = move_line.name
origin = move_line.picking_id.name or ''
if move_line.picking_id.origin:
origin += ':' + move_line.picking_id.origin
if invoice_vals['type'] in ('out_invoice', 'out_refund'):
account_id = move_line.product_id.property_account_income.id
if not account_id:
account_id = move_line.product_id.categ_id.\
property_account_income_categ.id
else:
account_id = move_line.product_id.property_account_expense.id
if not account_id:
account_id = move_line.product_id.categ_id.\
property_account_expense_categ.id
if invoice_vals['fiscal_position']:
fp_obj = self.pool.get('account.fiscal.position')
fiscal_position = fp_obj.browse(cr, uid, invoice_vals['fiscal_position'], context=context)
account_id = fp_obj.map_account(cr, uid, fiscal_position, account_id)
# set UoS if it's a sale and the picking doesn't have one
uos_id = move_line.product_uos and move_line.product_uos.id or False
if not uos_id and invoice_vals['type'] in ('out_invoice', 'out_refund'):
uos_id = move_line.product_uom.id
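# Note: the ORM many2many command (6, 0, ids) used below for
# 'invoice_line_tax_id' replaces the whole set of linked taxes with 'ids'.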
return {
'name': name,
'origin': origin,
'invoice_id': invoice_id,
'uos_id': uos_id,
'product_id': move_line.product_id.id,
'account_id': account_id,
'price_unit': self._get_price_unit_invoice(cr, uid, move_line, invoice_vals['type']),
'discount': self._get_discount_invoice(cr, uid, move_line),
'quantity': move_line.product_uos_qty or move_line.product_qty,
'invoice_line_tax_id': [(6, 0, self._get_taxes_invoice(cr, uid, move_line, invoice_vals['type']))],
'account_analytic_id': self._get_account_analytic_invoice(cr, uid, picking, move_line),
}
def action_invoice_create(self, cr, uid, ids, journal_id=False,
group=False, type='out_invoice', context=None):
""" Creates invoice based on the invoice state selected for picking.
@param journal_id: Id of journal
@param group: Whether to create a group invoice or not
@param type: Type of invoice to be created
@return: Ids of created invoices for the pickings
"""
if context is None:
context = {}
invoice_obj = self.pool.get('account.invoice')
invoice_line_obj = self.pool.get('account.invoice.line')
partner_obj = self.pool.get('res.partner')
invoices_group = {}
res = {}
inv_type = type
for picking in self.browse(cr, uid, ids, context=context):
if picking.invoice_state != '2binvoiced':
continue
partner = self._get_partner_to_invoice(cr, uid, picking, context=context)
if isinstance(partner, int):
partner = partner_obj.browse(cr, uid, [partner], context=context)[0]
if not partner:
raise osv.except_osv(_('Error, no partner!'),
_('Please put a partner on the picking list if you want to generate an invoice.'))
if not inv_type:
inv_type = self._get_invoice_type(picking)
if group and partner.id in invoices_group:
invoice_id = invoices_group[partner.id]
invoice = invoice_obj.browse(cr, uid, invoice_id)
invoice_vals_group = self._prepare_invoice_group(cr, uid, picking, partner, invoice, context=context)
invoice_obj.write(cr, uid, [invoice_id], invoice_vals_group, context=context)
else:
invoice_vals = self._prepare_invoice(cr, uid, picking, partner, inv_type, journal_id, context=context)
invoice_id = invoice_obj.create(cr, uid, invoice_vals, context=context)
invoices_group[partner.id] = invoice_id
res[picking.id] = invoice_id
for move_line in picking.move_lines:
if move_line.state == 'cancel':
continue
if move_line.scrapped:
# do not invoice scrapped products
continue
vals = self._prepare_invoice_line(cr, uid, group, picking, move_line,
invoice_id, invoice_vals, context=context)
if vals:
invoice_line_id = invoice_line_obj.create(cr, uid, vals, context=context)
self._invoice_line_hook(cr, uid, move_line, invoice_line_id)
invoice_obj.button_compute(cr, uid, [invoice_id], context=context,
set_total=(inv_type in ('in_invoice', 'in_refund')))
self.write(cr, uid, [picking.id], {
'invoice_state': 'invoiced',
}, context=context)
self._invoice_hook(cr, uid, picking, invoice_id)
self.write(cr, uid, res.keys(), {
'invoice_state': 'invoiced',
}, context=context)
return res
def test_done(self, cr, uid, ids, context=None):
""" Test whether the move lines are done or not.
@return: True or False
"""
ok = False
for pick in self.browse(cr, uid, ids, context=context):
if not pick.move_lines:
return True
for move in pick.move_lines:
if move.state not in ('cancel','done'):
return False
if move.state=='done':
ok = True
return ok
def test_cancel(self, cr, uid, ids, context=None):
""" Test whether the move lines are canceled or not.
@return: True or False
"""
for pick in self.browse(cr, uid, ids, context=context):
for move in pick.move_lines:
if move.state not in ('cancel',):
return False
return True
def allow_cancel(self, cr, uid, ids, context=None):
for pick in self.browse(cr, uid, ids, context=context):
if not pick.move_lines:
return True
for move in pick.move_lines:
if move.state == 'done':
raise osv.except_osv(_('Error!'), _('You cannot cancel the picking as some moves have been done. You should cancel the picking lines.'))
return True
def unlink(self, cr, uid, ids, context=None):
move_obj = self.pool.get('stock.move')
if context is None:
context = {}
for pick in self.browse(cr, uid, ids, context=context):
if pick.state in ['done','cancel']:
raise osv.except_osv(_('Error!'), _('You cannot remove the picking which is in %s state!')%(pick.state,))
else:
ids2 = [move.id for move in pick.move_lines]
ctx = context.copy()
ctx.update({'call_unlink':True})
if pick.state != 'draft':
#Cancel the move in order to update the virtual stock of the product
move_obj.action_cancel(cr, uid, ids2, ctx)
#Removing the move
move_obj.unlink(cr, uid, ids2, ctx)
return super(stock_picking, self).unlink(cr, uid, ids, context=context)
# FIXME: needs refactoring, this code is partially duplicated in stock_move.do_partial()!
def do_partial(self, cr, uid, ids, partial_datas, context=None):
""" Makes partial picking and moves done.
@param partial_datas : Dictionary containing details of partial picking
like partner_id, delivery_date,
delivery moves with product_id, product_qty, uom
@return: Dictionary of values
"""
if context is None:
context = {}
else:
context = dict(context)
res = {}
move_obj = self.pool.get('stock.move')
product_obj = self.pool.get('product.product')
currency_obj = self.pool.get('res.currency')
uom_obj = self.pool.get('product.uom')
sequence_obj = self.pool.get('ir.sequence')
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids, context=context):
new_picking = None
complete, too_many, too_few = [], [], []
move_product_qty, prodlot_ids, product_avail, partial_qty, product_uoms = {}, {}, {}, {}, {}
for move in pick.move_lines:
if move.state in ('done', 'cancel'):
continue
partial_data = partial_datas.get('move%s'%(move.id), {})
product_qty = partial_data.get('product_qty',0.0)
move_product_qty[move.id] = product_qty
product_uom = partial_data.get('product_uom',False)
product_price = partial_data.get('product_price',0.0)
product_currency = partial_data.get('product_currency',False)
prodlot_id = partial_data.get('prodlot_id')
prodlot_ids[move.id] = prodlot_id
product_uoms[move.id] = product_uom
partial_qty[move.id] = uom_obj._compute_qty(cr, uid, product_uoms[move.id], product_qty, move.product_uom.id)
if move.product_qty == partial_qty[move.id]:
complete.append(move)
elif move.product_qty > partial_qty[move.id]:
too_few.append(move)
else:
too_many.append(move)
# Average price computation
if (pick.type == 'in') and (move.product_id.cost_method == 'average'):
product = product_obj.browse(cr, uid, move.product_id.id)
move_currency_id = move.company_id.currency_id.id
context['currency_id'] = move_currency_id
qty = uom_obj._compute_qty(cr, uid, product_uom, product_qty, product.uom_id.id)
if product.id in product_avail:
product_avail[product.id] += qty
else:
product_avail[product.id] = product.qty_available
if qty > 0:
new_price = currency_obj.compute(cr, uid, product_currency,
move_currency_id, product_price)
new_price = uom_obj._compute_price(cr, uid, product_uom, new_price,
product.uom_id.id)
if product.qty_available <= 0:
new_std_price = new_price
else:
# Get the standard price
amount_unit = product.price_get('standard_price', context=context)[product.id]
new_std_price = ((amount_unit * product_avail[product.id])\
+ (new_price * qty))/(product_avail[product.id] + qty)
# Write the field according to price type field
product_obj.write(cr, uid, [product.id], {'standard_price': new_std_price})
# Record the values that were chosen in the wizard, so they can be
# used for inventory valuation if real-time valuation is enabled.
move_obj.write(cr, uid, [move.id],
{'price_unit': product_price,
'price_currency_id': product_currency})
for move in too_few:
product_qty = move_product_qty[move.id]
if not new_picking:
new_picking_name = pick.name
self.write(cr, uid, [pick.id],
{'name': sequence_obj.get(cr, uid,
'stock.picking.%s'%(pick.type)),
})
new_picking = self.copy(cr, uid, pick.id,
{
'name': new_picking_name,
'move_lines' : [],
'state':'draft',
})
if product_qty != 0:
defaults = {
'product_qty' : product_qty,
'product_uos_qty': product_qty, #TODO: put correct uos_qty
'picking_id' : new_picking,
'state': 'assigned',
'move_dest_id': False,
'price_unit': move.price_unit,
'product_uom': product_uoms[move.id]
}
prodlot_id = prodlot_ids[move.id]
if prodlot_id:
defaults.update(prodlot_id=prodlot_id)
move_obj.copy(cr, uid, move.id, defaults)
move_obj.write(cr, uid, [move.id],
{
'product_qty': move.product_qty - partial_qty[move.id],
'product_uos_qty': move.product_qty - partial_qty[move.id], #TODO: put correct uos_qty
'prodlot_id': False,
'tracking_id': False,
})
if new_picking:
move_obj.write(cr, uid, [c.id for c in complete], {'picking_id': new_picking})
for move in complete:
defaults = {'product_uom': product_uoms[move.id], 'product_qty': move_product_qty[move.id]}
if prodlot_ids.get(move.id):
defaults.update({'prodlot_id': prodlot_ids[move.id]})
move_obj.write(cr, uid, [move.id], defaults)
for move in too_many:
product_qty = move_product_qty[move.id]
defaults = {
'product_qty' : product_qty,
'product_uos_qty': product_qty, #TODO: put correct uos_qty
'product_uom': product_uoms[move.id]
}
prodlot_id = prodlot_ids.get(move.id)
if prodlot_ids.get(move.id):
defaults.update(prodlot_id=prodlot_id)
if new_picking:
defaults.update(picking_id=new_picking)
move_obj.write(cr, uid, [move.id], defaults)
# First, confirm the new picking (if necessary)
if new_picking:
wf_service.trg_validate(uid, 'stock.picking', new_picking, 'button_confirm', cr)
# Then process and close the picking that was actually done
self.write(cr, uid, [pick.id], {'backorder_id': new_picking})
self.action_move(cr, uid, [new_picking], context=context)
wf_service.trg_validate(uid, 'stock.picking', new_picking, 'button_done', cr)
wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
delivered_pack_id = new_picking
back_order_name = self.browse(cr, uid, delivered_pack_id, context=context).name
self.message_post(cr, uid, ids, body=_("Back order <em>%s</em> has been <b>created</b>.") % (back_order_name), context=context)
else:
self.action_move(cr, uid, [pick.id], context=context)
wf_service.trg_validate(uid, 'stock.picking', pick.id, 'button_done', cr)
delivered_pack_id = pick.id
delivered_pack = self.browse(cr, uid, delivered_pack_id, context=context)
res[pick.id] = {'delivered_picking': delivered_pack.id or False}
return res
# views associated to each picking type
_VIEW_LIST = {
'out': 'view_picking_out_form',
'in': 'view_picking_in_form',
'internal': 'view_picking_form',
}
def _get_view_id(self, cr, uid, type):
"""Get the view id suiting the given type
@param type: the picking type as a string
@return: view id, or False if no view was found
"""
res = self.pool.get('ir.model.data').get_object_reference(cr, uid,
'stock', self._VIEW_LIST.get(type, 'view_picking_form'))
return res and res[1] or False
class stock_production_lot(osv.osv):
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
reads = self.read(cr, uid, ids, ['name', 'prefix', 'ref'], context)
res = []
for record in reads:
name = record['name']
prefix = record['prefix']
if prefix:
name = prefix + '/' + name
if record['ref']:
name = '%s [%s]' % (name, record['ref'])
res.append((record['id'], name))
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
args = args or []
ids = []
if name:
ids = self.search(cr, uid, [('prefix', '=', name)] + args, limit=limit, context=context)
if not ids:
ids = self.search(cr, uid, [('name', operator, name)] + args, limit=limit, context=context)
else:
ids = self.search(cr, uid, args, limit=limit, context=context)
return self.name_get(cr, uid, ids, context)
_name = 'stock.production.lot'
_description = 'Serial Number'
def _get_stock(self, cr, uid, ids, field_name, arg, context=None):
""" Gets stock of products for locations
@return: Dictionary of values
"""
if context is None:
context = {}
if 'location_id' not in context:
locations = self.pool.get('stock.location').search(cr, uid, [('usage', '=', 'internal')], context=context)
else:
locations = context['location_id'] and [context['location_id']] or []
if isinstance(ids, (int, long)):
ids = [ids]
res = {}.fromkeys(ids, 0.0)
if locations:
cr.execute('''select
prodlot_id,
sum(qty)
from
stock_report_prodlots
where
location_id IN %s and prodlot_id IN %s group by prodlot_id''',(tuple(locations),tuple(ids),))
res.update(dict(cr.fetchall()))
return res
def _stock_search(self, cr, uid, obj, name, args, context=None):
""" Searches Ids of products
@return: Ids of locations
"""
locations = self.pool.get('stock.location').search(cr, uid, [('usage', '=', 'internal')])
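# Note: the comparison operator and value coming from the search domain
# (args[0][1] and args[0][2]) are concatenated directly into the SQL
# HAVING clause below.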
cr.execute('''select
prodlot_id,
sum(qty)
from
stock_report_prodlots
where
location_id IN %s group by prodlot_id
having sum(qty) '''+ str(args[0][1]) + str(args[0][2]),(tuple(locations),))
res = cr.fetchall()
ids = [('id', 'in', map(lambda x: x[0], res))]
return ids
_columns = {
'name': fields.char('Serial Number', size=64, required=True, help="Unique Serial Number, will be displayed as: PREFIX/SERIAL [INT_REF]"),
'ref': fields.char('Internal Reference', size=256, help="Internal reference number in case it differs from the manufacturer's serial number"),
'prefix': fields.char('Prefix', size=64, help="Optional prefix to prepend when displaying this serial number: PREFIX/SERIAL [INT_REF]"),
'product_id': fields.many2one('product.product', 'Product', required=True, domain=[('type', '<>', 'service')]),
'date': fields.datetime('Creation Date', required=True),
'stock_available': fields.function(_get_stock, fnct_search=_stock_search, type="float", string="Available", select=True,
help="Current quantity of products with this Serial Number available in company warehouses",
digits_compute=dp.get_precision('Product Unit of Measure')),
'revisions': fields.one2many('stock.production.lot.revision', 'lot_id', 'Revisions'),
'company_id': fields.many2one('res.company', 'Company', select=True),
'move_ids': fields.one2many('stock.move', 'prodlot_id', 'Moves for this serial number', readonly=True),
}
_defaults = {
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'stock.lot.serial'),
'product_id': lambda x, y, z, c: c.get('product_id', False),
}
_sql_constraints = [
('name_ref_uniq', 'unique (name, ref)', 'The combination of Serial Number and internal reference must be unique !'),
]
def action_traceability(self, cr, uid, ids, context=None):
""" It traces the information of a product
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return: A dictionary of values
"""
value=self.pool.get('action.traceability').action_traceability(cr,uid,ids,context)
return value
def copy(self, cr, uid, id, default=None, context=None):
context = context or {}
default = default and default.copy() or {}
default.update(date=time.strftime('%Y-%m-%d %H:%M:%S'), move_ids=[])
return super(stock_production_lot, self).copy(cr, uid, id, default=default, context=context)
stock_production_lot()
class stock_production_lot_revision(osv.osv):
_name = 'stock.production.lot.revision'
_description = 'Serial Number Revision'
_columns = {
'name': fields.char('Revision Name', size=64, required=True),
'description': fields.text('Description'),
'date': fields.date('Revision Date'),
'indice': fields.char('Revision Number', size=16),
'author_id': fields.many2one('res.users', 'Author'),
'lot_id': fields.many2one('stock.production.lot', 'Serial Number', select=True, ondelete='cascade'),
'company_id': fields.related('lot_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
}
_defaults = {
'author_id': lambda x, y, z, c: z,
'date': fields.date.context_today,
}
stock_production_lot_revision()
# ----------------------------------------------------
# Move
# ----------------------------------------------------
#
# Fields:
# location_dest_id is only used for predicting future stocks
#
class stock_move(osv.osv):
def _getSSCC(self, cr, uid, context=None):
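# Returns the id of the most recent pack (stock.tracking) created by the
# current user, or False if the user has not created any.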
cr.execute('select id from stock_tracking where create_uid=%s order by id desc limit 1', (uid,))
res = cr.fetchone()
return (res and res[0]) or False
_name = "stock.move"
_description = "Stock Move"
_order = 'date_expected desc, id'
_log_create = False
def action_partial_move(self, cr, uid, ids, context=None):
if context is None: context = {}
if context.get('active_model') != self._name:
context.update(active_ids=ids, active_model=self._name)
partial_id = self.pool.get("stock.partial.move").create(
cr, uid, {}, context=context)
return {
'name':_("Products to Process"),
'view_mode': 'form',
'view_id': False,
'view_type': 'form',
'res_model': 'stock.partial.move',
'res_id': partial_id,
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'new',
'domain': '[]',
'context': context
}
def name_get(self, cr, uid, ids, context=None):
res = []
for line in self.browse(cr, uid, ids, context=context):
name = line.location_id.name+' > '+line.location_dest_id.name
# optional prefixes
if line.product_id.code:
name = line.product_id.code + ': ' + name
if line.picking_id.origin:
name = line.picking_id.origin + '/ ' + name
res.append((line.id, name))
return res
def _check_tracking(self, cr, uid, ids, context=None):
""" Checks if serial number is assigned to stock move or not.
@return: True or False
"""
for move in self.browse(cr, uid, ids, context=context):
if not move.prodlot_id and \
(move.state == 'done' and \
( \
(move.product_id.track_production and move.location_id.usage == 'production') or \
(move.product_id.track_production and move.location_dest_id.usage == 'production') or \
(move.product_id.track_incoming and move.location_id.usage == 'supplier') or \
(move.product_id.track_outgoing and move.location_dest_id.usage == 'customer') or \
(move.product_id.track_incoming and move.location_id.usage == 'inventory') \
)):
return False
return True
def _check_product_lot(self, cr, uid, ids, context=None):
""" Checks whether move is done or not and production lot is assigned to that move.
@return: True or False
"""
for move in self.browse(cr, uid, ids, context=context):
if move.prodlot_id and move.state == 'done' and (move.prodlot_id.product_id.id != move.product_id.id):
return False
return True
_columns = {
'name': fields.char('Description', required=True, select=True),
'priority': fields.selection([('0', 'Not urgent'), ('1', 'Urgent')], 'Priority'),
'create_date': fields.datetime('Creation Date', readonly=True, select=True),
'date': fields.datetime('Date', required=True, select=True, help="Move date: scheduled date until move is done, then date of actual move processing", states={'done': [('readonly', True)]}),
'date_expected': fields.datetime('Scheduled Date', states={'done': [('readonly', True)]},required=True, select=True, help="Scheduled date for the processing of this move"),
'product_id': fields.many2one('product.product', 'Product', required=True, select=True, domain=[('type','<>','service')],states={'done': [('readonly', True)]}),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
required=True,states={'done': [('readonly', True)]},
help="This is the quantity of products from an inventory "
"point of view. For moves in the state 'done', this is the "
"quantity of products that were actually moved. For other "
"moves, this is the quantity of product that is planned to "
"be moved. Lowering this quantity does not generate a "
"backorder. Changing this quantity on assigned moves affects "
"the product reservation, and should be done with care."
),
'product_uom': fields.many2one('product.uom', 'Unit of Measure', required=True,states={'done': [('readonly', True)]}),
'product_uos_qty': fields.float('Quantity (UOS)', digits_compute=dp.get_precision('Product Unit of Measure'), states={'done': [('readonly', True)]}),
'product_uos': fields.many2one('product.uom', 'Product UOS', states={'done': [('readonly', True)]}),
'product_packaging': fields.many2one('product.packaging', 'Packaging', help="It specifies attributes of packaging like type, quantity of packaging,etc."),
'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True,states={'done': [('readonly', True)]}, help="Sets a location if you produce at a fixed location. This can be a partner location if you subcontract the manufacturing operations."),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True,states={'done': [('readonly', True)]}, select=True, help="Location where the system will stock the finished products."),
'partner_id': fields.many2one('res.partner', 'Destination Address ', states={'done': [('readonly', True)]}, help="Optional address where goods are to be delivered, specifically used for allotment"),
'prodlot_id': fields.many2one('stock.production.lot', 'Serial Number', states={'done': [('readonly', True)]}, help="Serial number is used to put a serial number on the production", select=True),
'tracking_id': fields.many2one('stock.tracking', 'Pack', select=True, states={'done': [('readonly', True)]}, help="Logistical shipping unit: pallet, box, pack ..."),
'auto_validate': fields.boolean('Auto Validate'),
'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Optional: next stock move when chaining them", select=True),
'move_history_ids': fields.many2many('stock.move', 'stock_move_history_ids', 'parent_id', 'child_id', 'Move History (child moves)'),
'move_history_ids2': fields.many2many('stock.move', 'stock_move_history_ids', 'child_id', 'parent_id', 'Move History (parent moves)'),
'picking_id': fields.many2one('stock.picking', 'Reference', select=True,states={'done': [('readonly', True)]}),
'note': fields.text('Notes'),
'state': fields.selection([('draft', 'New'),
('cancel', 'Cancelled'),
('waiting', 'Waiting Another Move'),
('confirmed', 'Waiting Availability'),
('assigned', 'Available'),
('done', 'Done'),
], 'Status', readonly=True, select=True,
help= "* New: When the stock move is created and not yet confirmed.\n"\
"* Waiting Another Move: This state can be seen when a move is waiting for another one, for example in a chained flow.\n"\
"* Waiting Availability: This state is reached when the procurement resolution is not straight forward. It may need the scheduler to run, a component to me manufactured...\n"\
"* Available: When products are reserved, it is set to \'Available\'.\n"\
"* Done: When the shipment is processed, the state is \'Done\'."),
'price_unit': fields.float('Unit Price', digits_compute= dp.get_precision('Product Price'), help="Technical field used to record the product cost set by the user during a picking confirmation (when average price costing method is used)"),
'price_currency_id': fields.many2one('res.currency', 'Currency for average price', help="Technical field used to record the currency chosen by the user during a picking confirmation (when average price costing method is used)"),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True),
'backorder_id': fields.related('picking_id','backorder_id',type='many2one', relation="stock.picking", string="Back Order of", select=True),
'origin': fields.related('picking_id','origin',type='char', size=64, relation="stock.picking", string="Source", store=True),
# used for colors in tree views:
'scrapped': fields.related('location_dest_id','scrap_location',type='boolean',relation='stock.location',string='Scrapped', readonly=True),
'type': fields.related('picking_id', 'type', type='selection', selection=[('out', 'Sending Goods'), ('in', 'Getting Goods'), ('internal', 'Internal')], string='Shipping Type'),
}
def _check_location(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context=context):
if (record.state=='done') and (record.location_id.usage == 'view'):
raise osv.except_osv(_('Error'), _('You cannot move product %s from a location of type view %s.')% (record.product_id.name, record.location_id.name))
if (record.state=='done') and (record.location_dest_id.usage == 'view' ):
raise osv.except_osv(_('Error'), _('You cannot move product %s to a location of type view %s.')% (record.product_id.name, record.location_dest_id.name))
return True
_constraints = [
(_check_tracking,
'You must assign a serial number for this product.',
['prodlot_id']),
(_check_location, 'You cannot move products from or to a location of the type view.',
['location_id','location_dest_id']),
(_check_product_lot,
'You try to assign a lot which is not from the same product.',
['prodlot_id'])]
def _default_location_destination(self, cr, uid, context=None):
""" Gets default address of partner for destination location
@return: Address id or False
"""
mod_obj = self.pool.get('ir.model.data')
picking_type = context.get('picking_type')
location_id = False
if context is None:
context = {}
if context.get('move_line', []):
if context['move_line'][0]:
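# context['move_line'] may contain one2many commands such as (0, 0, values);
# in that case index 2 holds the values dict with 'location_dest_id'.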
if isinstance(context['move_line'][0], (tuple, list)):
location_id = context['move_line'][0][2] and context['move_line'][0][2].get('location_dest_id',False)
else:
move_list = self.pool.get('stock.move').read(cr, uid, context['move_line'][0], ['location_dest_id'])
location_id = move_list and move_list['location_dest_id'][0] or False
elif context.get('address_out_id', False):
property_out = self.pool.get('res.partner').browse(cr, uid, context['address_out_id'], context).property_stock_customer
location_id = property_out and property_out.id or False
else:
location_xml_id = False
if picking_type in ('in', 'internal'):
location_xml_id = 'stock_location_stock'
elif picking_type == 'out':
location_xml_id = 'stock_location_customers'
if location_xml_id:
location_model, location_id = mod_obj.get_object_reference(cr, uid, 'stock', location_xml_id)
return location_id
def _default_location_source(self, cr, uid, context=None):
""" Gets default address of partner for source location
@return: Address id or False
"""
mod_obj = self.pool.get('ir.model.data')
picking_type = context.get('picking_type')
location_id = False
if context is None:
context = {}
if context.get('move_line', []):
try:
location_id = context['move_line'][0][2]['location_id']
except:
pass
elif context.get('address_in_id', False):
part_obj_add = self.pool.get('res.partner').browse(cr, uid, context['address_in_id'], context=context)
if part_obj_add:
location_id = part_obj_add.property_stock_supplier.id
else:
location_xml_id = False
if picking_type == 'in':
location_xml_id = 'stock_location_suppliers'
elif picking_type in ('out', 'internal'):
location_xml_id = 'stock_location_stock'
if location_xml_id:
location_model, location_id = mod_obj.get_object_reference(cr, uid, 'stock', location_xml_id)
return location_id
def _default_destination_address(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return user.company_id.partner_id.id
def _default_move_type(self, cr, uid, context=None):
""" Gets default type of move
@return: type
"""
if context is None:
context = {}
picking_type = context.get('picking_type')
type = 'internal'
if picking_type == 'in':
type = 'in'
elif picking_type == 'out':
type = 'out'
return type
_defaults = {
'location_id': _default_location_source,
'location_dest_id': _default_location_destination,
'partner_id': _default_destination_address,
'type': _default_move_type,
'state': 'draft',
'priority': '1',
'product_qty': 1.0,
'scrapped' : False,
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.move', context=c),
'date_expected': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
}
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
if uid != 1:
frozen_fields = set(['product_qty', 'product_uom', 'product_uos_qty', 'product_uos', 'location_id', 'location_dest_id', 'product_id'])
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
if frozen_fields.intersection(vals):
raise osv.except_osv(_('Operation Forbidden!'),
_('Quantities, Units of Measure, Products and Locations cannot be modified on stock moves that have already been processed (except by the Administrator).'))
return super(stock_move, self).write(cr, uid, ids, vals, context=context)
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default = default.copy()
default.update({'move_history_ids2': [], 'move_history_ids': []})
return super(stock_move, self).copy(cr, uid, id, default, context=context)
def _auto_init(self, cursor, context=None):
res = super(stock_move, self)._auto_init(cursor, context=context)
cursor.execute('SELECT indexname \
FROM pg_indexes \
WHERE indexname = \'stock_move_location_id_location_dest_id_product_id_state\'')
if not cursor.fetchone():
cursor.execute('CREATE INDEX stock_move_location_id_location_dest_id_product_id_state \
ON stock_move (product_id, state, location_id, location_dest_id)')
return res
def onchange_lot_id(self, cr, uid, ids, prodlot_id=False, product_qty=False,
loc_id=False, product_id=False, uom_id=False, context=None):
""" On change of production lot gives a warning message.
@param prodlot_id: Changed production lot id
@param product_qty: Quantity of product
@param loc_id: Location id
@param product_id: Product id
@return: Warning message
"""
if not prodlot_id or not loc_id:
return {}
ctx = context and context.copy() or {}
ctx['location_id'] = loc_id
ctx.update({'raise-exception': True})
uom_obj = self.pool.get('product.uom')
product_obj = self.pool.get('product.product')
product_uom = product_obj.browse(cr, uid, product_id, context=ctx).uom_id
prodlot = self.pool.get('stock.production.lot').browse(cr, uid, prodlot_id, context=ctx)
location = self.pool.get('stock.location').browse(cr, uid, loc_id, context=ctx)
uom = uom_obj.browse(cr, uid, uom_id, context=ctx)
amount_actual = uom_obj._compute_qty_obj(cr, uid, product_uom, prodlot.stock_available, uom, context=ctx)
warning = {}
if (location.usage == 'internal') and (product_qty > (amount_actual or 0.0)):
warning = {
'title': _('Insufficient Stock for Serial Number !'),
'message': _('You are moving %.2f %s but only %.2f %s available for this serial number.') % (product_qty, uom.name, amount_actual, uom.name)
}
return {'warning': warning}
def onchange_quantity(self, cr, uid, ids, product_id, product_qty,
product_uom, product_uos):
""" On change of product quantity finds UoM and UoS quantities
@param product_id: Product id
@param product_qty: Changed Quantity of product
@param product_uom: Unit of measure of product
@param product_uos: Unit of sale of product
@return: Dictionary of values
"""
result = {
'product_uos_qty': 0.00
}
warning = {}
if (not product_id) or (product_qty <=0.0):
result['product_qty'] = 0.0
return {'value': result}
product_obj = self.pool.get('product.product')
uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff'])
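# Relation used below: quantity in UoS = quantity in UoM * uos_coeff
# (onchange_uos_quantity applies the inverse conversion).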
# Warn if the quantity was decreased
if ids:
for move in self.read(cr, uid, ids, ['product_qty']):
if product_qty < move['product_qty']:
warning.update({
'title': _('Information'),
'message': _("By changing this quantity here, you accept the "
"new quantity as complete: OpenERP will not "
"automatically generate a back order.") })
break
if product_uos and product_uom and (product_uom != product_uos):
result['product_uos_qty'] = product_qty * uos_coeff['uos_coeff']
else:
result['product_uos_qty'] = product_qty
return {'value': result, 'warning': warning}
def onchange_uos_quantity(self, cr, uid, ids, product_id, product_uos_qty,
product_uos, product_uom):
""" On change of product quantity finds UoM and UoS quantities
@param product_id: Product id
@param product_uos_qty: Changed UoS Quantity of product
@param product_uom: Unit of measure of product
@param product_uos: Unit of sale of product
@return: Dictionary of values
"""
result = {
'product_qty': 0.00
}
warning = {}
if (not product_id) or (product_uos_qty <=0.0):
result['product_uos_qty'] = 0.0
return {'value': result}
product_obj = self.pool.get('product.product')
uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff'])
# Warn if the quantity was decreased
for move in self.read(cr, uid, ids, ['product_uos_qty']):
if product_uos_qty < move['product_uos_qty']:
warning.update({
'title': _('Warning: No Back Order'),
'message': _("By changing the quantity here, you accept the "
"new quantity as complete: OpenERP will not "
"automatically generate a Back Order.") })
break
if product_uos and product_uom and (product_uom != product_uos):
result['product_qty'] = product_uos_qty / uos_coeff['uos_coeff']
else:
result['product_qty'] = product_uos_qty
return {'value': result, 'warning': warning}
def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False,
loc_dest_id=False, partner_id=False):
""" On change of product id, if finds UoM, UoS, quantity and UoS quantity.
@param prod_id: Changed Product id
@param loc_id: Source location id
@param loc_dest_id: Destination location id
@param partner_id: Address id of partner
@return: Dictionary of values
"""
if not prod_id:
return {}
lang = False
if partner_id:
addr_rec = self.pool.get('res.partner').browse(cr, uid, partner_id)
if addr_rec:
lang = addr_rec and addr_rec.lang or False
ctx = {'lang': lang}
product = self.pool.get('product.product').browse(cr, uid, [prod_id], context=ctx)[0]
uos_id = product.uos_id and product.uos_id.id or False
result = {
'product_uom': product.uom_id.id,
'product_uos': uos_id,
'product_qty': 1.00,
'product_uos_qty' : self.pool.get('stock.move').onchange_quantity(cr, uid, ids, prod_id, 1.00, product.uom_id.id, uos_id)['value']['product_uos_qty'],
'prodlot_id' : False,
}
if not ids:
result['name'] = product.partner_ref
if loc_id:
result['location_id'] = loc_id
if loc_dest_id:
result['location_dest_id'] = loc_dest_id
return {'value': result}
def onchange_move_type(self, cr, uid, ids, type, context=None):
""" On change of move type gives sorce and destination location.
@param type: Move Type
@return: Dictionary of values
"""
mod_obj = self.pool.get('ir.model.data')
location_source_id = 'stock_location_stock'
location_dest_id = 'stock_location_stock'
if type == 'in':
location_source_id = 'stock_location_suppliers'
location_dest_id = 'stock_location_stock'
elif type == 'out':
location_source_id = 'stock_location_stock'
location_dest_id = 'stock_location_customers'
source_location = mod_obj.get_object_reference(cr, uid, 'stock', location_source_id)
dest_location = mod_obj.get_object_reference(cr, uid, 'stock', location_dest_id)
return {'value':{'location_id': source_location and source_location[1] or False, 'location_dest_id': dest_location and dest_location[1] or False}}
def onchange_date(self, cr, uid, ids, date, date_expected, context=None):
""" On change of Scheduled Date gives a Move date.
@param date_expected: Scheduled Date
@param date: Move Date
@return: Move Date
"""
if not date_expected:
date_expected = time.strftime('%Y-%m-%d %H:%M:%S')
return {'value':{'date': date_expected}}
def _chain_compute(self, cr, uid, moves, context=None):
""" Finds whether the location has chained location type or not.
@param moves: Stock moves
@return: Dictionary mapping each picking to the list of (move, chained location info) tuples to process.
"""
result = {}
for m in moves:
dest = self.pool.get('stock.location').chained_location_get(
cr,
uid,
m.location_dest_id,
m.picking_id and m.picking_id.partner_id and m.picking_id.partner_id,
m.product_id,
context
)
if dest:
if dest[1] == 'transparent':
newdate = (datetime.strptime(m.date, '%Y-%m-%d %H:%M:%S') + relativedelta(days=dest[2] or 0)).strftime('%Y-%m-%d')
self.write(cr, uid, [m.id], {
'date': newdate,
'location_dest_id': dest[0].id})
if m.picking_id and (dest[3] or dest[5]):
self.pool.get('stock.picking').write(cr, uid, [m.picking_id.id], {
'stock_journal_id': dest[3] or m.picking_id.stock_journal_id.id,
'type': dest[5] or m.picking_id.type
}, context=context)
m.location_dest_id = dest[0]
res2 = self._chain_compute(cr, uid, [m], context=context)
for pick_id in res2.keys():
result.setdefault(pick_id, [])
result[pick_id] += res2[pick_id]
else:
result.setdefault(m.picking_id, [])
result[m.picking_id].append( (m, dest) )
return result
def _prepare_chained_picking(self, cr, uid, picking_name, picking, picking_type, moves_todo, context=None):
"""Prepare the definition (values) to create a new chained picking.
:param str picking_name: desired new picking name
:param browse_record picking: source picking (being chained to)
:param str picking_type: desired new picking type
:param list moves_todo: specification of the stock moves to be later included in this
picking, in the form::
[[move, (dest_location, auto_packing, chained_delay, chained_journal,
chained_company_id, chained_picking_type)],
...
]
See also :meth:`stock_location.chained_location_get`.
"""
res_company = self.pool.get('res.company')
return {
'name': picking_name,
'origin': tools.ustr(picking.origin or ''),
'type': picking_type,
'note': picking.note,
'move_type': picking.move_type,
'auto_picking': moves_todo[0][1][1] == 'auto',
'stock_journal_id': moves_todo[0][1][3],
'company_id': moves_todo[0][1][4] or res_company._company_default_get(cr, uid, 'stock.company', context=context),
'partner_id': picking.partner_id.id,
'invoice_state': 'none',
'date': picking.date,
}
def _create_chained_picking(self, cr, uid, picking_name, picking, picking_type, moves_todo, context=None):
picking_obj = self.pool.get('stock.picking')
return picking_obj.create(cr, uid, self._prepare_chained_picking(cr, uid, picking_name, picking, picking_type, moves_todo, context=context))
def create_chained_picking(self, cr, uid, moves, context=None):
res_obj = self.pool.get('res.company')
location_obj = self.pool.get('stock.location')
move_obj = self.pool.get('stock.move')
wf_service = netsvc.LocalService("workflow")
new_moves = []
if context is None:
context = {}
seq_obj = self.pool.get('ir.sequence')
for picking, todo in self._chain_compute(cr, uid, moves, context=context).items():
ptype = todo[0][1][5] and todo[0][1][5] or location_obj.picking_type_get(cr, uid, todo[0][0].location_dest_id, todo[0][1][0])
if picking:
# name of new picking according to its type
new_pick_name = seq_obj.get(cr, uid, 'stock.picking.' + ptype)
pickid = self._create_chained_picking(cr, uid, new_pick_name, picking, ptype, todo, context=context)
# Need to check name of old picking because it always considers picking as "OUT" when created from Sales Order
old_ptype = location_obj.picking_type_get(cr, uid, picking.move_lines[0].location_id, picking.move_lines[0].location_dest_id)
if old_ptype != picking.type:
old_pick_name = seq_obj.get(cr, uid, 'stock.picking.' + old_ptype)
self.pool.get('stock.picking').write(cr, uid, [picking.id], {'name': old_pick_name, 'type': old_ptype}, context=context)
else:
pickid = False
for move, (loc, dummy, delay, dummy, company_id, ptype, invoice_state) in todo:
new_id = move_obj.copy(cr, uid, move.id, {
'location_id': move.location_dest_id.id,
'location_dest_id': loc.id,
'date': time.strftime('%Y-%m-%d'),
'picking_id': pickid,
'state': 'waiting',
'company_id': company_id or res_obj._company_default_get(cr, uid, 'stock.company', context=context) ,
'move_history_ids': [],
'date_expected': (datetime.strptime(move.date, '%Y-%m-%d %H:%M:%S') + relativedelta(days=delay or 0)).strftime('%Y-%m-%d'),
'move_history_ids2': []}
)
move_obj.write(cr, uid, [move.id], {
'move_dest_id': new_id,
'move_history_ids': [(4, new_id)]
})
new_moves.append(self.browse(cr, uid, [new_id])[0])
if pickid:
wf_service.trg_validate(uid, 'stock.picking', pickid, 'button_confirm', cr)
if new_moves:
new_moves += self.create_chained_picking(cr, uid, new_moves, context)
return new_moves
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms stock move.
@return: List of ids.
"""
moves = self.browse(cr, uid, ids, context=context)
self.write(cr, uid, ids, {'state': 'confirmed'})
self.create_chained_picking(cr, uid, moves, context)
return []
def action_assign(self, cr, uid, ids, *args):
""" Changes state to confirmed or waiting.
@return: List of values
"""
todo = []
for move in self.browse(cr, uid, ids):
if move.state in ('confirmed', 'waiting'):
todo.append(move.id)
res = self.check_assign(cr, uid, todo)
return res
def force_assign(self, cr, uid, ids, context=None):
""" Changes the state to assigned.
@return: True
"""
self.write(cr, uid, ids, {'state': 'assigned'})
wf_service = netsvc.LocalService('workflow')
for move in self.browse(cr, uid, ids, context):
if move.picking_id:
wf_service.trg_write(uid, 'stock.picking', move.picking_id.id, cr)
return True
def cancel_assign(self, cr, uid, ids, context=None):
""" Changes the state to confirmed.
@return: True
"""
self.write(cr, uid, ids, {'state': 'confirmed'})
# fix for bug lp:707031
# call write on the related picking because changing move availability does
# not trigger the picking workflow, so its state would not be updated otherwise
wf_service = netsvc.LocalService('workflow')
for move in self.browse(cr, uid, ids, context):
if move.picking_id:
wf_service.trg_write(uid, 'stock.picking', move.picking_id.id, cr)
return True
#
# Duplicate stock.move
#
def check_assign(self, cr, uid, ids, context=None):
""" Checks the product type and accordingly writes the state.
@return: No. of moves done
"""
done = []
count = 0
pickings = {}
if context is None:
context = {}
for move in self.browse(cr, uid, ids, context=context):
if move.product_id.type == 'consu' or move.location_id.usage == 'supplier':
if move.state in ('confirmed', 'waiting'):
done.append(move.id)
pickings[move.picking_id.id] = 1
continue
if move.state in ('confirmed', 'waiting'):
# Important: we must pass lock=True to _product_reserve() to avoid race conditions and double reservations
res = self.pool.get('stock.location')._product_reserve(cr, uid, [move.location_id.id], move.product_id.id, move.product_qty, {'uom': move.product_uom.id}, lock=True)
if res:
#_product_available_test depends on the next status for correct functioning
#the test does not work correctly if the same product occurs multiple times
#in the same order. This is e.g. the case when using the button 'split in two' of
#the stock outgoing form
self.write(cr, uid, [move.id], {'state':'assigned'})
done.append(move.id)
pickings[move.picking_id.id] = 1
r = res.pop(0)
product_uos_qty = self.pool.get('stock.move').onchange_quantity(cr, uid, ids, move.product_id.id, r[0], move.product_id.uom_id.id, move.product_id.uos_id.id)['value']['product_uos_qty']
cr.execute('update stock_move set location_id=%s, product_qty=%s, product_uos_qty=%s where id=%s', (r[1], r[0],product_uos_qty, move.id))
while res:
r = res.pop(0)
move_id = self.copy(cr, uid, move.id, {'product_uos_qty': product_uos_qty, 'product_qty': r[0], 'location_id': r[1]})
done.append(move_id)
if done:
count += len(done)
self.write(cr, uid, done, {'state': 'assigned'})
if count:
for pick_id in pickings:
wf_service = netsvc.LocalService("workflow")
wf_service.trg_write(uid, 'stock.picking', pick_id, cr)
return count
def setlast_tracking(self, cr, uid, ids, context=None):
tracking_obj = self.pool.get('stock.tracking')
picking = self.browse(cr, uid, ids, context=context)[0].picking_id
if picking:
last_track = [line.tracking_id.id for line in picking.move_lines if line.tracking_id]
if not last_track:
last_track = tracking_obj.create(cr, uid, {}, context=context)
else:
last_track.sort()
last_track = last_track[-1]
self.write(cr, uid, ids, {'tracking_id': last_track})
return True
#
# Cancel move => cancel other moves and pickings
#
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels the moves and if all moves are cancelled it cancels the picking.
@return: True
"""
if not len(ids):
return True
if context is None:
context = {}
pickings = set()
for move in self.browse(cr, uid, ids, context=context):
if move.state in ('confirmed', 'waiting', 'assigned', 'draft'):
if move.picking_id:
pickings.add(move.picking_id.id)
if move.move_dest_id and move.move_dest_id.state == 'waiting':
self.write(cr, uid, [move.move_dest_id.id], {'state': 'confirmed'})
if context.get('call_unlink',False) and move.move_dest_id.picking_id:
wf_service = netsvc.LocalService("workflow")
wf_service.trg_write(uid, 'stock.picking', move.move_dest_id.picking_id.id, cr)
self.write(cr, uid, ids, {'state': 'cancel', 'move_dest_id': False})
if not context.get('call_unlink',False):
for pick in self.pool.get('stock.picking').browse(cr, uid, list(pickings), context=context):
if all(move.state == 'cancel' for move in pick.move_lines):
self.pool.get('stock.picking').write(cr, uid, [pick.id], {'state': 'cancel'})
wf_service = netsvc.LocalService("workflow")
for id in ids:
wf_service.trg_trigger(uid, 'stock.move', id, cr)
return True
def _get_accounting_data_for_valuation(self, cr, uid, move, context=None):
"""
Return the accounts and journal to use to post Journal Entries for the real-time
valuation of the move.
:param context: context dictionary that can explicitly mention the company to consider via the 'force_company' key
:raise: osv.except_osv() if any mandatory account or journal is not defined.
"""
product_obj=self.pool.get('product.product')
accounts = product_obj.get_product_accounts(cr, uid, move.product_id.id, context)
if move.location_id.valuation_out_account_id:
acc_src = move.location_id.valuation_out_account_id.id
else:
acc_src = accounts['stock_account_input']
if move.location_dest_id.valuation_in_account_id:
acc_dest = move.location_dest_id.valuation_in_account_id.id
else:
acc_dest = accounts['stock_account_output']
acc_valuation = accounts.get('property_stock_valuation_account_id', False)
journal_id = accounts['stock_journal']
if acc_dest == acc_valuation:
raise osv.except_osv(_('Error!'), _('Cannot create Journal Entry, Output Account of this product and Valuation account on category of this product are same.'))
if acc_src == acc_valuation:
raise osv.except_osv(_('Error!'), _('Cannot create Journal Entry, Input Account of this product and Valuation account on category of this product are same.'))
if not acc_src:
raise osv.except_osv(_('Error!'), _('Please define stock input account for this product or its category: "%s" (id: %d)') % \
(move.product_id.name, move.product_id.id,))
if not acc_dest:
raise osv.except_osv(_('Error!'), _('Please define stock output account for this product or its category: "%s" (id: %d)') % \
(move.product_id.name, move.product_id.id,))
if not journal_id:
raise osv.except_osv(_('Error!'), _('Please define journal on the product category: "%s" (id: %d)') % \
(move.product_id.categ_id.name, move.product_id.categ_id.id,))
if not acc_valuation:
raise osv.except_osv(_('Error!'), _('Please define inventory valuation account on the product category: "%s" (id: %d)') % \
(move.product_id.categ_id.name, move.product_id.categ_id.id,))
return journal_id, acc_src, acc_dest, acc_valuation
def _get_reference_accounting_values_for_valuation(self, cr, uid, move, context=None):
"""
Return the reference amount and reference currency representing the inventory valuation for this move.
These reference values should possibly be converted before being posted in Journals to adapt to the primary
and secondary currencies of the relevant accounts.
"""
product_uom_obj = self.pool.get('product.uom')
# by default the reference currency is that of the move's company
reference_currency_id = move.company_id.currency_id.id
default_uom = move.product_id.uom_id.id
qty = product_uom_obj._compute_qty(cr, uid, move.product_uom.id, move.product_qty, default_uom)
# if product is set to average price and a specific value was entered in the picking wizard,
# we use it
if move.product_id.cost_method == 'average' and move.price_unit:
reference_amount = qty * move.price_unit
reference_currency_id = move.price_currency_id.id or reference_currency_id
# Otherwise we default to the company's valuation price type, considering that the values of the
# valuation field are expressed in the default currency of the move's company.
else:
if context is None:
context = {}
currency_ctx = dict(context, currency_id = move.company_id.currency_id.id)
amount_unit = move.product_id.price_get('standard_price', context=currency_ctx)[move.product_id.id]
reference_amount = amount_unit * qty
return reference_amount, reference_currency_id
def _create_product_valuation_moves(self, cr, uid, move, context=None):
"""
Generate the appropriate accounting moves if the product being moved is subject
to real_time valuation tracking, and the source or destination location is
a transit location or is outside of the company.
"""
if move.product_id.valuation == 'real_time': # FIXME: product valuation should perhaps be a property?
if context is None:
context = {}
src_company_ctx = dict(context,force_company=move.location_id.company_id.id)
dest_company_ctx = dict(context,force_company=move.location_dest_id.company_id.id)
account_moves = []
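# Each entry appended to account_moves is a (journal_id, move_lines) pair;
# one account.move is created per entry at the end of this method.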
# Outgoing moves (or cross-company output part)
if move.location_id.company_id \
and (move.location_id.usage == 'internal' and move.location_dest_id.usage != 'internal'\
or move.location_id.company_id != move.location_dest_id.company_id):
journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, src_company_ctx)
reference_amount, reference_currency_id = self._get_reference_accounting_values_for_valuation(cr, uid, move, src_company_ctx)
#returning goods to supplier
if move.location_dest_id.usage == 'supplier':
account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_valuation, acc_src, reference_amount, reference_currency_id, context))]
else:
account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_valuation, acc_dest, reference_amount, reference_currency_id, context))]
# Incoming moves (or cross-company input part)
if move.location_dest_id.company_id \
and (move.location_id.usage != 'internal' and move.location_dest_id.usage == 'internal'\
or move.location_id.company_id != move.location_dest_id.company_id):
journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, dest_company_ctx)
reference_amount, reference_currency_id = self._get_reference_accounting_values_for_valuation(cr, uid, move, src_company_ctx)
#goods return from customer
if move.location_id.usage == 'customer':
account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_dest, acc_valuation, reference_amount, reference_currency_id, context))]
else:
account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_src, acc_valuation, reference_amount, reference_currency_id, context))]
move_obj = self.pool.get('account.move')
for j_id, move_lines in account_moves:
move_obj.create(cr, uid,
{
'journal_id': j_id,
'line_id': move_lines,
'ref': move.picking_id and move.picking_id.name})
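    # For reference, each journal entry created above has roughly this shape
    # (values are illustrative only):
    #   {'journal_id': <stock journal id>,
    #    'line_id': [(0, 0, {<debit line>}), (0, 0, {<credit line>})],
    #    'ref': <picking name>}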
def action_done(self, cr, uid, ids, context=None):
""" Makes the move done and if all moves are done, it will finish the picking.
@return:
"""
picking_ids = []
move_ids = []
wf_service = netsvc.LocalService("workflow")
if context is None:
context = {}
todo = []
for move in self.browse(cr, uid, ids, context=context):
if move.state=="draft":
todo.append(move.id)
if todo:
self.action_confirm(cr, uid, todo, context=context)
todo = []
for move in self.browse(cr, uid, ids, context=context):
if move.state in ['done','cancel']:
continue
move_ids.append(move.id)
if move.picking_id:
picking_ids.append(move.picking_id.id)
if move.move_dest_id.id and (move.state != 'done'):
# Downstream move should only be triggered if this move is the last pending upstream move
other_upstream_move_ids = self.search(cr, uid, [('id','!=',move.id),('state','not in',['done','cancel']),
('move_dest_id','=',move.move_dest_id.id)], context=context)
if not other_upstream_move_ids:
self.write(cr, uid, [move.id], {'move_history_ids': [(4, move.move_dest_id.id)]})
if move.move_dest_id.state in ('waiting', 'confirmed'):
self.force_assign(cr, uid, [move.move_dest_id.id], context=context)
if move.move_dest_id.picking_id:
wf_service.trg_write(uid, 'stock.picking', move.move_dest_id.picking_id.id, cr)
if move.move_dest_id.auto_validate:
self.action_done(cr, uid, [move.move_dest_id.id], context=context)
self._create_product_valuation_moves(cr, uid, move, context=context)
if move.state not in ('confirmed','done','assigned'):
todo.append(move.id)
if todo:
self.action_confirm(cr, uid, todo, context=context)
self.write(cr, uid, move_ids, {'state': 'done', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
for id in move_ids:
wf_service.trg_trigger(uid, 'stock.move', id, cr)
for pick_id in picking_ids:
wf_service.trg_write(uid, 'stock.picking', pick_id, cr)
return True
def _create_account_move_line(self, cr, uid, move, src_account_id, dest_account_id, reference_amount, reference_currency_id, context=None):
"""
Generate the account.move.line values to post to track the stock valuation difference due to the
processing of the given stock move.
"""
# prepare default values considering that the destination accounts have the reference_currency_id as their main currency
partner_id = (move.picking_id.partner_id and self.pool.get('res.partner')._find_accounting_partner(move.picking_id.partner_id).id) or False
debit_line_vals = {
'name': move.name,
'product_id': move.product_id and move.product_id.id or False,
'quantity': move.product_qty,
'ref': move.picking_id and move.picking_id.name or False,
'date': time.strftime('%Y-%m-%d'),
'partner_id': partner_id,
'debit': reference_amount,
'account_id': dest_account_id,
}
credit_line_vals = {
'name': move.name,
'product_id': move.product_id and move.product_id.id or False,
'quantity': move.product_qty,
'ref': move.picking_id and move.picking_id.name or False,
'date': time.strftime('%Y-%m-%d'),
'partner_id': partner_id,
'credit': reference_amount,
'account_id': src_account_id,
}
        # if we are posting to accounts in a different currency, provide the correct values in both currencies,
        # when compatible with the optional secondary currency on the account.
# Financial Accounts only accept amounts in secondary currencies if there's no secondary currency on the account
# or if it's the same as that of the secondary amount being posted.
account_obj = self.pool.get('account.account')
src_acct, dest_acct = account_obj.browse(cr, uid, [src_account_id, dest_account_id], context=context)
src_main_currency_id = src_acct.company_id.currency_id.id
dest_main_currency_id = dest_acct.company_id.currency_id.id
cur_obj = self.pool.get('res.currency')
if reference_currency_id != src_main_currency_id:
# fix credit line:
credit_line_vals['credit'] = cur_obj.compute(cr, uid, reference_currency_id, src_main_currency_id, reference_amount, context=context)
if (not src_acct.currency_id) or src_acct.currency_id.id == reference_currency_id:
credit_line_vals.update(currency_id=reference_currency_id, amount_currency=-reference_amount)
if reference_currency_id != dest_main_currency_id:
# fix debit line:
debit_line_vals['debit'] = cur_obj.compute(cr, uid, reference_currency_id, dest_main_currency_id, reference_amount, context=context)
if (not dest_acct.currency_id) or dest_acct.currency_id.id == reference_currency_id:
debit_line_vals.update(currency_id=reference_currency_id, amount_currency=reference_amount)
return [(0, 0, debit_line_vals), (0, 0, credit_line_vals)]
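    # The (0, 0, vals) tuples returned above are standard one2many "create"
    # commands: when the account.move is created, each tuple spawns a new
    # account.move.line. Both legs carry the same reference_amount, converted to
    # the main currency of their respective account when the currencies differ.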
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = {}
ctx = context.copy()
for move in self.browse(cr, uid, ids, context=context):
if move.state != 'draft' and not ctx.get('call_unlink', False):
raise osv.except_osv(_('User Error!'), _('You can only delete draft moves.'))
return super(stock_move, self).unlink(
cr, uid, ids, context=ctx)
# _create_lot function is not used anywhere
def _create_lot(self, cr, uid, ids, product_id, prefix=False):
""" Creates production lot
@return: Production lot id
"""
prodlot_obj = self.pool.get('stock.production.lot')
prodlot_id = prodlot_obj.create(cr, uid, {'prefix': prefix, 'product_id': product_id})
return prodlot_id
def action_scrap(self, cr, uid, ids, quantity, location_id, context=None):
""" Move the scrap/damaged product into scrap location
@param cr: the database cursor
@param uid: the user id
@param ids: ids of stock move object to be scrapped
@param quantity : specify scrap qty
@param location_id : specify scrap location
@param context: context arguments
        @return: Scrapped move lines
"""
        # quantity should be expressed in the move's UoM
if quantity <= 0:
raise osv.except_osv(_('Warning!'), _('Please provide a positive quantity to scrap.'))
res = []
for move in self.browse(cr, uid, ids, context=context):
source_location = move.location_id
if move.state == 'done':
source_location = move.location_dest_id
if source_location.usage != 'internal':
                # forbid scrapping from a virtual location because it's meaningless and may introduce errors in stock ('creating' new products from nowhere)
raise osv.except_osv(_('Error!'), _('Forbidden operation: it is not allowed to scrap products from a virtual location.'))
move_qty = move.product_qty
uos_qty = quantity / move_qty * move.product_uos_qty
default_val = {
'location_id': source_location.id,
'product_qty': quantity,
'product_uos_qty': uos_qty,
'state': move.state,
'scrapped': True,
'location_dest_id': location_id,
'tracking_id': move.tracking_id.id,
'prodlot_id': move.prodlot_id.id,
}
new_move = self.copy(cr, uid, move.id, default_val)
res += [new_move]
product_obj = self.pool.get('product.product')
for product in product_obj.browse(cr, uid, [move.product_id.id], context=context):
if move.picking_id:
uom = product.uom_id.name if product.uom_id else ''
message = _("%s %s %s has been <b>moved to</b> scrap.") % (quantity, uom, product.name)
move.picking_id.message_post(body=message)
self.action_done(cr, uid, res, context=context)
return res
# action_split function is not used anywhere
# FIXME: deprecate this method
def action_split(self, cr, uid, ids, quantity, split_by_qty=1, prefix=False, with_lot=True, context=None):
""" Split Stock Move lines into production lot which specified split by quantity.
@param cr: the database cursor
@param uid: the user id
@param ids: ids of stock move object to be splited
@param split_by_qty : specify split by qty
@param prefix : specify prefix of production lot
@param with_lot : if true, prodcution lot will assign for split line otherwise not.
@param context: context arguments
@return: Splited move lines
"""
if context is None:
context = {}
if quantity <= 0:
            raise osv.except_osv(_('Warning!'), _('Please provide a proper quantity.'))
res = []
for move in self.browse(cr, uid, ids, context=context):
if split_by_qty <= 0 or quantity == 0:
return res
uos_qty = split_by_qty / move.product_qty * move.product_uos_qty
quantity_rest = quantity % split_by_qty
            uos_qty_rest = quantity_rest / move.product_qty * move.product_uos_qty
update_val = {
'product_qty': split_by_qty,
'product_uos_qty': uos_qty,
}
for idx in range(int(quantity//split_by_qty)):
if not idx and move.product_qty<=quantity:
current_move = move.id
else:
current_move = self.copy(cr, uid, move.id, {'state': move.state})
res.append(current_move)
if with_lot:
update_val['prodlot_id'] = self._create_lot(cr, uid, [current_move], move.product_id.id)
self.write(cr, uid, [current_move], update_val)
if quantity_rest > 0:
idx = int(quantity//split_by_qty)
update_val['product_qty'] = quantity_rest
update_val['product_uos_qty'] = uos_qty_rest
if not idx and move.product_qty<=quantity:
current_move = move.id
else:
current_move = self.copy(cr, uid, move.id, {'state': move.state})
res.append(current_move)
if with_lot:
update_val['prodlot_id'] = self._create_lot(cr, uid, [current_move], move.product_id.id)
self.write(cr, uid, [current_move], update_val)
return res
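    # Hypothetical usage sketch (not part of the original code): splitting a
    # 10-unit move with split_by_qty=3 yields three 3-unit moves plus a 1-unit
    # remainder move, each optionally carrying a freshly created production lot.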
def action_consume(self, cr, uid, ids, quantity, location_id=False, context=None):
""" Consumed product with specific quatity from specific source location
@param cr: the database cursor
@param uid: the user id
@param ids: ids of stock move object to be consumed
@param quantity : specify consume quantity
@param location_id : specify source location
@param context: context arguments
@return: Consumed lines
"""
        # quantity should be expressed in the move's UoM
if context is None:
context = {}
if quantity <= 0:
            raise osv.except_osv(_('Warning!'), _('Please provide a proper quantity.'))
res = []
for move in self.browse(cr, uid, ids, context=context):
move_qty = move.product_qty
if move_qty <= 0:
raise osv.except_osv(_('Error!'), _('Cannot consume a move with negative or zero quantity.'))
quantity_rest = move.product_qty
quantity_rest -= quantity
uos_qty_rest = quantity_rest / move_qty * move.product_uos_qty
if quantity_rest <= 0:
quantity_rest = 0
uos_qty_rest = 0
quantity = move.product_qty
uos_qty = quantity / move_qty * move.product_uos_qty
if quantity_rest > 0:
default_val = {
'product_qty': quantity,
'product_uos_qty': uos_qty,
'state': move.state,
'location_id': location_id or move.location_id.id,
}
current_move = self.copy(cr, uid, move.id, default_val)
res += [current_move]
update_val = {}
update_val['product_qty'] = quantity_rest
update_val['product_uos_qty'] = uos_qty_rest
self.write(cr, uid, [move.id], update_val)
else:
quantity_rest = quantity
uos_qty_rest = uos_qty
res += [move.id]
update_val = {
'product_qty' : quantity_rest,
'product_uos_qty' : uos_qty_rest,
'location_id': location_id or move.location_id.id,
}
self.write(cr, uid, [move.id], update_val)
self.action_done(cr, uid, res, context=context)
return res
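    # Hypothetical usage sketch: consuming 2 units out of a 5-unit move creates a
    # new 2-unit move that is immediately processed by action_done(), while the
    # original move keeps the remaining 3 units; consuming the full quantity (or
    # more) simply processes the original move as a whole.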
# FIXME: needs refactoring, this code is partially duplicated in stock_picking.do_partial()!
def do_partial(self, cr, uid, ids, partial_datas, context=None):
""" Makes partial pickings and moves done.
@param partial_datas: Dictionary containing details of partial picking
like partner_id, delivery_date, delivery
moves with product_id, product_qty, uom
"""
res = {}
picking_obj = self.pool.get('stock.picking')
product_obj = self.pool.get('product.product')
currency_obj = self.pool.get('res.currency')
uom_obj = self.pool.get('product.uom')
wf_service = netsvc.LocalService("workflow")
if context is None:
context = {}
complete, too_many, too_few = [], [], []
move_product_qty = {}
prodlot_ids = {}
for move in self.browse(cr, uid, ids, context=context):
if move.state in ('done', 'cancel'):
continue
partial_data = partial_datas.get('move%s'%(move.id), False)
assert partial_data, _('Missing partial picking data for move #%s.') % (move.id)
product_qty = partial_data.get('product_qty',0.0)
move_product_qty[move.id] = product_qty
product_uom = partial_data.get('product_uom',False)
product_price = partial_data.get('product_price',0.0)
product_currency = partial_data.get('product_currency',False)
prodlot_ids[move.id] = partial_data.get('prodlot_id')
if move.product_qty == product_qty:
complete.append(move)
elif move.product_qty > product_qty:
too_few.append(move)
else:
too_many.append(move)
# Average price computation
if (move.picking_id.type == 'in') and (move.product_id.cost_method == 'average'):
product = product_obj.browse(cr, uid, move.product_id.id)
move_currency_id = move.company_id.currency_id.id
context['currency_id'] = move_currency_id
qty = uom_obj._compute_qty(cr, uid, product_uom, product_qty, product.uom_id.id)
if qty > 0:
new_price = currency_obj.compute(cr, uid, product_currency,
move_currency_id, product_price)
new_price = uom_obj._compute_price(cr, uid, product_uom, new_price,
product.uom_id.id)
if product.qty_available <= 0:
new_std_price = new_price
else:
# Get the standard price
amount_unit = product.price_get('standard_price', context=context)[product.id]
new_std_price = ((amount_unit * product.qty_available)\
+ (new_price * qty))/(product.qty_available + qty)
product_obj.write(cr, uid, [product.id],{'standard_price': new_std_price})
# Record the values that were chosen in the wizard, so they can be
# used for inventory valuation if real-time valuation is enabled.
self.write(cr, uid, [move.id],
{'price_unit': product_price,
'price_currency_id': product_currency,
})
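                # In plain terms (restating the formula above): the new standard
                # price is the quantity-weighted average of the old standard
                # price over the quantity on hand and the incoming unit price
                # over the received quantity.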
for move in too_few:
product_qty = move_product_qty[move.id]
if product_qty != 0:
defaults = {
'product_qty' : product_qty,
'product_uos_qty': product_qty,
'picking_id' : move.picking_id.id,
'state': 'assigned',
'move_dest_id': False,
'price_unit': move.price_unit,
}
prodlot_id = prodlot_ids[move.id]
if prodlot_id:
defaults.update(prodlot_id=prodlot_id)
new_move = self.copy(cr, uid, move.id, defaults)
complete.append(self.browse(cr, uid, new_move))
self.write(cr, uid, [move.id],
{
'product_qty': move.product_qty - product_qty,
'product_uos_qty': move.product_qty - product_qty,
'prodlot_id': False,
'tracking_id': False,
})
for move in too_many:
self.write(cr, uid, [move.id],
{
'product_qty': move.product_qty,
'product_uos_qty': move.product_qty,
})
complete.append(move)
for move in complete:
if prodlot_ids.get(move.id):
self.write(cr, uid, [move.id],{'prodlot_id': prodlot_ids.get(move.id)})
self.action_done(cr, uid, [move.id], context=context)
if move.picking_id.id :
# TOCHECK : Done picking if all moves are done
cr.execute("""
SELECT move.id FROM stock_picking pick
RIGHT JOIN stock_move move ON move.picking_id = pick.id AND move.state = %s
WHERE pick.id = %s""",
('done', move.picking_id.id))
res = cr.fetchall()
if len(res) == len(move.picking_id.move_lines):
picking_obj.action_move(cr, uid, [move.picking_id.id])
wf_service.trg_validate(uid, 'stock.picking', move.picking_id.id, 'button_done', cr)
return [move.id for move in complete]
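    # Expected shape of partial_datas for each processed move (keys taken from the
    # lookups above, values illustrative):
    #   {'move<ID>': {'product_qty': 2.0, 'product_uom': <uom id>,
    #                 'product_price': 10.0, 'product_currency': <currency id>,
    #                 'prodlot_id': <lot id or False>}}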
stock_move()
class stock_inventory(osv.osv):
_name = "stock.inventory"
_description = "Inventory"
_columns = {
'name': fields.char('Inventory Reference', size=64, required=True, readonly=True, states={'draft': [('readonly', False)]}),
'date': fields.datetime('Creation Date', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'date_done': fields.datetime('Date done'),
'inventory_line_id': fields.one2many('stock.inventory.line', 'inventory_id', 'Inventories', readonly=True, states={'draft': [('readonly', False)]}),
'move_ids': fields.many2many('stock.move', 'stock_inventory_move_rel', 'inventory_id', 'move_id', 'Created Moves'),
'state': fields.selection( (('draft', 'Draft'), ('cancel','Cancelled'), ('confirm','Confirmed'), ('done', 'Done')), 'Status', readonly=True, select=True),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True, readonly=True, states={'draft':[('readonly',False)]}),
}
_defaults = {
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'state': 'draft',
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c)
}
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default = default.copy()
default.update({'move_ids': [], 'date_done': False})
return super(stock_inventory, self).copy(cr, uid, id, default, context=context)
def _inventory_line_hook(self, cr, uid, inventory_line, move_vals):
""" Creates a stock move from an inventory line
@param inventory_line:
@param move_vals:
@return:
"""
return self.pool.get('stock.move').create(cr, uid, move_vals)
def action_done(self, cr, uid, ids, context=None):
""" Finish the inventory
@return: True
"""
if context is None:
context = {}
move_obj = self.pool.get('stock.move')
for inv in self.browse(cr, uid, ids, context=context):
move_obj.action_done(cr, uid, [x.id for x in inv.move_ids], context=context)
self.write(cr, uid, [inv.id], {'state':'done', 'date_done': time.strftime('%Y-%m-%d %H:%M:%S')}, context=context)
return True
def action_confirm(self, cr, uid, ids, context=None):
""" Confirm the inventory and writes its finished date
@return: True
"""
if context is None:
context = {}
        # to perform the correct inventory corrections we need to analyze stock location by
# location, never recursively, so we use a special context
product_context = dict(context, compute_child=False)
location_obj = self.pool.get('stock.location')
for inv in self.browse(cr, uid, ids, context=context):
move_ids = []
for line in inv.inventory_line_id:
pid = line.product_id.id
product_context.update(uom=line.product_uom.id, to_date=inv.date, date=inv.date, prodlot_id=line.prod_lot_id.id)
amount = location_obj._product_get(cr, uid, line.location_id.id, [pid], product_context)[pid]
change = line.product_qty - amount
lot_id = line.prod_lot_id.id
if change:
location_id = line.product_id.property_stock_inventory.id
value = {
'name': _('INV:') + (line.inventory_id.name or ''),
'product_id': line.product_id.id,
'product_uom': line.product_uom.id,
'prodlot_id': lot_id,
'date': inv.date,
}
if change > 0:
value.update( {
'product_qty': change,
'location_id': location_id,
'location_dest_id': line.location_id.id,
})
else:
value.update( {
'product_qty': -change,
'location_id': line.location_id.id,
'location_dest_id': location_id,
})
move_ids.append(self._inventory_line_hook(cr, uid, line, value))
self.write(cr, uid, [inv.id], {'state': 'confirm', 'move_ids': [(6, 0, move_ids)]})
self.pool.get('stock.move').action_confirm(cr, uid, move_ids, context=context)
return True
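    # Sign convention used above: a positive difference (more counted than the
    # theoretical quantity) moves goods from the product's inventory-loss
    # location into the counted location, while a negative difference moves the
    # surplus back out of the counted location.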
def action_cancel_draft(self, cr, uid, ids, context=None):
""" Cancels the stock move and change inventory state to draft.
@return: True
"""
for inv in self.browse(cr, uid, ids, context=context):
self.pool.get('stock.move').action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context)
self.write(cr, uid, [inv.id], {'state':'draft'}, context=context)
return True
def action_cancel_inventory(self, cr, uid, ids, context=None):
""" Cancels both stock move and inventory
@return: True
"""
move_obj = self.pool.get('stock.move')
account_move_obj = self.pool.get('account.move')
for inv in self.browse(cr, uid, ids, context=context):
move_obj.action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context)
for move in inv.move_ids:
account_move_ids = account_move_obj.search(cr, uid, [('name', '=', move.name)])
if account_move_ids:
account_move_data_l = account_move_obj.read(cr, uid, account_move_ids, ['state'], context=context)
for account_move in account_move_data_l:
if account_move['state'] == 'posted':
raise osv.except_osv(_('User Error!'),
_('In order to cancel this inventory, you must first unpost related journal entries.'))
account_move_obj.unlink(cr, uid, [account_move['id']], context=context)
self.write(cr, uid, [inv.id], {'state': 'cancel'}, context=context)
return True
stock_inventory()
class stock_inventory_line(osv.osv):
_name = "stock.inventory.line"
_description = "Inventory Line"
_rec_name = "inventory_id"
_columns = {
'inventory_id': fields.many2one('stock.inventory', 'Inventory', ondelete='cascade', select=True),
'location_id': fields.many2one('stock.location', 'Location', required=True),
'product_id': fields.many2one('product.product', 'Product', required=True, select=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),
'company_id': fields.related('inventory_id','company_id',type='many2one',relation='res.company',string='Company',store=True, select=True, readonly=True),
'prod_lot_id': fields.many2one('stock.production.lot', 'Serial Number', domain="[('product_id','=',product_id)]"),
'state': fields.related('inventory_id','state',type='char',string='Status',readonly=True),
}
def _default_stock_location(self, cr, uid, context=None):
stock_location = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'stock_location_stock')
return stock_location.id
_defaults = {
'location_id': _default_stock_location
}
def on_change_product_id(self, cr, uid, ids, location_id, product, uom=False, to_date=False):
""" Changes UoM and name if product_id changes.
@param location_id: Location id
@param product: Changed product_id
@param uom: UoM product
@return: Dictionary of changed values
"""
if not product:
return {'value': {'product_qty': 0.0, 'product_uom': False, 'prod_lot_id': False}}
obj_product = self.pool.get('product.product').browse(cr, uid, product)
uom = uom or obj_product.uom_id.id
amount = self.pool.get('stock.location')._product_get(cr, uid, location_id, [product], {'uom': uom, 'to_date': to_date, 'compute_child': False})[product]
result = {'product_qty': amount, 'product_uom': uom, 'prod_lot_id': False}
return {'value': result}
stock_inventory_line()
#----------------------------------------------------------
# Stock Warehouse
#----------------------------------------------------------
class stock_warehouse(osv.osv):
_name = "stock.warehouse"
_description = "Warehouse"
_columns = {
'name': fields.char('Name', size=128, required=True, select=True),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True),
'partner_id': fields.many2one('res.partner', 'Owner Address'),
'lot_input_id': fields.many2one('stock.location', 'Location Input', required=True, domain=[('usage','<>','view')]),
'lot_stock_id': fields.many2one('stock.location', 'Location Stock', required=True, domain=[('usage','=','internal')]),
'lot_output_id': fields.many2one('stock.location', 'Location Output', required=True, domain=[('usage','<>','view')]),
}
def _default_lot_input_stock_id(self, cr, uid, context=None):
lot_input_stock = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'stock_location_stock')
return lot_input_stock.id
def _default_lot_output_id(self, cr, uid, context=None):
lot_output = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'stock_location_output')
return lot_output.id
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c),
'lot_input_id': _default_lot_input_stock_id,
'lot_stock_id': _default_lot_input_stock_id,
'lot_output_id': _default_lot_output_id,
}
stock_warehouse()
#----------------------------------------------------------
# "Empty" Classes that are used to vary from the original stock.picking (that are dedicated to the internal pickings)
# in order to offer a different usability with different views, labels, available reports/wizards...
#----------------------------------------------------------
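# In practice these subclasses share the same database table ("stock_picking")
# and delegate searching, reading, access checks and workflow calls back to
# stock.picking, so an incoming shipment is still just a stock.picking row with
# type='in'; only the UI-facing metadata (labels, state strings, defaults)
# differs between the variants.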
class stock_picking_in(osv.osv):
_name = "stock.picking.in"
_inherit = "stock.picking"
_table = "stock_picking"
_description = "Incoming Shipments"
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
return self.pool.get('stock.picking').search(cr, user, args, offset, limit, order, context, count)
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
return self.pool.get('stock.picking').read(cr, uid, ids, fields=fields, context=context, load=load)
def check_access_rights(self, cr, uid, operation, raise_exception=True):
        #override in order to redirect the check of access rights on the stock.picking object
return self.pool.get('stock.picking').check_access_rights(cr, uid, operation, raise_exception=raise_exception)
def check_access_rule(self, cr, uid, ids, operation, context=None):
        #override in order to redirect the check of access rules on the stock.picking object
return self.pool.get('stock.picking').check_access_rule(cr, uid, ids, operation, context=context)
def _workflow_trigger(self, cr, uid, ids, trigger, context=None):
        #override in order to trigger the workflow of stock.picking at the end of create, write and unlink operations
        #instead of its own workflow (which does not exist)
return self.pool.get('stock.picking')._workflow_trigger(cr, uid, ids, trigger, context=context)
def _workflow_signal(self, cr, uid, ids, signal, context=None):
        #override in order to fire the workflow signal on the given stock.picking workflow instance
        #instead of its own workflow (which does not exist)
return self.pool.get('stock.picking')._workflow_signal(cr, uid, ids, signal, context=context)
_columns = {
'backorder_id': fields.many2one('stock.picking.in', 'Back Order of', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True),
'state': fields.selection(
[('draft', 'Draft'),
('auto', 'Waiting Another Operation'),
('confirmed', 'Waiting Availability'),
('assigned', 'Ready to Receive'),
('done', 'Received'),
('cancel', 'Cancelled'),],
'Status', readonly=True, select=True,
help="""* Draft: not confirmed yet and will not be scheduled until confirmed\n
* Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
* Waiting Availability: still waiting for the availability of products\n
* Ready to Receive: products reserved, simply waiting for confirmation.\n
* Received: has been processed, can't be modified or cancelled anymore\n
* Cancelled: has been cancelled, can't be confirmed anymore"""),
}
_defaults = {
'type': 'in',
}
class stock_picking_out(osv.osv):
_name = "stock.picking.out"
_inherit = "stock.picking"
_table = "stock_picking"
_description = "Delivery Orders"
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
return self.pool.get('stock.picking').search(cr, user, args, offset, limit, order, context, count)
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
return self.pool.get('stock.picking').read(cr, uid, ids, fields=fields, context=context, load=load)
def check_access_rights(self, cr, uid, operation, raise_exception=True):
        #override in order to redirect the check of access rights on the stock.picking object
return self.pool.get('stock.picking').check_access_rights(cr, uid, operation, raise_exception=raise_exception)
def check_access_rule(self, cr, uid, ids, operation, context=None):
        #override in order to redirect the check of access rules on the stock.picking object
return self.pool.get('stock.picking').check_access_rule(cr, uid, ids, operation, context=context)
def _workflow_trigger(self, cr, uid, ids, trigger, context=None):
        #override in order to trigger the workflow of stock.picking at the end of create, write and unlink operations
        #instead of its own workflow (which does not exist)
return self.pool.get('stock.picking')._workflow_trigger(cr, uid, ids, trigger, context=context)
def _workflow_signal(self, cr, uid, ids, signal, context=None):
        #override in order to fire the workflow signal on the given stock.picking workflow instance
        #instead of its own workflow (which does not exist)
return self.pool.get('stock.picking')._workflow_signal(cr, uid, ids, signal, context=context)
_columns = {
'backorder_id': fields.many2one('stock.picking.out', 'Back Order of', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True),
'state': fields.selection(
[('draft', 'Draft'),
('auto', 'Waiting Another Operation'),
('confirmed', 'Waiting Availability'),
('assigned', 'Ready to Deliver'),
('done', 'Delivered'),
('cancel', 'Cancelled'),],
'Status', readonly=True, select=True,
help="""* Draft: not confirmed yet and will not be scheduled until confirmed\n
* Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
* Waiting Availability: still waiting for the availability of products\n
* Ready to Deliver: products reserved, simply waiting for confirmation.\n
* Delivered: has been processed, can't be modified or cancelled anymore\n
* Cancelled: has been cancelled, can't be confirmed anymore"""),
}
_defaults = {
'type': 'out',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
lepistone/odoo
|
addons/note_pad/note_pad.py
|
441
|
1301
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
class note_pad_note(osv.osv):
""" memo pad """
_name = 'note.note'
_inherit = ['pad.common','note.note']
_pad_fields = ['note_pad']
_columns = {
'note_pad_url': fields.char('Pad Url', pad_content_field='memo'),
}
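    # Hedged note: 'note_pad_url' holds the URL of the collaborative pad backing
    # the note, and pad_content_field='memo' appears to tell pad.common which
    # field of the record ('memo' here) the pad content is synchronised with.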
|
agpl-3.0
|
Henrilin28/lightblue-0.4
|
src/linux/_obexcommon.py
|
49
|
18261
|
# Copyright (c) 2009 Bea Lam. All rights reserved.
#
# This file is part of LightBlue.
#
# LightBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LightBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with LightBlue. If not, see <http://www.gnu.org/licenses/>.
import types
import _lightbluecommon
__all__ = ('OBEXResponse', 'OBEXError',
'CONTINUE', 'OK', 'CREATED', 'ACCEPTED', 'NON_AUTHORITATIVE_INFORMATION',
'NO_CONTENT', 'RESET_CONTENT', 'PARTIAL_CONTENT',
'MULTIPLE_CHOICES', 'MOVED_PERMANENTLY', 'MOVED_TEMPORARILY', 'SEE_OTHER',
'NOT_MODIFIED', 'USE_PROXY',
'BAD_REQUEST', 'UNAUTHORIZED', 'PAYMENT_REQUIRED', 'FORBIDDEN',
'NOT_FOUND', 'METHOD_NOT_ALLOWED', 'NOT_ACCEPTABLE',
'PROXY_AUTHENTICATION_REQUIRED', 'REQUEST_TIME_OUT', 'CONFLICT', 'GONE',
'LENGTH_REQUIRED', 'PRECONDITION_FAILED', 'REQUESTED_ENTITY_TOO_LARGE',
'REQUEST_URL_TOO_LARGE', 'UNSUPPORTED_MEDIA_TYPE',
'INTERNAL_SERVER_ERROR', 'NOT_IMPLEMENTED', 'BAD_GATEWAY',
'SERVICE_UNAVAILABLE', 'GATEWAY_TIMEOUT', 'HTTP_VERSION_NOT_SUPPORTED',
'DATABASE_FULL', 'DATABASE_LOCKED')
class OBEXError(_lightbluecommon.BluetoothError):
"""
Generic exception raised for OBEX-related errors.
"""
pass
class OBEXResponse:
"""
Contains the OBEX response received from an OBEX server.
When an OBEX client sends a request, the OBEX server sends back a response
code (to indicate whether the request was successful) and a set of response
headers (to provide other useful information).
For example, if a client sends a 'Get' request to retrieve a file, the
client might get a response like this:
>>> import lightblue
>>> client = lightblue.obex.OBEXClient("aa:bb:cc:dd:ee:ff", 10)
>>> response = client.get({"name": "file.txt"}, file("file.txt", "w"))
>>> print response
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={'length': 35288}>
You can get the response code and response headers in different formats:
>>> print response.reason
'OK' # a string description of the response code
>>> print response.code
32 # the response code (e.g. this is 0x20)
>>> print response.headers
{'length': 35288} # the headers, with string keys
>>> print response.rawheaders
{195: 35288} # the headers, with raw header ID keys
>>>
Note how the 'code' attribute does not have the final bit set - e.g. for
OK/Success, the response code is 0x20, not 0xA0.
The lightblue.obex module defines constants for response code values (e.g.
lightblue.obex.OK, lightblue.obex.FORBIDDEN, etc.).
"""
def __init__(self, code, rawheaders):
self.__code = code
self.__reason = _OBEX_RESPONSES.get(code, "Unknown response code")
self.__rawheaders = rawheaders
self.__headers = None
code = property(lambda self: self.__code,
doc='The response code, without the final bit set.')
reason = property(lambda self: self.__reason,
doc='A string description of the response code.')
rawheaders = property(lambda self: self.__rawheaders,
doc='The response headers, as a dictionary with header ID (unsigned byte) keys.')
def getheader(self, header, default=None):
'''
Returns the response header value for the given header, which may
either be a string (not case-sensitive) or the raw byte
value of the header ID.
Returns the specified default value if the header is not present.
'''
if isinstance(header, types.StringTypes):
return self.headers.get(header.lower(), default)
return self.__rawheaders.get(header, default)
def __getheaders(self):
if self.__headers is None:
self.__headers = {}
for headerid, value in self.__rawheaders.items():
if headerid in _HEADER_IDS_TO_STRINGS:
self.__headers[_HEADER_IDS_TO_STRINGS[headerid]] = value
else:
self.__headers["0x%02x" % headerid] = value
return self.__headers
headers = property(__getheaders,
doc='The response headers, as a dictionary with string keys.')
def __repr__(self):
return "<OBEXResponse reason='%s' code=0x%02x (0x%02x) headers=%s>" % \
(self.__reason, self.__code, (self.__code | 0x80), str(self.headers))
try:
import datetime
# as from python docs example
class UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
except ImportError:
    pass # no datetime module on pys60
_LOCAL_TIME_FORMAT = "%Y%m%dT%H%M%S"
_UTC_TIME_FORMAT = _LOCAL_TIME_FORMAT + "Z"
def _datetimefromstring(s):
import time
if s[-1:] == "Z":
# add UTC() instance as tzinfo
args = (time.strptime(s, _UTC_TIME_FORMAT)[0:6]) + (0, UTC())
return datetime.datetime(*args)
else:
return datetime.datetime(*(time.strptime(s, _LOCAL_TIME_FORMAT)[0:6]))
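# Illustrative example: _datetimefromstring("20090714T153000Z") returns a
# timezone-aware datetime for 2009-07-14 15:30:00 UTC, whereas the same string
# without the trailing "Z" is parsed as a naive local time.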
_HEADER_STRINGS_TO_IDS = {
"count": 0xc0,
"name": 0x01,
"type": 0x42,
"length": 0xc3,
"time": 0x44,
"description": 0x05,
"target": 0x46,
"http": 0x47,
"who": 0x4a,
"connection-id": 0xcb,
"application-parameters": 0x4c,
"authentication-challenge": 0x4d,
"authentication-response": 0x4e,
"creator-id": 0xcf,
"wan-uuid": 0x50,
"object-class": 0x51,
"session-parameters": 0x52,
"session-sequence-number": 0x93
}
_HEADER_IDS_TO_STRINGS = {}
for key, value in _HEADER_STRINGS_TO_IDS.items():
_HEADER_IDS_TO_STRINGS[value] = key
assert len(_HEADER_IDS_TO_STRINGS) == len(_HEADER_STRINGS_TO_IDS)
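# Per the OBEX header-ID convention, the two high bits of each ID encode how the
# value is transmitted: 0x00 -> null-terminated Unicode text, 0x40 -> byte
# sequence, 0x80 -> 1-byte quantity, 0xC0 -> 4-byte quantity (hence "name" is
# 0x01, a text header, while "length" is 0xc3, a 4-byte integer).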
# These match the associated strings in httplib.responses, since OBEX response
# codes are matched to HTTP status codes (except for 0x60 and 0x61).
# Note these are the responses *without* the final bit set.
_OBEX_RESPONSES = {
0x10: "Continue",
0x20: "OK",
0x21: "Created",
0x22: "Accepted",
0x23: "Non-Authoritative Information",
0x24: "No Content",
0x25: "Reset Content",
0x26: "Partial Content",
0x30: "Multiple Choices",
0x31: "Moved Permanently",
0x32: "Moved Temporarily", # but is 'Found' (302) in httplib.response???
0x33: "See Other",
0x34: "Not Modified",
0x35: "Use Proxy",
0x40: "Bad Request",
0x41: "Unauthorized",
0x42: "Payment Required",
0x43: "Forbidden",
0x44: "Not Found",
0x45: "Method Not Allowed",
0x46: "Not Acceptable",
0x47: "Proxy Authentication Required",
0x48: "Request Timeout",
0x49: "Conflict",
0x4A: "Gone",
0x48: "Length Required",
0x4C: "Precondition Failed",
0x4D: "Request Entity Too Large",
0x4E: "Request-URI Too Long",
0x4F: "Unsupported Media Type",
0x50: "Internal Server Error",
0x51: "Not Implemented",
0x52: "Bad Gateway",
0x53: "Service Unavailable",
0x54: "Gateway Timeout",
0x55: "HTTP Version Not Supported",
0x60: "Database Full",
0x61: "Database Locked"
}
_obexclientclassdoc = \
"""
An OBEX client class. (Note this is not available on Python for Series 60.)
For example, to connect to an OBEX server and send a file:
>>> import lightblue
>>> client = lightblue.obex.OBEXClient("aa:bb:cc:dd:ee:ff", 10)
>>> client.connect()
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>> client.put({"name": "photo.jpg"}, file("photo.jpg", "rb"))
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>> client.disconnect()
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>>
A client must call connect() to establish a connection before it can send
any other requests.
The connect(), disconnect(), put(), delete(), get() and setpath() methods
all accept the request headers as a dictionary of header-value mappings. The
request headers are used to provide the server with additional information
for the request. For example, this sends a Put request that includes Name,
Type and Length headers in the request headers, to provide details about
the transferred file:
>>> f = file("file.txt")
>>> client.put({"name": "file.txt", "type": "text/plain",
... "length": 5192}, f)
>>>
Here is a list of all the different string header keys that you can use in
the request headers, and the expected type of the value for each header:
- "name" -> a string
- "type" -> a string
- "length" -> an int
- "time" -> a datetime object from the datetime module
- "description" -> a string
- "target" -> a string or buffer
- "http" -> a string or buffer
- "who" -> a string or buffer
- "connection-id" -> an int
- "application-parameters" -> a string or buffer
- "authentication-challenge" -> a string or buffer
- "authentication-response" -> a string or buffer
- "creator-id" -> an int
- "wan-uuid" -> a string or buffer
- "object-class" -> a string or buffer
- "session-parameters" -> a string or buffer
- "session-sequence-number" -> an int less than 256
(The string header keys are not case-sensitive.)
Alternatively, you can use raw header ID values instead of the above
convenience strings. So, the previous example can be rewritten as:
>>> client.put({0x01: "file.txt", 0x42: "text/plain", 0xC3: 5192},
... fileobject)
>>>
This is also useful for inserting custom headers. For example, a PutImage
request for a Basic Imaging client requires the Img-Descriptor (0x71)
header:
>>> client.put({"type": "x-bt/img-img",
... "name": "photo.jpg",
... 0x71: '<image-descriptor version="1.0"><image encoding="JPEG" pixel="160*120" size="37600"/></image-descriptor>'},
... file('photo.jpg', 'rb'))
>>>
Notice that the connection-id header is not sent, because this is
automatically included by OBEXClient in the request headers if a
connection-id was received in a previous Connect response.
See the included src/examples/obex_ftp_client.py for an example of using
OBEXClient to implement a File Transfer client for browsing the files on a
remote device.
"""
_obexclientdocs = {
"__init__":
"""
Creates an OBEX client.
Arguments:
- address: the address of the remote device
- channel: the RFCOMM channel of the remote OBEX service
""",
"connect":
"""
Establishes the Bluetooth connection to the remote OBEX server and sends
a Connect request to open the OBEX session. Returns an OBEXResponse
instance containing the server response.
Raises lightblue.obex.OBEXError if the session is already connected, or if
an error occurs during the request.
If the server refuses the Connect request (i.e. if it sends a response code
other than OK/Success), the Bluetooth connection will be closed.
Arguments:
- headers={}: the headers to send for the Connect request
""",
"disconnect":
"""
Sends a Disconnect request to end the OBEX session and closes the Bluetooth
connection to the remote OBEX server. Returns an OBEXResponse
instance containing the server response.
Raises lightblue.obex.OBEXError if connect() has not been called, or if an
error occurs during the request.
Note that you don't need to send any connection-id headers - this is
automatically included if the client received one in a Connect response.
Arguments:
- headers={}: the headers to send for the request
""",
"put":
"""
Sends a Put request. Returns an OBEXResponse instance containing the
server response.
Raises lightblue.obex.OBEXError if connect() has not been called, or if an
error occurs during the request.
Note that you don't need to send any connection-id headers - this is
automatically included if the client received one in a Connect response.
Arguments:
- headers: the headers to send for the request
- fileobj: a file-like object containing the file data to be sent for
the request
For example, to send a file named 'photo.jpg', using the request headers
to notify the server of the file's name, MIME type and length:
>>> client = lightblue.obex.OBEXClient("aa:bb:cc:dd:ee:ff", 10)
>>> client.connect()
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>> client.put({"name": "photo.jpg", "type": "image/jpeg",
"length": 28566}, file("photo.jpg", "rb"))
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>>
""",
"delete":
"""
Sends a Put-Delete request in order to delete a file or folder on the remote
server. Returns an OBEXResponse instance containing the server response.
Raises lightblue.obex.OBEXError if connect() has not been called, or if an
error occurs during the request.
Note that you don't need to send any connection-id headers - this is
automatically included if the client received one in a Connect response.
Arguments:
- headers: the headers to send for the request - you should use the
'name' header to specify the file you want to delete
If the file on the server can't be deleted because it's a read-only file,
you might get an 'Unauthorized' response, like this:
>>> client = lightblue.obex.OBEXClient("aa:bb:cc:dd:ee:ff", 10)
>>> client.connect()
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>> client.delete({"name": "random_file.txt"})
<OBEXResponse reason='Unauthorized' code=0x41 (0xc1) headers={}>
>>>
""",
"get":
"""
Sends a Get request. Returns an OBEXResponse instance containing the server
response.
Raises lightblue.obex.OBEXError if connect() has not been called, or if an
error occurs during the request.
Note that you don't need to send any connection-id headers - this is
automatically included if the client received one in a Connect response.
Arguments:
- headers: the headers to send for the request - you should use these
to specify the file you want to retrieve
- fileobj: a file-like object, to which the received data will be
written
An example:
>>> client = lightblue.obex.OBEXClient("aa:bb:cc:dd:ee:ff", 10)
>>> client.connect()
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>> f = file("received_file.txt", "w+")
>>> client.get({"name": "testfile.txt"}, f)
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={'length':9}>
>>> f.seek(0)
>>> f.read()
'test file'
>>>
""",
"setpath":
"""
Sends a SetPath request in order to set the "current path" on the remote
server for file transfers. Returns an OBEXResponse instance containing the
server response.
Raises lightblue.obex.OBEXError if connect() has not been called, or if an
error occurs during the request.
Note that you don't need to send any connection-id headers - this is
automatically included if the client received one in a Connect response.
Arguments:
- headers: the headers to send for the request - you should use the
'name' header to specify the directory you want to change to
- cdtoparent=False: True if the remote server should move up one
directory before applying the specified directory (i.e. 'cd
../dirname')
- createdirs=False: True if the specified directory should be created
if it doesn't exist (if False, the server will return an error
response if the directory doesn't exist)
For example:
# change to the "images" subdirectory
>>> client.setpath({"name": "images"})
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>>
# change to the parent directory
>>> client.setpath({}, cdtoparent=True)
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>>
# create a subdirectory "My_Files"
>>> client.setpath({"name": "My_Files"}, createdirs=True)
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>>
# change to the root directory - you can use an empty "name" header
# to specify this
>>> client.setpath({"name": ""})
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>>
"""
}
# response constants
CONTINUE = 0x10
OK = 0x20
CREATED = 0x21
ACCEPTED = 0x22
NON_AUTHORITATIVE_INFORMATION = 0x23
NO_CONTENT = 0x24
RESET_CONTENT = 0x25
PARTIAL_CONTENT = 0x26
MULTIPLE_CHOICES = 0x30
MOVED_PERMANENTLY = 0x31
MOVED_TEMPORARILY = 0x32
SEE_OTHER = 0x33
NOT_MODIFIED = 0x34
USE_PROXY = 0x35
BAD_REQUEST = 0x40
UNAUTHORIZED = 0x41
PAYMENT_REQUIRED = 0x42
FORBIDDEN = 0x43
NOT_FOUND = 0x44
METHOD_NOT_ALLOWED = 0x45
NOT_ACCEPTABLE = 0x46
PROXY_AUTHENTICATION_REQUIRED = 0x47
REQUEST_TIME_OUT = 0x48
CONFLICT = 0x49
GONE = 0x4A
LENGTH_REQUIRED = 0x4B
PRECONDITION_FAILED = 0x4C
REQUESTED_ENTITY_TOO_LARGE = 0x4D
REQUEST_URL_TOO_LARGE = 0x4E
UNSUPPORTED_MEDIA_TYPE = 0x4F
INTERNAL_SERVER_ERROR = 0x50
NOT_IMPLEMENTED = 0x51
BAD_GATEWAY = 0x52
SERVICE_UNAVAILABLE = 0x53
GATEWAY_TIMEOUT = 0x54
HTTP_VERSION_NOT_SUPPORTED = 0x55
DATABASE_FULL = 0x60
DATABASE_LOCKED = 0x61
|
gpl-3.0
|
Jewel-Systems/server
|
udp.py
|
1
|
1268
|
import socket
import threading
from log import log
web_server_port = 53455
listen_port = 53456
reply_port = 53457
debug_timeout = 10 # seconds
d = None
def daemon():
# find local IP
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 0))
local_ip_address = s.getsockname()[0]
s.close()
# listen and reply
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# s.settimeout(debug_timeout)
try:
s.bind(('0.0.0.0', listen_port))
except Exception:
log.warn('Could not bind 0.0.0.0:{}'.format(listen_port))
else:
log.debug('UDP bound')
while True:
try:
data, addr = s.recvfrom(4096) # blocks
print('{ip}:{port} {data}'.format(data=data.decode('ascii'), ip=addr[0], port=addr[1]))
reply = str(web_server_port).encode('ascii')
s.sendto(reply, (addr[0], reply_port))
log.debug('Sent a reply to ' + str(addr))
except socket.timeout:
print('Listening duration elapsed.')
break
s.close()
def go():
    global d  # keep the module-level reference to the daemon thread
    d = threading.Thread(name='daemon', target=daemon)
    d.setDaemon(True)
    d.start()
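# A minimal client-side discovery sketch (hypothetical; not part of the original
# module). It assumes the protocol implied by daemon(): send any datagram to
# listen_port, then read the web server port (ASCII) from a reply on reply_port.
def example_discover(timeout=5):
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    probe.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    listener = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    listener.settimeout(timeout)
    try:
        listener.bind(('0.0.0.0', reply_port))
        probe.sendto(b'discover', ('255.255.255.255', listen_port))
        data, addr = listener.recvfrom(4096)  # blocks until a server replies or timeout
        return addr[0], int(data.decode('ascii'))
    except socket.timeout:
        return None
    finally:
        probe.close()
        listener.close()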
|
mit
|
40223236/lego
|
static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_setups.py
|
791
|
16440
|
import io
import sys
import unittest
def resultFactory(*_):
return unittest.TestResult()
class TestSetups(unittest.TestCase):
def getRunner(self):
return unittest.TextTestRunner(resultclass=resultFactory,
stream=io.StringIO())
def runTests(self, *cases):
suite = unittest.TestSuite()
for case in cases:
tests = unittest.defaultTestLoader.loadTestsFromTestCase(case)
suite.addTests(tests)
runner = self.getRunner()
# creating a nested suite exposes some potential bugs
realSuite = unittest.TestSuite()
realSuite.addTest(suite)
# adding empty suites to the end exposes potential bugs
suite.addTest(unittest.TestSuite())
realSuite.addTest(unittest.TestSuite())
return runner.run(realSuite)
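        # The resulting structure is roughly:
        #   realSuite
        #     +-- suite (the loaded test cases + one trailing empty TestSuite)
        #     +-- one trailing empty TestSuite
        # which exercises the nested-suite handling mentioned in the comments above.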
def test_setup_class(self):
class Test(unittest.TestCase):
setUpCalled = 0
@classmethod
def setUpClass(cls):
Test.setUpCalled += 1
unittest.TestCase.setUpClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(Test.setUpCalled, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_teardown_class(self):
class Test(unittest.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test.tearDownCalled += 1
unittest.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(Test.tearDownCalled, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_teardown_class_two_classes(self):
class Test(unittest.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test.tearDownCalled += 1
unittest.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test2.tearDownCalled += 1
unittest.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test, Test2)
self.assertEqual(Test.tearDownCalled, 1)
self.assertEqual(Test2.tearDownCalled, 1)
self.assertEqual(result.testsRun, 4)
self.assertEqual(len(result.errors), 0)
def test_error_in_setupclass(self):
class BrokenTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(BrokenTest)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error),
'setUpClass (%s.BrokenTest)' % __name__)
def test_error_in_teardown_class(self):
class Test(unittest.TestCase):
tornDown = 0
@classmethod
def tearDownClass(cls):
Test.tornDown += 1
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
tornDown = 0
@classmethod
def tearDownClass(cls):
Test2.tornDown += 1
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test, Test2)
self.assertEqual(result.testsRun, 4)
self.assertEqual(len(result.errors), 2)
self.assertEqual(Test.tornDown, 1)
self.assertEqual(Test2.tornDown, 1)
error, _ = result.errors[0]
self.assertEqual(str(error),
'tearDownClass (%s.Test)' % __name__)
def test_class_not_torndown_when_setup_fails(self):
class Test(unittest.TestCase):
tornDown = False
@classmethod
def setUpClass(cls):
raise TypeError
@classmethod
def tearDownClass(cls):
Test.tornDown = True
raise TypeError('foo')
def test_one(self):
pass
self.runTests(Test)
self.assertFalse(Test.tornDown)
def test_class_not_setup_or_torndown_when_skipped(self):
class Test(unittest.TestCase):
classSetUp = False
tornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.tornDown = True
def test_one(self):
pass
Test = unittest.skip("hop")(Test)
self.runTests(Test)
self.assertFalse(Test.classSetUp)
self.assertFalse(Test.tornDown)
def test_setup_teardown_order_with_pathological_suite(self):
results = []
class Module1(object):
@staticmethod
def setUpModule():
results.append('Module1.setUpModule')
@staticmethod
def tearDownModule():
results.append('Module1.tearDownModule')
class Module2(object):
@staticmethod
def setUpModule():
results.append('Module2.setUpModule')
@staticmethod
def tearDownModule():
results.append('Module2.tearDownModule')
class Test1(unittest.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 1')
@classmethod
def tearDownClass(cls):
results.append('teardown 1')
def testOne(self):
results.append('Test1.testOne')
def testTwo(self):
results.append('Test1.testTwo')
class Test2(unittest.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 2')
@classmethod
def tearDownClass(cls):
results.append('teardown 2')
def testOne(self):
results.append('Test2.testOne')
def testTwo(self):
results.append('Test2.testTwo')
class Test3(unittest.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 3')
@classmethod
def tearDownClass(cls):
results.append('teardown 3')
def testOne(self):
results.append('Test3.testOne')
def testTwo(self):
results.append('Test3.testTwo')
Test1.__module__ = Test2.__module__ = 'Module'
Test3.__module__ = 'Module2'
sys.modules['Module'] = Module1
sys.modules['Module2'] = Module2
first = unittest.TestSuite((Test1('testOne'),))
second = unittest.TestSuite((Test1('testTwo'),))
third = unittest.TestSuite((Test2('testOne'),))
fourth = unittest.TestSuite((Test2('testTwo'),))
fifth = unittest.TestSuite((Test3('testOne'),))
sixth = unittest.TestSuite((Test3('testTwo'),))
suite = unittest.TestSuite((first, second, third, fourth, fifth, sixth))
runner = self.getRunner()
result = runner.run(suite)
self.assertEqual(result.testsRun, 6)
self.assertEqual(len(result.errors), 0)
self.assertEqual(results,
['Module1.setUpModule', 'setup 1',
'Test1.testOne', 'Test1.testTwo', 'teardown 1',
'setup 2', 'Test2.testOne', 'Test2.testTwo',
'teardown 2', 'Module1.tearDownModule',
'Module2.setUpModule', 'setup 3',
'Test3.testOne', 'Test3.testTwo',
'teardown 3', 'Module2.tearDownModule'])
def test_setup_module(self):
class Module(object):
moduleSetup = 0
@staticmethod
def setUpModule():
Module.moduleSetup += 1
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(Module.moduleSetup, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_error_in_setup_module(self):
class Module(object):
moduleSetup = 0
moduleTornDown = 0
@staticmethod
def setUpModule():
Module.moduleSetup += 1
raise TypeError('foo')
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
class Test(unittest.TestCase):
classSetUp = False
classTornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.classTornDown = True
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
Test2.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test, Test2)
self.assertEqual(Module.moduleSetup, 1)
self.assertEqual(Module.moduleTornDown, 0)
self.assertEqual(result.testsRun, 0)
self.assertFalse(Test.classSetUp)
self.assertFalse(Test.classTornDown)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error), 'setUpModule (Module)')
def test_testcase_with_missing_module(self):
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules.pop('Module', None)
result = self.runTests(Test)
self.assertEqual(result.testsRun, 2)
def test_teardown_module(self):
class Module(object):
moduleTornDown = 0
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(Module.moduleTornDown, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_error_in_teardown_module(self):
class Module(object):
moduleTornDown = 0
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
raise TypeError('foo')
class Test(unittest.TestCase):
classSetUp = False
classTornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.classTornDown = True
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
Test2.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test, Test2)
self.assertEqual(Module.moduleTornDown, 1)
self.assertEqual(result.testsRun, 4)
self.assertTrue(Test.classSetUp)
self.assertTrue(Test.classTornDown)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error), 'tearDownModule (Module)')
def test_skiptest_in_setupclass(self):
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
raise unittest.SkipTest('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
skipped = result.skipped[0][0]
self.assertEqual(str(skipped), 'setUpClass (%s.Test)' % __name__)
def test_skiptest_in_setupmodule(self):
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
class Module(object):
@staticmethod
def setUpModule():
raise unittest.SkipTest('foo')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
skipped = result.skipped[0][0]
self.assertEqual(str(skipped), 'setUpModule (Module)')
def test_suite_debug_executes_setups_and_teardowns(self):
ordering = []
class Module(object):
@staticmethod
def setUpModule():
ordering.append('setUpModule')
@staticmethod
def tearDownModule():
ordering.append('tearDownModule')
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
ordering.append('setUpClass')
@classmethod
def tearDownClass(cls):
ordering.append('tearDownClass')
def test_something(self):
ordering.append('test_something')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)
suite.debug()
expectedOrder = ['setUpModule', 'setUpClass', 'test_something', 'tearDownClass', 'tearDownModule']
self.assertEqual(ordering, expectedOrder)
def test_suite_debug_propagates_exceptions(self):
class Module(object):
@staticmethod
def setUpModule():
if phase == 0:
raise Exception('setUpModule')
@staticmethod
def tearDownModule():
if phase == 1:
raise Exception('tearDownModule')
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
if phase == 2:
raise Exception('setUpClass')
@classmethod
def tearDownClass(cls):
if phase == 3:
raise Exception('tearDownClass')
def test_something(self):
if phase == 4:
raise Exception('test_something')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
_suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)
suite = unittest.TestSuite()
suite.addTest(_suite)
messages = ('setUpModule', 'tearDownModule', 'setUpClass', 'tearDownClass', 'test_something')
for phase, msg in enumerate(messages):
with self.assertRaisesRegex(Exception, msg):
suite.debug()
if __name__ == '__main__':
unittest.main()
|
agpl-3.0
|
manipopopo/tensorflow
|
tensorflow/contrib/eager/python/examples/spinn/data_test.py
|
42
|
11219
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for SPINN data module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.spinn import data
class DataTest(tf.test.TestCase):
def setUp(self):
super(DataTest, self).setUp()
self._temp_data_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self._temp_data_dir)
super(DataTest, self).tearDown()
def testGenNonParenthesisWords(self):
seq_with_parse = (
"( Man ( ( ( ( ( wearing pass ) ( on ( a lanyard ) ) ) and "
") ( standing ( in ( ( a crowd ) ( of people ) ) ) ) ) . ) )")
self.assertEqual(
["man", "wearing", "pass", "on", "a", "lanyard", "and", "standing",
"in", "a", "crowd", "of", "people", "."],
data.get_non_parenthesis_words(seq_with_parse.split(" ")))
def testGetShiftReduce(self):
seq_with_parse = (
"( Man ( ( ( ( ( wearing pass ) ( on ( a lanyard ) ) ) and "
") ( standing ( in ( ( a crowd ) ( of people ) ) ) ) ) . ) )")
self.assertEqual(
[3, 3, 3, 2, 3, 3, 3, 2, 2, 2, 3, 2, 3, 3, 3, 3, 2, 3, 3, 2, 2, 2, 2, 2,
3, 2, 2], data.get_shift_reduce(seq_with_parse.split(" ")))
def testPadAndReverseWordIds(self):
id_sequences = [[0, 2, 3, 4, 5],
[6, 7, 8],
[9, 10, 11, 12, 13, 14, 15, 16]]
self.assertAllClose(
[[1, 1, 1, 1, 5, 4, 3, 2, 0],
[1, 1, 1, 1, 1, 1, 8, 7, 6],
[1, 16, 15, 14, 13, 12, 11, 10, 9]],
data.pad_and_reverse_word_ids(id_sequences))
def testPadTransitions(self):
unpadded = [[3, 3, 3, 2, 2, 2, 2],
[3, 3, 2, 2, 2]]
self.assertAllClose(
[[3, 3, 3, 2, 2, 2, 2],
[3, 3, 2, 2, 2, 1, 1]],
data.pad_transitions(unpadded))
def testCalculateBins(self):
length2count = {
1: 10,
2: 15,
3: 25,
4: 40,
5: 35,
6: 10}
self.assertEqual([2, 3, 4, 5, 6],
data.calculate_bins(length2count, 20))
self.assertEqual([3, 4, 6], data.calculate_bins(length2count, 40))
self.assertEqual([4, 6], data.calculate_bins(length2count, 60))
def testLoadVocabulary(self):
snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
fake_train_file = os.path.join(snli_1_0_dir, "snli_1.0_train.txt")
fake_dev_file = os.path.join(snli_1_0_dir, "snli_1.0_dev.txt")
os.makedirs(snli_1_0_dir)
with open(fake_train_file, "wt") as f:
f.write("gold_label\tsentence1_binary_parse\tsentence2_binary_parse\t"
"sentence1_parse\tsentence2_parse\tsentence1\tsentence2\t"
"captionID\tpairID\tlabel1\tlabel2\tlabel3\tlabel4\tlabel5\n")
f.write("neutral\t( ( Foo bar ) . )\t( ( foo baz ) . )\t"
"DummySentence1Parse\tDummySentence2Parse\t"
"Foo bar.\tfoo baz.\t"
"4705552913.jpg#2\t4705552913.jpg#2r1n\t"
"neutral\tentailment\tneutral\tneutral\tneutral\n")
with open(fake_dev_file, "wt") as f:
f.write("gold_label\tsentence1_binary_parse\tsentence2_binary_parse\t"
"sentence1_parse\tsentence2_parse\tsentence1\tsentence2\t"
"captionID\tpairID\tlabel1\tlabel2\tlabel3\tlabel4\tlabel5\n")
f.write("neutral\t( ( Quux quuz ) ? )\t( ( Corge grault ) ! )\t"
"DummySentence1Parse\tDummySentence2Parse\t"
"Quux quuz?\t.Corge grault!\t"
"4705552913.jpg#2\t4705552913.jpg#2r1n\t"
"neutral\tentailment\tneutral\tneutral\tneutral\n")
vocab = data.load_vocabulary(self._temp_data_dir)
self.assertSetEqual(
{".", "?", "!", "foo", "bar", "baz", "quux", "quuz", "corge", "grault"},
vocab)
def testLoadVocabularyWithoutFileRaisesError(self):
with self.assertRaisesRegexp(ValueError, "Cannot find SNLI data files at"):
data.load_vocabulary(self._temp_data_dir)
os.makedirs(os.path.join(self._temp_data_dir, "snli"))
with self.assertRaisesRegexp(ValueError, "Cannot find SNLI data files at"):
data.load_vocabulary(self._temp_data_dir)
os.makedirs(os.path.join(self._temp_data_dir, "snli/snli_1.0"))
with self.assertRaisesRegexp(ValueError, "Cannot find SNLI data files at"):
data.load_vocabulary(self._temp_data_dir)
def testLoadWordVectors(self):
glove_dir = os.path.join(self._temp_data_dir, "glove")
os.makedirs(glove_dir)
glove_file = os.path.join(glove_dir, "glove.42B.300d.txt")
words = [".", ",", "foo", "bar", "baz"]
with open(glove_file, "wt") as f:
for i, word in enumerate(words):
f.write("%s " % word)
for j in range(data.WORD_VECTOR_LEN):
f.write("%.5f" % (i * 0.1))
if j < data.WORD_VECTOR_LEN - 1:
f.write(" ")
else:
f.write("\n")
vocab = {"foo", "bar", "baz", "qux", "."}
# Notice that "qux" is not present in `words`.
word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab)
self.assertEqual(6, len(word2index))
self.assertEqual(0, word2index["<unk>"])
self.assertEqual(1, word2index["<pad>"])
self.assertEqual(2, word2index["."])
self.assertEqual(3, word2index["foo"])
self.assertEqual(4, word2index["bar"])
self.assertEqual(5, word2index["baz"])
self.assertEqual((6, data.WORD_VECTOR_LEN), embed.shape)
self.assertAllClose([0.0] * data.WORD_VECTOR_LEN, embed[0, :])
self.assertAllClose([0.0] * data.WORD_VECTOR_LEN, embed[1, :])
self.assertAllClose([0.0] * data.WORD_VECTOR_LEN, embed[2, :])
self.assertAllClose([0.2] * data.WORD_VECTOR_LEN, embed[3, :])
self.assertAllClose([0.3] * data.WORD_VECTOR_LEN, embed[4, :])
self.assertAllClose([0.4] * data.WORD_VECTOR_LEN, embed[5, :])
def testLoadWordVectorsWithoutFileRaisesError(self):
vocab = {"foo", "bar", "baz", "qux", "."}
with self.assertRaisesRegexp(
ValueError, "Cannot find GloVe embedding file at"):
data.load_word_vectors(self._temp_data_dir, vocab)
os.makedirs(os.path.join(self._temp_data_dir, "glove"))
with self.assertRaisesRegexp(
ValueError, "Cannot find GloVe embedding file at"):
data.load_word_vectors(self._temp_data_dir, vocab)
def _createFakeSnliData(self, fake_snli_file):
# Four sentences in total.
with open(fake_snli_file, "wt") as f:
f.write("gold_label\tsentence1_binary_parse\tsentence2_binary_parse\t"
"sentence1_parse\tsentence2_parse\tsentence1\tsentence2\t"
"captionID\tpairID\tlabel1\tlabel2\tlabel3\tlabel4\tlabel5\n")
f.write("neutral\t( ( Foo bar ) . )\t( ( foo . )\t"
"DummySentence1Parse\tDummySentence2Parse\t"
"Foo bar.\tfoo baz.\t"
"4705552913.jpg#2\t4705552913.jpg#2r1n\t"
"neutral\tentailment\tneutral\tneutral\tneutral\n")
f.write("contradiction\t( ( Bar foo ) . )\t( ( baz . )\t"
"DummySentence1Parse\tDummySentence2Parse\t"
"Foo bar.\tfoo baz.\t"
"4705552913.jpg#2\t4705552913.jpg#2r1n\t"
"neutral\tentailment\tneutral\tneutral\tneutral\n")
f.write("entailment\t( ( Quux quuz ) . )\t( ( grault . )\t"
"DummySentence1Parse\tDummySentence2Parse\t"
"Foo bar.\tfoo baz.\t"
"4705552913.jpg#2\t4705552913.jpg#2r1n\t"
"neutral\tentailment\tneutral\tneutral\tneutral\n")
f.write("entailment\t( ( Quuz quux ) . )\t( ( garply . )\t"
"DummySentence1Parse\tDummySentence2Parse\t"
"Foo bar.\tfoo baz.\t"
"4705552913.jpg#2\t4705552913.jpg#2r1n\t"
"neutral\tentailment\tneutral\tneutral\tneutral\n")
def _createFakeGloveData(self, glove_file):
words = [".", "foo", "bar", "baz", "quux", "quuz", "grault", "garply"]
with open(glove_file, "wt") as f:
for i, word in enumerate(words):
f.write("%s " % word)
for j in range(data.WORD_VECTOR_LEN):
f.write("%.5f" % (i * 0.1))
if j < data.WORD_VECTOR_LEN - 1:
f.write(" ")
else:
f.write("\n")
def testEncodeSingleSentence(self):
snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
fake_train_file = os.path.join(snli_1_0_dir, "snli_1.0_train.txt")
os.makedirs(snli_1_0_dir)
self._createFakeSnliData(fake_train_file)
vocab = data.load_vocabulary(self._temp_data_dir)
glove_dir = os.path.join(self._temp_data_dir, "glove")
os.makedirs(glove_dir)
glove_file = os.path.join(glove_dir, "glove.42B.300d.txt")
self._createFakeGloveData(glove_file)
word2index, _ = data.load_word_vectors(self._temp_data_dir, vocab)
sentence_variants = [
"( Foo ( ( bar baz ) . ) )",
" ( Foo ( ( bar baz ) . ) ) ",
"( Foo ( ( bar baz ) . ) )"]
for sentence in sentence_variants:
word_indices, shift_reduce = data.encode_sentence(sentence, word2index)
self.assertEqual(np.int64, word_indices.dtype)
self.assertEqual((5, 1), word_indices.shape)
self.assertAllClose(
np.array([[3, 3, 3, 2, 3, 2, 2]], dtype=np.int64).T, shift_reduce)
def testSnliData(self):
snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
fake_train_file = os.path.join(snli_1_0_dir, "snli_1.0_train.txt")
os.makedirs(snli_1_0_dir)
self._createFakeSnliData(fake_train_file)
glove_dir = os.path.join(self._temp_data_dir, "glove")
os.makedirs(glove_dir)
glove_file = os.path.join(glove_dir, "glove.42B.300d.txt")
self._createFakeGloveData(glove_file)
vocab = data.load_vocabulary(self._temp_data_dir)
word2index, _ = data.load_word_vectors(self._temp_data_dir, vocab)
train_data = data.SnliData(fake_train_file, word2index)
self.assertEqual(4, train_data.num_batches(1))
self.assertEqual(2, train_data.num_batches(2))
self.assertEqual(2, train_data.num_batches(3))
self.assertEqual(1, train_data.num_batches(4))
generator = train_data.get_generator(2)()
for _ in range(2):
label, prem, prem_trans, hypo, hypo_trans = next(generator)
self.assertEqual(2, len(label))
self.assertEqual((4, 2), prem.shape)
self.assertEqual((5, 2), prem_trans.shape)
self.assertEqual((3, 2), hypo.shape)
self.assertEqual((3, 2), hypo_trans.shape)
if __name__ == "__main__":
tf.test.main()
|
apache-2.0
|
z1gm4/desarrollo_web_udp
|
env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/fields.py
|
200
|
5872
|
from __future__ import absolute_import
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
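# --- Hedged usage sketch (illustration only; the _example_* helper below is
# hypothetical and not part of urllib3). It shows the mimetypes-based guess
# and the fallback to ``default``.
def _example_guess_content_type():
    # Common extensions resolve via the platform mimetypes table.
    assert guess_content_type('photo.png') == 'image/png'
    # Unknown extensions and a missing filename fall back to the default.
    assert guess_content_type('data.unknownext') == 'application/octet-stream'
    assert guess_content_type(None, default='text/plain') == 'text/plain'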
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except UnicodeEncodeError:
pass
else:
return result
if not six.PY3: # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
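# --- Hedged usage sketch (illustration only; _example_format_header_param is
# hypothetical and not part of urllib3). It exercises both branches above: a
# plain ASCII value is emitted as name="value", while a non-ASCII value takes
# the RFC 2231 path.
def _example_format_header_param():
    assert format_header_param('filename', 'report.txt') == 'filename="report.txt"'
    # Non-ASCII filenames are percent-encoded, e.g. filename*=utf-8''r%C3%A9sum%C3%A9.txt
    encoded = format_header_param('filename', u'r\xe9sum\xe9.txt')
    assert encoded.startswith('filename*=utf-8')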
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameters of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None,
content_location=None):
"""
Makes this request field into a multipart request field.
This method overrides "Content-Disposition", "Content-Type" and
"Content-Location" headers to the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join([
'', self._render_parts(
(('name', self._name), ('filename', self._filename))
)
])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
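# --- Hedged usage sketch (illustration only; _example_request_field_from_tuples
# is hypothetical and not part of urllib3). It builds a RequestField from the
# old-style tuple form documented in ``from_tuples`` and renders its multipart
# headers.
def _example_request_field_from_tuples():
    field = RequestField.from_tuples('fakefile', ('foofile.txt', 'contents of foofile'))
    headers = field.render_headers()
    assert 'Content-Disposition: form-data' in headers
    assert 'name="fakefile"' in headers
    assert 'filename="foofile.txt"' in headers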
|
gpl-3.0
|
renner/spacewalk
|
client/tools/rhn-virtualization/virtualization/state.py
|
7
|
2331
|
#
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
from virtualization.constants import StateType
###############################################################################
# Classes
###############################################################################
class State:
"""
This class represents the state of a virtual instance. It provides
abstraction to categorize the state into running, stopped, paused, or
crashed.
"""
def __init__(self, state_type):
"""
Create a new state. If state_type is None, this state is assumed to be
stopped. If state_type is not None, it must be a StateType type.
"""
self.__state_type = state_type
def get_state_type(self):
"""
Returns the state type used to create this instance.
"""
return self.__state_type
def is_running(self):
"""
Returns true if this object represents a running state.
"""
return self.__state_type == StateType.NOSTATE or \
self.__state_type == StateType.RUNNING or \
self.__state_type == StateType.BLOCKED or \
self.__state_type == StateType.SHUTDOWN
def is_paused(self):
"""
Returns true if this object represents a paused instance.
"""
return self.__state_type == StateType.PAUSED
def is_stopped(self):
"""
Returns true if this object represents a stopped instance.
"""
return self.__state_type == None or \
self.__state_type == StateType.SHUTOFF
def is_crashed(self):
"""
Returns true if this object represents a crashed instance.
"""
return self.__state_type == StateType.CRASHED
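# --- Hedged usage sketch (illustration only; _example_state_usage is
# hypothetical and not part of this module). It shows how the StateType
# constants map onto the running/stopped predicates above.
def _example_state_usage():
    running = State(StateType.RUNNING)
    stopped = State(None)
    assert running.is_running() and not running.is_stopped()
    assert stopped.is_stopped() and not stopped.is_crashed()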
|
gpl-2.0
|
jniediek/mne-python
|
mne/channels/tests/test_interpolation.py
|
3
|
5400
|
import os.path as op
import warnings
import numpy as np
from numpy.testing import (assert_allclose, assert_array_equal)
from nose.tools import assert_raises, assert_equal, assert_true
from mne import io, pick_types, pick_channels, read_events, Epochs
from mne.channels.interpolation import _make_interpolation_matrix
from mne.utils import run_tests_if_main, slow_test
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.2, 0.5
event_id_2 = 2
def _load_data():
"""Helper function to load data."""
# It is more memory efficient to load data in a separate
# function so it's loaded on-demand
raw = io.read_raw_fif(raw_fname, add_eeg_ref=False)
events = read_events(event_name)
picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude=[])
# select every second channel for faster speed but compensate by using
# mode='accurate'.
picks_meg = pick_types(raw.info, meg=True, eeg=False, exclude=[])[1::2]
picks = pick_types(raw.info, meg=True, eeg=True, exclude=[])
with warnings.catch_warnings(record=True): # proj
epochs_eeg = Epochs(raw, events, event_id, tmin, tmax, picks=picks_eeg,
preload=True, reject=dict(eeg=80e-6))
epochs_meg = Epochs(raw, events, event_id, tmin, tmax, picks=picks_meg,
preload=True,
reject=dict(grad=1000e-12, mag=4e-12))
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=True, reject=dict(eeg=80e-6, grad=1000e-12,
mag=4e-12))
return raw, epochs, epochs_eeg, epochs_meg
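# --- Hedged sketch (illustration only; _example_interpolation_matrix_shape is
# hypothetical and not part of the test suite). Assuming _make_interpolation_matrix
# accepts arbitrary 3D sensor positions, the returned matrix maps good-channel
# data onto bad channels, so its shape is (n_bad, n_good).
def _example_interpolation_matrix_shape():
    rng = np.random.RandomState(0)
    pos_good = rng.randn(5, 3)  # five "good" sensor positions
    pos_bad = rng.randn(2, 3)   # two "bad" sensor positions to reconstruct
    interp = _make_interpolation_matrix(pos_good, pos_bad)
    assert interp.shape == (2, 5)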
@slow_test
def test_interpolation():
"""Test interpolation"""
raw, epochs, epochs_eeg, epochs_meg = _load_data()
# It's a trade-off between speed and accuracy. If every second channel is
# selected the tests are more than 3x faster but the correlation
# drops to 0.8
thresh = 0.80
# create good and bad channels for EEG
epochs_eeg.info['bads'] = []
goods_idx = np.ones(len(epochs_eeg.ch_names), dtype=bool)
goods_idx[epochs_eeg.ch_names.index('EEG 012')] = False
bads_idx = ~goods_idx
evoked_eeg = epochs_eeg.average()
ave_before = evoked_eeg.data[bads_idx]
# interpolate bad channels for EEG
pos = epochs_eeg._get_channel_positions()
pos_good = pos[goods_idx]
pos_bad = pos[bads_idx]
interpolation = _make_interpolation_matrix(pos_good, pos_bad)
assert_equal(interpolation.shape, (1, len(epochs_eeg.ch_names) - 1))
ave_after = np.dot(interpolation, evoked_eeg.data[goods_idx])
epochs_eeg.info['bads'] = ['EEG 012']
evoked_eeg = epochs_eeg.average()
assert_array_equal(ave_after, evoked_eeg.interpolate_bads().data[bads_idx])
assert_allclose(ave_before, ave_after, atol=2e-6)
# check that interpolation fails when preload is False
epochs_eeg.preload = False
assert_raises(ValueError, epochs_eeg.interpolate_bads)
epochs_eeg.preload = True
# check that interpolation changes the data in raw
raw_eeg = io.RawArray(data=epochs_eeg._data[0], info=epochs_eeg.info)
raw_before = raw_eeg._data[bads_idx]
raw_after = raw_eeg.interpolate_bads()._data[bads_idx]
assert_equal(np.all(raw_before == raw_after), False)
# check that interpolation fails when preload is False
for inst in [raw, epochs]:
assert hasattr(inst, 'preload')
inst.preload = False
inst.info['bads'] = [inst.ch_names[1]]
assert_raises(ValueError, inst.interpolate_bads)
# check that interpolation works when non M/EEG channels are present
# before MEG channels
with warnings.catch_warnings(record=True): # change of units
raw.rename_channels({'MEG 0113': 'TRIGGER'})
raw.set_channel_types({'TRIGGER': 'stim'})
raw.info['bads'] = [raw.info['ch_names'][1]]
raw.load_data()
raw.interpolate_bads()
# check that interpolation works for MEG
epochs_meg.info['bads'] = ['MEG 0141']
evoked = epochs_meg.average()
pick = pick_channels(epochs_meg.info['ch_names'], epochs_meg.info['bads'])
# MEG -- raw
raw_meg = io.RawArray(data=epochs_meg._data[0], info=epochs_meg.info)
raw_meg.info['bads'] = ['MEG 0141']
data1 = raw_meg[pick, :][0][0]
# reset_bads=False here because epochs_meg appears to share the same info
# dict with raw and we want to test the epochs functionality too
raw_meg.info.normalize_proj()
data2 = raw_meg.interpolate_bads(reset_bads=False)[pick, :][0][0]
assert_true(np.corrcoef(data1, data2)[0, 1] > thresh)
# the same number of bads as before
assert_true(len(raw_meg.info['bads']) == 1)
# MEG -- epochs
data1 = epochs_meg.get_data()[:, pick, :].ravel()
epochs_meg.info.normalize_proj()
epochs_meg.interpolate_bads()
data2 = epochs_meg.get_data()[:, pick, :].ravel()
assert_true(np.corrcoef(data1, data2)[0, 1] > thresh)
assert_true(len(raw_meg.info['bads']) == 0)
# MEG -- evoked
data1 = evoked.data[pick]
evoked.info.normalize_proj()
data2 = evoked.interpolate_bads().data[pick]
assert_true(np.corrcoef(data1, data2)[0, 1] > thresh)
run_tests_if_main()
|
bsd-3-clause
|
sergiohgz/incubator-airflow
|
airflow/www/views.py
|
2
|
111267
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import ast
import codecs
import copy
import datetime as dt
import inspect
import itertools
import json
import logging
import math
import os
import traceback
from collections import defaultdict
from datetime import timedelta
from functools import wraps
from textwrap import dedent
import bleach
import markdown
import nvd3
import pendulum
import pkg_resources
import sqlalchemy as sqla
from flask import (
abort, jsonify, redirect, url_for, request, Markup, Response,
current_app, render_template, make_response)
from flask import flash
from flask._compat import PY2
from flask_admin import BaseView, expose, AdminIndexView
from flask_admin.actions import action
from flask_admin.babel import lazy_gettext
from flask_admin.contrib.sqla import ModelView
from flask_admin.form.fields import DateTimeField
from flask_admin.tools import iterdecode
from jinja2 import escape
from jinja2.sandbox import ImmutableSandboxedEnvironment
from past.builtins import basestring, unicode
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
from sqlalchemy import or_, desc, and_, union_all
from wtforms import (
Form, SelectField, TextAreaField, PasswordField,
StringField, validators)
import airflow
from airflow import configuration as conf
from airflow import models
from airflow import settings
from airflow.api.common.experimental.mark_tasks import (set_dag_run_state_to_running,
set_dag_run_state_to_success,
set_dag_run_state_to_failed)
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.models import XCom, DagRun
from airflow.operators.subdag_operator import SubDagOperator
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, SCHEDULER_DEPS
from airflow.utils import timezone
from airflow.utils.dates import infer_time_unit, scale_time_units, parse_execution_date
from airflow.utils.db import create_session, provide_session
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils.json import json_ser
from airflow.utils.net import get_hostname
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from airflow.www import utils as wwwutils
from airflow.www.forms import (DateTimeForm, DateTimeWithNumRunsForm,
DateTimeWithNumRunsWithDagRunsForm)
from airflow.www.validators import GreaterEqualThan
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
UTF8_READER = codecs.getreader('utf-8')
dagbag = models.DagBag(settings.DAGS_FOLDER)
login_required = airflow.login.login_required
current_user = airflow.login.current_user
logout_user = airflow.login.logout_user
FILTER_BY_OWNER = False
PAGE_SIZE = conf.getint('webserver', 'page_size')
if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
# filter_by_owner if authentication is enabled and filter_by_owner is true
FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']
def dag_link(v, c, m, p):
if m.dag_id is None:
return Markup()
dag_id = bleach.clean(m.dag_id)
url = url_for(
'airflow.graph',
dag_id=dag_id,
execution_date=m.execution_date)
return Markup(
'<a href="{}">{}</a>'.format(url, dag_id))
def log_url_formatter(v, c, m, p):
return Markup(
'<a href="{m.log_url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(**locals())
def dag_run_link(v, c, m, p):
dag_id = bleach.clean(m.dag_id)
url = url_for(
'airflow.graph',
dag_id=m.dag_id,
run_id=m.run_id,
execution_date=m.execution_date)
return Markup('<a href="{url}">{m.run_id}</a>'.format(**locals()))
def task_instance_link(v, c, m, p):
dag_id = bleach.clean(m.dag_id)
task_id = bleach.clean(m.task_id)
url = url_for(
'airflow.task',
dag_id=dag_id,
task_id=task_id,
execution_date=m.execution_date.isoformat())
url_root = url_for(
'airflow.graph',
dag_id=dag_id,
root=task_id,
execution_date=m.execution_date.isoformat())
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
aria-hidden="true"></span>
</a>
</span>
""".format(**locals()))
def state_token(state):
color = State.color(state)
return Markup(
'<span class="label" style="background-color:{color};">'
'{state}</span>'.format(**locals()))
def parse_datetime_f(value):
if not isinstance(value, dt.datetime):
return value
return timezone.make_aware(value)
def state_f(v, c, m, p):
return state_token(m.state)
def duration_f(v, c, m, p):
if m.end_date and m.duration:
return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
attr = getattr(m, p)
dttm = attr.isoformat() if attr else ''
if timezone.utcnow().isoformat()[:4] == dttm[:4]:
dttm = dttm[5:]
return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
return Markup("<nobr>{}</nobr>".format(getattr(m, p)))
def label_link(v, c, m, p):
try:
default_params = ast.literal_eval(m.default_params)
except:
default_params = {}
url = url_for(
'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
**default_params)
return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
def pool_link(v, c, m, p):
url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
return Markup("<a href='{url}'>{m.pool}</a>".format(**locals()))
def pygment_html_render(s, lexer=lexers.TextLexer):
return highlight(
s,
lexer(),
HtmlFormatter(linenos=True),
)
def render(obj, lexer):
out = ""
if isinstance(obj, basestring):
out += pygment_html_render(obj, lexer)
elif isinstance(obj, (tuple, list)):
for i, s in enumerate(obj):
out += "<div>List item #{}</div>".format(i)
out += "<div>" + pygment_html_render(s, lexer) + "</div>"
elif isinstance(obj, dict):
for k, v in obj.items():
out += '<div>Dict item "{}"</div>'.format(k)
out += "<div>" + pygment_html_render(v, lexer) + "</div>"
return out
def wrapped_markdown(s):
return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
attr_renderer = {
'bash_command': lambda x: render(x, lexers.BashLexer),
'hql': lambda x: render(x, lexers.SqlLexer),
'sql': lambda x: render(x, lexers.SqlLexer),
'doc': lambda x: render(x, lexers.TextLexer),
'doc_json': lambda x: render(x, lexers.JsonLexer),
'doc_rst': lambda x: render(x, lexers.RstLexer),
'doc_yaml': lambda x: render(x, lexers.YamlLexer),
'doc_md': wrapped_markdown,
'python_callable': lambda x: render(
inspect.getsource(x), lexers.PythonLexer),
}
def data_profiling_required(f):
"""Decorator for views requiring data profiling access"""
@wraps(f)
def decorated_function(*args, **kwargs):
if (
current_app.config['LOGIN_DISABLED'] or
(not current_user.is_anonymous() and current_user.data_profiling())
):
return f(*args, **kwargs)
else:
flash("This page requires data profiling privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
def fused_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=running')
return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=queued&sort=10&desc=1')
return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
def recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag):
if isinstance(tasks, list):
for task in tasks:
recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)
return
if isinstance(tasks, SubDagOperator):
subtasks = tasks.subdag.tasks
dag_ids.append(tasks.subdag.dag_id)
for subtask in subtasks:
if subtask.task_id not in task_ids:
task_ids.append(subtask.task_id)
task_id_to_dag[subtask.task_id] = tasks.subdag
recurse_tasks(subtasks, task_ids, dag_ids, task_id_to_dag)
if isinstance(tasks, BaseOperator):
task_id_to_dag[tasks.task_id] = tasks.dag
def get_chart_height(dag):
"""
TODO(aoen): See [AIRFLOW-1263] We use the number of tasks in the DAG as a heuristic to
approximate the size of generated chart (otherwise the charts are tiny and unreadable
when DAGs have a large number of tasks). Ideally nvd3 should allow for dynamic-height
charts, that is charts that take up space based on the size of the components within.
"""
return 600 + len(dag.tasks) * 10
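# --- Hedged illustration (not part of Airflow; _FakeDag is a hypothetical
# stand-in for a DAG object). It just demonstrates the linear heuristic above:
# a 10-task DAG gets 600 + 10 * 10 = 700 pixels.
def _example_get_chart_height():
    class _FakeDag(object):
        tasks = [None] * 10
    assert get_chart_height(_FakeDag()) == 700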
def get_date_time_num_runs_dag_runs_form_data(request, session, dag):
dttm = request.args.get('execution_date')
if dttm:
dttm = pendulum.parse(dttm)
else:
dttm = dag.latest_execution_date or timezone.utcnow()
base_date = request.args.get('base_date')
if base_date:
base_date = timezone.parse(base_date)
else:
# The DateTimeField widget truncates milliseconds and would lose
# the first dag run. Round to next second.
base_date = (dttm + timedelta(seconds=1)).replace(microsecond=0)
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
DR = models.DagRun
drs = (
session.query(DR)
.filter(
DR.dag_id == dag.dag_id,
DR.execution_date <= base_date)
.order_by(desc(DR.execution_date))
.limit(num_runs)
.all()
)
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if dttm == dr.execution_date:
dr_state = dr.state
# Happens if base_date was changed and the selected dag run is not in result
if not dr_state and drs:
dr = drs[0]
dttm = dr.execution_date
dr_state = dr.state
return {
'dttm': dttm,
'base_date': base_date,
'num_runs': num_runs,
'execution_date': dttm.isoformat(),
'dr_choices': dr_choices,
'dr_state': dr_state,
}
class Airflow(BaseView):
def is_visible(self):
return False
@expose('/')
@login_required
def index(self):
return self.render('airflow/dags.html')
@expose('/chart_data')
@data_profiling_required
@wwwutils.gzipped
# @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
def chart_data(self):
from airflow import macros
import pandas as pd
if conf.getboolean('core', 'secure_mode'):
abort(404)
with create_session() as session:
chart_id = request.args.get('chart_id')
csv = request.args.get('csv') == "true"
chart = session.query(models.Chart).filter_by(id=chart_id).first()
db = session.query(
models.Connection).filter_by(conn_id=chart.conn_id).first()
payload = {
"state": "ERROR",
"error": ""
}
# Processing templated fields
try:
args = ast.literal_eval(chart.default_params)
if not isinstance(args, dict):
raise AirflowException('Not a dict')
except:
args = {}
payload['error'] += (
"Default params is not valid, string has to evaluate as "
"a Python dictionary. ")
request_dict = {k: request.args.get(k) for k in request.args}
args.update(request_dict)
args['macros'] = macros
sandbox = ImmutableSandboxedEnvironment()
sql = sandbox.from_string(chart.sql).render(**args)
label = sandbox.from_string(chart.label).render(**args)
payload['sql_html'] = Markup(highlight(
sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
payload['label'] = label
pd.set_option('display.max_colwidth', 100)
hook = db.get_hook()
try:
df = hook.get_pandas_df(
wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
df = df.fillna(0)
except Exception as e:
payload['error'] += "SQL execution failed. Details: " + str(e)
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
if not payload['error'] and len(df) == CHART_LIMIT:
payload['warning'] = (
"Data has been truncated to {0}"
" rows. Expect incomplete results.").format(CHART_LIMIT)
if not payload['error'] and len(df) == 0:
payload['error'] += "Empty result set. "
elif (
not payload['error'] and
chart.sql_layout == 'series' and
chart.chart_type != "datatable" and
len(df.columns) < 3):
payload['error'] += "SQL needs to return at least 3 columns. "
elif (
not payload['error'] and
chart.sql_layout == 'columns' and
len(df.columns) < 2):
payload['error'] += "SQL needs to return at least 2 columns. "
elif not payload['error']:
import numpy as np
chart_type = chart.chart_type
data = None
if chart.show_datatable or chart_type == "datatable":
data = df.to_dict(orient="split")
data['columns'] = [{'title': c} for c in data['columns']]
payload['data'] = data
# Trying to convert time to something Highcharts likes
x_col = 1 if chart.sql_layout == 'series' else 0
if chart.x_is_date:
try:
# From string to datetime
df[df.columns[x_col]] = pd.to_datetime(
df[df.columns[x_col]])
df[df.columns[x_col]] = df[df.columns[x_col]].apply(
lambda x: int(x.strftime("%s")) * 1000)
except Exception as e:
payload['error'] = "Time conversion failed"
if chart_type == 'datatable':
payload['state'] = 'SUCCESS'
return wwwutils.json_response(payload)
else:
if chart.sql_layout == 'series':
# User provides columns (series, x, y)
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
df[df.columns[2]] = df[df.columns[2]].astype(np.float)
df = df.pivot_table(
index=df.columns[1],
columns=df.columns[0],
values=df.columns[2], aggfunc=np.sum)
else:
# User provides columns (x, y, metric1, metric2, ...)
xaxis_label = df.columns[0]
yaxis_label = 'y'
df.index = df[df.columns[0]]
df = df.sort(df.columns[0])
del df[df.columns[0]]
for col in df.columns:
df[col] = df[col].astype(np.float)
df = df.fillna(0)
NVd3ChartClass = chart_mapping.get(chart.chart_type)
NVd3ChartClass = getattr(nvd3, NVd3ChartClass)
nvd3_chart = NVd3ChartClass(x_is_date=chart.x_is_date)
for col in df.columns:
nvd3_chart.add_serie(name=col, y=df[col].tolist(), x=df[col].index.tolist())
try:
nvd3_chart.buildcontent()
payload['chart_type'] = nvd3_chart.__class__.__name__
payload['htmlcontent'] = nvd3_chart.htmlcontent
except Exception as e:
payload['error'] = str(e)
payload['state'] = 'SUCCESS'
payload['request_dict'] = request_dict
return wwwutils.json_response(payload)
@expose('/chart')
@data_profiling_required
def chart(self):
if conf.getboolean('core', 'secure_mode'):
abort(404)
with create_session() as session:
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).first()
NVd3ChartClass = chart_mapping.get(chart.chart_type)
if not NVd3ChartClass:
flash(
"Not supported anymore as the license was incompatible, "
"sorry",
"danger")
redirect('/admin/chart/')
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/nvd3.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
@login_required
@provide_session
def dag_stats(self, session=None):
ds = models.DagStat
ds.update(
dag_ids=[dag.dag_id for dag in dagbag.dags.values() if not dag.is_subdag]
)
qry = (
session.query(ds.dag_id, ds.state, ds.count)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.dag_states:
try:
count = data[dag.dag_id][state]
except Exception:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/task_stats')
@login_required
@provide_session
def task_stats(self, session=None):
TI = models.TaskInstance
DagRun = models.DagRun
Dag = models.DagModel
LastDagRun = (
session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state != State.RUNNING)
.filter(Dag.is_active == True)
.filter(Dag.is_subdag == False)
.group_by(DagRun.dag_id)
.subquery('last_dag_run')
)
RunningDagRun = (
session.query(DagRun.dag_id, DagRun.execution_date)
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state == State.RUNNING)
.filter(Dag.is_active == True)
.filter(Dag.is_subdag == False)
.subquery('running_dag_run')
)
# Select all task_instances from active dag_runs.
# If no dag_run is active, return task instances from most recent dag_run.
LastTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(LastDagRun, and_(
LastDagRun.c.dag_id == TI.dag_id,
LastDagRun.c.execution_date == TI.execution_date))
)
RunningTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(RunningDagRun, and_(
RunningDagRun.c.dag_id == TI.dag_id,
RunningDagRun.c.execution_date == TI.execution_date))
)
UnionTI = union_all(LastTI, RunningTI).alias('union_ti')
qry = (
session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count())
.group_by(UnionTI.c.dag_id, UnionTI.c.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.task_states:
try:
count = data[dag.dag_id][state]
except:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = dag_id
try:
with open(dag.fileloc, 'r') as f:
code = f.read()
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
except IOError as e:
html_code = str(e)
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
@expose('/dag_details')
@login_required
@provide_session
def dag_details(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = "DAG details"
TI = models.TaskInstance
states = (
session.query(TI.state, sqla.func.count(TI.dag_id))
.filter(TI.dag_id == dag_id)
.group_by(TI.state)
.all()
)
return self.render(
'airflow/dag_details.html',
dag=dag, title=title, states=states, State=State)
@current_app.errorhandler(404)
def circles(self):
return render_template(
'airflow/circles.html', hostname=get_hostname()), 404
@current_app.errorhandler(500)
def show_traceback(self):
from airflow.utils import asciiart as ascii_
return render_template(
'airflow/traceback.html',
hostname=get_hostname(),
nukular=ascii_.nukular,
info=traceback.format_exc()), 500
@expose('/noaccess')
def noaccess(self):
return self.render('airflow/noaccess.html')
@expose('/pickle_info')
@login_required
def pickle_info(self):
d = {}
dag_id = request.args.get('dag_id')
dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()
for dag in dags:
if not dag.is_subdag:
d[dag.dag_id] = dag.pickle_info()
return wwwutils.json_response(d)
@expose('/login', methods=['GET', 'POST'])
def login(self):
return airflow.login.login(self, request)
@expose('/logout')
def logout(self):
logout_user()
flash('You have been logged out.')
return redirect(url_for('admin.index'))
@expose('/rendered')
@login_required
@wwwutils.action_logging
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title, )
@expose('/get_logs_with_metadata')
@login_required
@wwwutils.action_logging
@provide_session
def get_logs_with_metadata(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
try_number = int(request.args.get('try_number'))
metadata = request.args.get('metadata')
metadata = json.loads(metadata)
# metadata may be null
if not metadata:
metadata = {}
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date)
except ValueError:
error_message = (
'Given execution date, {}, could not be identified '
'as a date. Example date format: 2015-11-16T14:34:15+00:00'.format(
execution_date))
response = jsonify({'error': error_message})
response.status_code = 400
return response
logger = logging.getLogger('airflow.task')
task_log_reader = conf.get('core', 'task_log_reader')
handler = next((handler for handler in logger.handlers
if handler.name == task_log_reader), None)
ti = session.query(models.TaskInstance).filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm).first()
try:
if ti is None:
logs = ["*** Task instance did not exist in the DB\n"]
metadata['end_of_log'] = True
else:
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(ti.task_id)
logs, metadatas = handler.read(ti, try_number, metadata=metadata)
metadata = metadatas[0]
for i, log in enumerate(logs):
if PY2 and not isinstance(log, unicode):
logs[i] = log.decode('utf-8')
message = logs[0]
return jsonify(message=message, metadata=metadata)
except AttributeError as e:
error_message = ["Task log handler {} does not support read logs.\n{}\n"
.format(task_log_reader, str(e))]
metadata['end_of_log'] = True
return jsonify(message=error_message, error=True, metadata=metadata)
@expose('/log')
@login_required
@wwwutils.action_logging
@provide_session
def log(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
ti = session.query(models.TaskInstance).filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm).first()
logs = [''] * (ti.next_try_number - 1 if ti is not None else 0)
return self.render(
'airflow/ti_log.html',
logs=logs, dag=dag, title="Log by attempts",
dag_id=dag.dag_id, task_id=task_id,
execution_date=execution_date, form=form)
@expose('/task')
@login_required
@wwwutils.action_logging
def task(self):
TI = models.TaskInstance
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
task = copy.copy(dag.get_task(task_id))
task.resolve_template_files()
ti = TI(task=task, execution_date=dttm)
ti.refresh_from_db()
ti_attrs = []
for attr_name in dir(ti):
if not attr_name.startswith('_'):
attr = getattr(ti, attr_name)
if type(attr) != type(self.task):
ti_attrs.append((attr_name, str(attr)))
task_attrs = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in attr_renderer:
task_attrs.append((attr_name, str(attr)))
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in attr_renderer:
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
no_failed_deps_result = [(
"Unknown",
dedent("""\
All dependencies are met but the task instance is not running.
In most cases this just means that the task will probably
be scheduled soon unless:<br/>
- The scheduler is down or under heavy load<br/>
- The following configuration values may be limiting the number
of queueable processes:
<code>parallelism</code>,
<code>dag_concurrency</code>,
<code>max_active_dag_runs_per_dag</code>,
<code>non_pooled_task_slot_count</code><br/>
{}
<br/>
If this task instance does not start soon please contact your Airflow """
"""administrator for assistance."""
.format(
"- This task instance already ran and had its state changed "
"manually (e.g. cleared in the UI)<br/>"
if ti.state == State.NONE else "")))]
# Use the scheduler's context to figure out which dependencies are not met
dep_context = DepContext(SCHEDULER_DEPS)
failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in
ti.get_failed_dep_statuses(
dep_context=dep_context)]
title = "Task Instance Details"
return self.render(
'airflow/task.html',
task_attrs=task_attrs,
ti_attrs=ti_attrs,
failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
dag=dag, title=title)
@expose('/xcom')
@login_required
@wwwutils.action_logging
@provide_session
def xcom(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
xcomlist = session.query(XCom).filter(
XCom.dag_id == dag_id, XCom.task_id == task_id,
XCom.execution_date == dttm).all()
attributes = []
for xcom in xcomlist:
if not xcom.key.startswith('_'):
attributes.append((xcom.key, xcom.value))
title = "XCom"
return self.render(
'airflow/xcom.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
form=form,
dag=dag, title=title)
@expose('/run')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def run(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = pendulum.parse(execution_date)
ignore_all_deps = request.args.get('ignore_all_deps') == "true"
ignore_task_deps = request.args.get('ignore_task_deps') == "true"
ignore_ti_state = request.args.get('ignore_ti_state') == "true"
try:
from airflow.executors import GetDefaultExecutor
from airflow.executors.celery_executor import CeleryExecutor
executor = GetDefaultExecutor()
if not isinstance(executor, CeleryExecutor):
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
except ImportError:
# in case CeleryExecutor cannot be imported it is not active either
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
ti.refresh_from_db()
# Make sure the task instance can be queued
dep_context = DepContext(
deps=QUEUE_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
if failed_deps:
failed_deps_str = ", ".join(
["{}: {}".format(dep.dep_name, dep.reason) for dep in failed_deps])
flash("Could not queue task instance for execution, dependencies not met: "
"{}".format(failed_deps_str),
"error")
return redirect(origin)
executor.start()
executor.queue_task_instance(
ti,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
@expose('/delete')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def delete(self):
from airflow.api.common.experimental import delete_dag
from airflow.exceptions import DagNotFound, DagFileExists
dag_id = request.args.get('dag_id')
origin = request.args.get('origin') or "/admin/"
try:
delete_dag.delete_dag(dag_id)
except DagNotFound:
flash("DAG with id {} not found. Cannot delete".format(dag_id))
return redirect(request.referrer)
except DagFileExists:
flash("Dag id {} is still in DagBag. "
"Remove the DAG file first.".format(dag_id))
return redirect(request.referrer)
flash("Deleting DAG with id {}. May take a couple minutes to fully"
" disappear.".format(dag_id))
# Upon successful delete return to origin
return redirect(origin)
@expose('/trigger')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def trigger(self):
dag_id = request.args.get('dag_id')
origin = request.args.get('origin') or "/admin/"
dag = dagbag.get_dag(dag_id)
if not dag:
flash("Cannot find dag {}".format(dag_id))
return redirect(origin)
execution_date = timezone.utcnow()
run_id = "manual__{0}".format(execution_date.isoformat())
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
flash("This run_id {} already exists".format(run_id))
return redirect(origin)
run_conf = {}
dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True
)
flash(
"Triggered {}, "
"it should start any moment now.".format(dag_id))
return redirect(origin)
def _clear_dag_tis(self, dag, start_date, end_date, origin,
recursive=False, confirmed=False):
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
tis = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
dry_run=True)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=("Here's the list of task instances you are about "
"to clear:"),
details=details)
return response
@expose('/clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
execution_date = request.args.get('execution_date')
execution_date = pendulum.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
recursive = request.args.get('recursive') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=recursive, confirmed=confirmed)
@expose('/dagrun_clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == "true"
dag = dagbag.get_dag(dag_id)
execution_date = pendulum.parse(execution_date)
start_date = execution_date
end_date = execution_date
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=True, confirmed=confirmed)
@expose('/blocked')
@login_required
@provide_session
def blocked(self, session=None):
DR = models.DagRun
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.group_by(DR.dag_id)
.all()
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
if dag_id in dagbag.dags:
max_active_runs = dagbag.dags[dag_id].max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
def _mark_dagrun_state_as_failed(self, dag_id, execution_date, confirmed, origin):
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = pendulum.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag:
flash('Cannot find DAG: {}'.format(dag_id), 'error')
return redirect(origin)
new_dag_state = set_dag_run_state_to_failed(dag, execution_date, commit=confirmed)
if confirmed:
flash('Marked failed on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render('airflow/confirm.html',
message=("Here's the list of task instances you are "
"about to mark as failed"),
details=details)
return response
def _mark_dagrun_state_as_success(self, dag_id, execution_date, confirmed, origin):
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = pendulum.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag:
flash('Cannot find DAG: {}'.format(dag_id), 'error')
return redirect(origin)
new_dag_state = set_dag_run_state_to_success(dag, execution_date,
commit=confirmed)
if confirmed:
flash('Marked success on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render('airflow/confirm.html',
message=("Here's the list of task instances you are "
"about to mark as success"),
details=details)
return response
@expose('/dagrun_failed')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_failed(self):
dag_id = request.args.get('dag_id')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == 'true'
origin = request.args.get('origin')
return self._mark_dagrun_state_as_failed(dag_id, execution_date,
confirmed, origin)
@expose('/dagrun_success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_success(self):
dag_id = request.args.get('dag_id')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == 'true'
origin = request.args.get('origin')
return self._mark_dagrun_state_as_success(dag_id, execution_date,
confirmed, origin)
def _mark_task_instance_state(self, dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, state):
        dag = dagbag.get_dag(dag_id)
        if not dag:
            flash("Cannot find DAG: {}".format(dag_id))
            return redirect(origin)
        if not dag.has_task(task_id):
            flash("Cannot find task {} in DAG {}".format(task_id, dag.dag_id))
            return redirect(origin)
        task = dag.get_task(task_id)
        task.dag = dag
        execution_date = pendulum.parse(execution_date)
from airflow.api.common.experimental.mark_tasks import set_state
if confirmed:
altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=state,
commit=True)
flash("Marked {} on {} task instances".format(state, len(altered)))
return redirect(origin)
to_be_altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=state,
commit=False)
details = "\n".join([str(t) for t in to_be_altered])
response = self.render("airflow/confirm.html",
message=("Here's the list of task instances you are "
"about to mark as {}:".format(state)),
details=details)
return response
@expose('/failed')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def failed(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
return self._mark_task_instance_state(dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, State.FAILED)
@expose('/success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def success(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
return self._mark_task_instance_state(dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, State.SUCCESS)
@expose('/tree')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
@provide_session
def tree(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
DR = models.DagRun
dag_runs = (
session.query(DR)
.filter(
DR.dag_id == dag.dag_id,
DR.execution_date <= base_date)
.order_by(DR.execution_date.desc())
.limit(num_runs)
.all()
)
dag_runs = {
dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
dates = sorted(list(dag_runs.keys()))
max_date = max(dates) if dates else None
min_date = min(dates) if dates else None
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
task_instances = {}
for ti in tis:
tid = alchemy_to_dict(ti)
dr = dag_runs.get(ti.execution_date)
tid['external_trigger'] = dr['external_trigger'] if dr else False
task_instances[(ti.task_id, ti.execution_date)] = tid
expanded = []
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = [0]
node_limit = 5000 / max(1, len(dag.roots))
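        # For example, with the 5,000-node budget and a DAG that has 4 root
        # tasks, node_limit becomes 5000 / 4 = 1250; once the shared node_count
        # passes that limit, recurse_nodes only follows tasks it has not
        # visited yet, effectively degrading to a plain depth-first pass.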
def recurse_nodes(task, visited):
visited.add(task)
node_count[0] += 1
children = [
recurse_nodes(t, visited) for t in task.upstream_list
if node_count[0] < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
children_key = 'children'
if task.task_id not in expanded:
expanded.append(task.task_id)
elif children:
children_key = "_children"
def set_duration(tid):
if (isinstance(tid, dict) and tid.get("state") == State.RUNNING and
tid["start_date"] is not None):
d = timezone.utcnow() - pendulum.parse(tid["start_date"])
tid["duration"] = d.total_seconds()
return tid
return {
'name': task.task_id,
'instances': [
set_duration(task_instances.get((task.task_id, d))) or {
'execution_date': d.isoformat(),
'task_id': task.task_id
}
for d in dates],
children_key: children,
'num_dep': len(task.upstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'start_date': task.start_date,
'end_date': task.end_date,
'depends_on_past': task.depends_on_past,
'ui_color': task.ui_color,
}
data = {
'name': '[DAG]',
'children': [recurse_nodes(t, set()) for t in dag.roots],
'instances': [
dag_runs.get(d) or {'execution_date': d.isoformat()}
for d in dates],
}
data = json.dumps(data, indent=4, default=json_ser)
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
return self.render(
'airflow/tree.html',
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
root=root,
form=form,
dag=dag, data=data, blur=blur, num_runs=num_runs)
@expose('/graph')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
@provide_session
def graph(self, session=None):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
arrange = request.args.get('arrange', dag.orientation)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
}
})
def get_upstream(task):
for t in task.upstream_list:
edge = {
'u': t.task_id,
'v': task.task_id,
}
if edge not in edges:
edges.append(edge)
get_upstream(t)
for t in dag.roots:
get_upstream(t)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dt_nr_dr_data['arrange'] = arrange
dttm = dt_nr_dr_data['dttm']
class GraphForm(DateTimeWithNumRunsWithDagRunsForm):
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
form = GraphForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data['dr_choices']
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
}
for t in dag.tasks}
if not tasks:
flash("No tasks found", "error")
session.commit()
doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') and dag.doc_md else ''
return self.render(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
state_token=state_token(dt_nr_dr_data['dr_state']),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=json.dumps(task_instances, indent=2),
tasks=json.dumps(tasks, indent=2),
nodes=json.dumps(nodes, indent=2),
edges=json.dumps(edges, indent=2), )
@expose('/duration')
@login_required
@wwwutils.action_logging
@provide_session
def duration(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = pendulum.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
cum_chart = nvd3.lineChart(
name="cumLineChart", x_is_date=True, height=chart_height, width="1200")
y = defaultdict(list)
x = defaultdict(list)
cum_y = defaultdict(list)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
TF = models.TaskFail
ti_fails = (
session
.query(TF)
.filter(
TF.dag_id == dag.dag_id,
TF.execution_date >= min_date,
TF.execution_date <= base_date,
TF.task_id.in_([t.task_id for t in dag.tasks]))
.all()
)
fails_totals = defaultdict(int)
for tf in ti_fails:
dict_key = (tf.dag_id, tf.task_id, tf.execution_date)
fails_totals[dict_key] += tf.duration
for ti in tis:
if ti.duration:
dttm = wwwutils.epoch(ti.execution_date)
x[ti.task_id].append(dttm)
y[ti.task_id].append(float(ti.duration))
fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date)
fails_total = fails_totals[fails_dict_key]
cum_y[ti.task_id].append(float(ti.duration + fails_total))
# determine the most relevant time unit for the set of task instance
# durations for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t])
# update the y Axis on both charts to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(y_unit))
chart.axislist['yAxis']['axisLabelDistance'] = '40'
cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(cum_y_unit))
cum_chart.axislist['yAxis']['axisLabelDistance'] = '40'
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
cum_chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(cum_y[task.task_id],
cum_y_unit))
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
cum_chart.buildcontent()
s_index = cum_chart.htmlcontent.rfind('});')
cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] +
"$(function() {$( document ).trigger('chartload') })" +
cum_chart.htmlcontent[s_index:])
return self.render(
'airflow/duration_chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent,
cum_chart=cum_chart.htmlcontent
)
@expose('/tries')
@login_required
@wwwutils.action_logging
@provide_session
def tries(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = pendulum.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, y_axis_format='d', height=chart_height,
width="1200")
for task in dag.tasks:
y = []
x = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
dttm = wwwutils.epoch(ti.execution_date)
x.append(dttm)
y.append(ti.try_number)
if x:
chart.add_serie(name=task.task_id, x=x, y=y)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
tries = sorted(list({ti.try_number for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if tries else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render(
'airflow/chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent
)
@expose('/landing_times')
@login_required
@wwwutils.action_logging
@provide_session
def landing_times(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = pendulum.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
y = {}
x = {}
for task in dag.tasks:
y[task.task_id] = []
x[task.task_id] = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
if ti.end_date:
ts = ti.execution_date
following_schedule = dag.following_schedule(ts)
if dag.schedule_interval and following_schedule:
ts = following_schedule
dttm = wwwutils.epoch(ti.execution_date)
secs = (ti.end_date - ts).total_seconds()
x[ti.task_id].append(dttm)
y[ti.task_id].append(secs)
# determine the most relevant time unit for the set of landing times
# for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
# update the y Axis to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Landing Time ({})'.format(y_unit))
chart.axislist['yAxis']['axisLabelDistance'] = '40'
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render(
'airflow/chart.html',
dag=dag,
chart=chart.htmlcontent,
height=str(chart_height + 100) + "px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
)
@expose('/paused', methods=['POST'])
@login_required
@wwwutils.action_logging
@provide_session
def paused(self, session=None):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
@wwwutils.action_logging
@provide_session
def refresh(self, session=None):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = timezone.utcnow()
session.merge(orm_dag)
session.commit()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect(request.referrer)
@expose('/refresh_all')
@login_required
@wwwutils.action_logging
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
flash("All DAGs are now up to date")
return redirect('/')
@expose('/gantt')
@login_required
@wwwutils.action_logging
@provide_session
def gantt(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dttm = dt_nr_dr_data['dttm']
form = DateTimeWithNumRunsWithDagRunsForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data['dr_choices']
tis = [
ti for ti in dag.get_task_instances(session, dttm, dttm)
if ti.start_date]
tis = sorted(tis, key=lambda ti: ti.start_date)
TF = models.TaskFail
ti_fails = list(itertools.chain(*[(
session
.query(TF)
.filter(TF.dag_id == ti.dag_id,
TF.task_id == ti.task_id,
TF.execution_date == ti.execution_date)
.all()
) for ti in tis]))
tis_with_fails = sorted(tis + ti_fails, key=lambda ti: ti.start_date)
tasks = []
for ti in tis_with_fails:
end_date = ti.end_date if ti.end_date else timezone.utcnow()
state = ti.state if type(ti) == models.TaskInstance else State.FAILED
tasks.append({
'startDate': wwwutils.epoch(ti.start_date),
'endDate': wwwutils.epoch(end_date),
'isoStart': ti.start_date.isoformat()[:-4],
'isoEnd': end_date.isoformat()[:-4],
'taskName': ti.task_id,
'duration': "{}".format(end_date - ti.start_date)[:-4],
'status': state,
'executionDate': ti.execution_date.isoformat(),
})
states = {task['status']: task['status'] for task in tasks}
data = {
'taskNames': [ti.task_id for ti in tis],
'tasks': tasks,
'taskStatus': states,
'height': len(tis) * 25 + 25,
}
session.commit()
return self.render(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
data=json.dumps(data, indent=2),
base_date='',
demo_mode=demo_mode,
root=root,
)
@expose('/object/task_instances')
@login_required
@wwwutils.action_logging
@provide_session
def task_instances(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = pendulum.parse(dttm)
else:
return ("Error: Invalid execution_date")
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
return json.dumps(task_instances)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
with create_session() as session:
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except:
# prevent XSS
form = escape(form)
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
@expose('/varimport', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def varimport(self):
try:
d = json.load(UTF8_READER(request.files['file']))
except Exception as e:
flash("Missing file or syntax error: {}.".format(e))
else:
for k, v in d.items():
models.Variable.set(k, v, serialize_json=isinstance(v, dict))
flash("{} variable(s) successfully updated.".format(len(d)))
return redirect('/admin/variable')
class HomeView(AdminIndexView):
@expose("/")
@login_required
@provide_session
def index(self, session=None):
DM = models.DagModel
# restrict the dags shown if filter_by_owner and current user is not superuser
do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
owner_mode = conf.get('webserver', 'OWNER_MODE').strip().lower()
hide_paused_dags_by_default = conf.getboolean('webserver',
'hide_paused_dags_by_default')
show_paused_arg = request.args.get('showPaused', 'None')
def get_int_arg(value, default=0):
try:
return int(value)
except ValueError:
return default
arg_current_page = request.args.get('page', '0')
arg_search_query = request.args.get('search', None)
dags_per_page = PAGE_SIZE
current_page = get_int_arg(arg_current_page, default=0)
if show_paused_arg.strip().lower() == 'false':
hide_paused = True
elif show_paused_arg.strip().lower() == 'true':
hide_paused = False
else:
hide_paused = hide_paused_dags_by_default
# read orm_dags from the db
sql_query = session.query(DM)
if do_filter and owner_mode == 'ldapgroup':
sql_query = sql_query.filter(
~DM.is_subdag,
DM.is_active,
DM.owners.in_(current_user.ldap_groups)
)
elif do_filter and owner_mode == 'user':
sql_query = sql_query.filter(
~DM.is_subdag, DM.is_active,
DM.owners == current_user.user.username
)
else:
sql_query = sql_query.filter(
~DM.is_subdag, DM.is_active
)
# optionally filter out "paused" dags
if hide_paused:
sql_query = sql_query.filter(~DM.is_paused)
orm_dags = {dag.dag_id: dag for dag
in sql_query
.all()}
import_errors = session.query(models.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"error")
# get a list of all non-subdag dags visible to everyone
# optionally filter out "paused" dags
if hide_paused:
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
not dag.parent_dag and not dag.is_paused]
else:
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
not dag.parent_dag]
# optionally filter to get only dags that the user should see
if do_filter and owner_mode == 'ldapgroup':
# only show dags owned by someone in @current_user.ldap_groups
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
if dag.owner in current_user.ldap_groups
}
elif do_filter and owner_mode == 'user':
# only show dags owned by @current_user.user.username
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
if dag.owner == current_user.user.username
}
else:
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
}
if arg_search_query:
lower_search_query = arg_search_query.lower()
# filter by dag_id
webserver_dags_filtered = {
dag_id: dag
for dag_id, dag in webserver_dags.items()
if (lower_search_query in dag_id.lower() or
lower_search_query in dag.owner.lower())
}
all_dag_ids = (set([dag.dag_id for dag in orm_dags.values()
if lower_search_query in dag.dag_id.lower() or
lower_search_query in dag.owners.lower()]) |
set(webserver_dags_filtered.keys()))
sorted_dag_ids = sorted(all_dag_ids)
else:
webserver_dags_filtered = webserver_dags
sorted_dag_ids = sorted(set(orm_dags.keys()) | set(webserver_dags.keys()))
start = current_page * dags_per_page
end = start + dags_per_page
num_of_all_dags = len(sorted_dag_ids)
page_dag_ids = sorted_dag_ids[start:end]
num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))
auto_complete_data = set()
for dag in webserver_dags_filtered.values():
auto_complete_data.add(dag.dag_id)
auto_complete_data.add(dag.owner)
for dag in orm_dags.values():
auto_complete_data.add(dag.dag_id)
auto_complete_data.add(dag.owners)
return self.render(
'airflow/dags.html',
webserver_dags=webserver_dags_filtered,
orm_dags=orm_dags,
hide_paused=hide_paused,
current_page=current_page,
search_query=arg_search_query if arg_search_query else '',
page_size=dags_per_page,
num_of_pages=num_of_pages,
num_dag_from=start + 1,
num_dag_to=min(end, num_of_all_dags),
num_of_all_dags=num_of_all_dags,
paging=wwwutils.generate_pages(current_page, num_of_pages,
search=arg_search_query,
showPaused=not hide_paused),
dag_ids_in_page=page_dag_ids,
auto_complete_data=auto_complete_data)
class QueryView(wwwutils.DataProfilingMixin, BaseView):
@expose('/', methods=['POST', 'GET'])
@wwwutils.gzipped
@provide_session
def query(self, session=None):
dbs = session.query(models.Connection).order_by(
models.Connection.conn_id).all()
session.expunge_all()
db_choices = list(
((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
conn_id_str = request.form.get('conn_id')
csv = request.form.get('csv') == "true"
sql = request.form.get('sql')
class QueryForm(Form):
conn_id = SelectField("Layout", choices=db_choices)
sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
data = {
'conn_id': conn_id_str,
'sql': sql,
}
results = None
has_data = False
error = False
if conn_id_str:
db = [db for db in dbs if db.conn_id == conn_id_str][0]
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
# df = hook.get_pandas_df(sql)
has_data = len(df) > 0
df = df.fillna('')
results = df.to_html(
classes=[
'table', 'table-bordered', 'table-striped', 'no-wrap'],
index=False,
na_rep='',
) if has_data else ''
except Exception as e:
flash(str(e), 'error')
error = True
if has_data and len(df) == QUERY_LIMIT:
flash(
"Query output truncated at " + str(QUERY_LIMIT) +
" rows", 'info')
if not has_data and error:
flash('No data', 'error')
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
form = QueryForm(request.form, data=data)
session.commit()
return self.render(
'airflow/query.html', form=form,
title="Ad Hoc Query",
results=results or '',
has_data=has_data)
class AirflowModelView(ModelView):
list_template = 'airflow/model_list.html'
edit_template = 'airflow/model_edit.html'
create_template = 'airflow/model_create.html'
column_display_actions = True
page_size = PAGE_SIZE
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
"""
    Modifies the base ModelView class for non-edit, browse-only operations.
"""
named_filter_urls = True
can_create = False
can_edit = False
can_delete = False
column_display_pk = True
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
column_formatters = dict(
pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
named_filter_urls = True
form_args = {
'pool': {
'validators': [
validators.DataRequired(),
]
}
}
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
verbose_name_plural = "SLA misses"
verbose_name = "SLA miss"
column_list = (
'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
column_formatters = dict(
task_id=task_instance_link,
execution_date=datetime_f,
timestamp=datetime_f,
dag_id=dag_link)
named_filter_urls = True
column_searchable_list = ('dag_id', 'task_id',)
column_filters = (
'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
filter_converter = wwwutils.UtcFilterConverter()
form_widget_args = {
'email_sent': {'disabled': True},
'timestamp': {'disabled': True},
}
@provide_session
def _connection_ids(session=None):
return [
(c.conn_id, c.conn_id)
for c in (
session.query(models.Connection.conn_id)
.group_by(models.Connection.conn_id)
)
]
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "chart"
verbose_name_plural = "charts"
form_columns = (
'label',
'owner',
'conn_id',
'chart_type',
'show_datatable',
'x_is_date',
'y_log_scale',
'show_sql',
'height',
'sql_layout',
'sql',
'default_params',
)
column_list = (
'label',
'conn_id',
'chart_type',
'owner',
'last_modified',
)
column_sortable_list = (
'label',
'conn_id',
'chart_type',
('owner', 'owner.username'),
'last_modified',
)
column_formatters = dict(label=label_link, last_modified=datetime_f)
column_default_sort = ('last_modified', True)
create_template = 'airflow/chart/create.html'
edit_template = 'airflow/chart/edit.html'
column_filters = ('label', 'owner.username', 'conn_id')
column_searchable_list = ('owner.username', 'label', 'sql')
column_descriptions = {
'label': "Can include {{ templated_fields }} and {{ macros }}",
'chart_type': "The type of chart to be displayed",
'sql': "Can include {{ templated_fields }} and {{ macros }}.",
'height': "Height of the chart, in pixels.",
'conn_id': "Source database to run the query against",
'x_is_date': (
"Whether the X axis should be casted as a date field. Expect most "
"intelligible date formats to get casted properly."
),
'owner': (
"The chart's owner, mostly used for reference and filtering in "
"the list view."
),
'show_datatable':
"Whether to display an interactive data table under the chart.",
'default_params': (
'A dictionary of {"key": "values",} that define what the '
'templated fields (parameters) values should be by default. '
'To be valid, it needs to "eval" as a Python dict. '
'The key values will show up in the url\'s querystring '
'and can be altered there.'
),
'show_sql': "Whether to display the SQL statement as a collapsible "
"section in the chart page.",
'y_log_scale': "Whether to use a log scale for the Y axis.",
'sql_layout': (
"Defines the layout of the SQL that the application should "
"expect. Depending on the tables you are sourcing from, it may "
"make more sense to pivot / unpivot the metrics."
),
}
column_labels = {
'sql': "SQL",
'height': "Chart Height",
'sql_layout': "SQL Layout",
'show_sql': "Display the SQL Statement",
'default_params': "Default Parameters",
}
form_choices = {
'chart_type': [
('line', 'Line Chart'),
('spline', 'Spline Chart'),
('bar', 'Bar Chart'),
('column', 'Column Chart'),
('area', 'Overlapping Area Chart'),
('stacked_area', 'Stacked Area Chart'),
('percent_area', 'Percent Area Chart'),
('datatable', 'No chart, data table only'),
],
'sql_layout': [
('series', 'SELECT series, x, y FROM ...'),
('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
],
'conn_id': _connection_ids()
}
def on_model_change(self, form, model, is_created=True):
if model.iteration_no is None:
model.iteration_no = 0
else:
model.iteration_no += 1
if not model.user_id and current_user and hasattr(current_user, 'id'):
model.user_id = current_user.id
model.last_modified = timezone.utcnow()
chart_mapping = (
('line', 'lineChart'),
('spline', 'lineChart'),
('bar', 'multiBarChart'),
('column', 'multiBarChart'),
('area', 'stackedAreaChart'),
('stacked_area', 'stackedAreaChart'),
('percent_area', 'stackedAreaChart'),
('datatable', 'datatable'),
)
chart_mapping = dict(chart_mapping)
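# Lookup example: chart_mapping.get('spline') returns 'lineChart', i.e. 'line'
# and 'spline' charts are both rendered with the nvd3 line chart.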
class KnownEventView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "known event"
verbose_name_plural = "known events"
form_columns = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
'description',
)
form_args = {
'label': {
'validators': [
validators.DataRequired(),
],
},
'event_type': {
'validators': [
validators.DataRequired(),
],
},
'start_date': {
'validators': [
validators.DataRequired(),
],
'filters': [
parse_datetime_f,
],
},
'end_date': {
'validators': [
validators.DataRequired(),
GreaterEqualThan(fieldname='start_date'),
],
'filters': [
parse_datetime_f,
]
},
'reported_by': {
'validators': [
validators.DataRequired(),
],
}
}
column_list = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
)
column_default_sort = ("start_date", True)
column_sortable_list = (
'label',
# todo: yes this has a spelling error
('event_type', 'event_type.know_event_type'),
'start_date',
'end_date',
('reported_by', 'reported_by.username'),
)
filter_converter = wwwutils.UtcFilterConverter()
form_overrides = dict(start_date=DateTimeField, end_date=DateTimeField)
class KnownEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
pass
# NOTE: For debugging / troubleshooting
# mv = KnowEventTypeView(
# models.KnownEventType,
# Session, name="Known Event Types", category="Manage")
# admin.add_view(mv)
# class DagPickleView(SuperUserMixin, ModelView):
# pass
# mv = DagPickleView(
# models.DagPickle,
# Session, name="Pickles", category="Manage")
# admin.add_view(mv)
class VariableView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "Variable"
verbose_name_plural = "Variables"
list_template = 'airflow/variable_list.html'
def hidden_field_formatter(view, context, model, name):
if wwwutils.should_hide_value_for_key(model.key):
return Markup('*' * 8)
val = getattr(model, name)
if val:
return val
else:
return Markup('<span class="label label-danger">Invalid</span>')
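    # With Airflow's default settings, keys that look sensitive (for example
    # ones containing 'password' or 'secret') are masked as ******** in the
    # Variable list by the formatter above.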
form_columns = (
'key',
'val',
)
column_list = ('key', 'val', 'is_encrypted',)
column_filters = ('key', 'val')
column_searchable_list = ('key', 'val', 'is_encrypted',)
column_default_sort = ('key', False)
form_widget_args = {
'is_encrypted': {'disabled': True},
'val': {
'rows': 20,
}
}
form_args = {
'key': {
'validators': {
validators.DataRequired(),
},
},
}
column_sortable_list = (
'key',
'val',
'is_encrypted',
)
column_formatters = {
'val': hidden_field_formatter,
}
# Default flask-admin export functionality doesn't handle serialized json
@action('varexport', 'Export', None)
@provide_session
def action_varexport(self, ids, session=None):
V = models.Variable
qry = session.query(V).filter(V.id.in_(ids)).all()
var_dict = {}
d = json.JSONDecoder()
for var in qry:
val = None
try:
val = d.decode(var.val)
except:
val = var.val
var_dict[var.key] = val
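        # e.g. a variable stored as '{"retries": 3}' is exported as a nested
        # JSON object, while a value that is not valid JSON stays a raw string.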
response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
response.headers["Content-Disposition"] = "attachment; filename=variables.json"
return response
def on_form_prefill(self, form, id):
if wwwutils.should_hide_value_for_key(form.key.data):
form.val.data = '*' * 8
class XComView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "XCom"
verbose_name_plural = "XComs"
form_columns = (
'key',
'value',
'execution_date',
'task_id',
'dag_id',
)
form_extra_fields = {
'value': StringField('Value'),
}
form_args = {
'execution_date': {
'filters': [
parse_datetime_f,
]
}
}
column_filters = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
column_searchable_list = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
filter_converter = wwwutils.UtcFilterConverter()
form_overrides = dict(execution_date=DateTimeField)
class JobModelView(ModelViewOnly):
verbose_name_plural = "jobs"
verbose_name = "job"
column_display_actions = False
column_default_sort = ('start_date', True)
column_filters = (
'job_type', 'dag_id', 'state',
'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
column_formatters = dict(
start_date=datetime_f,
end_date=datetime_f,
hostname=nobr_f,
state=state_f,
latest_heartbeat=datetime_f)
filter_converter = wwwutils.UtcFilterConverter()
class DagRunModelView(ModelViewOnly):
verbose_name_plural = "DAG Runs"
can_edit = True
can_create = True
column_editable_list = ('state',)
verbose_name = "dag run"
column_default_sort = ('execution_date', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
form_args = dict(
dag_id=dict(validators=[validators.DataRequired()])
)
column_list = (
'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')
column_filters = column_list
filter_converter = wwwutils.UtcFilterConverter()
column_searchable_list = ('dag_id', 'state', 'run_id')
column_formatters = dict(
execution_date=datetime_f,
state=state_f,
start_date=datetime_f,
dag_id=dag_link,
run_id=dag_run_link
)
@action('new_delete', "Delete", "Are you sure you want to delete selected records?")
@provide_session
def action_new_delete(self, ids, session=None):
deleted = set(session.query(models.DagRun)
.filter(models.DagRun.id.in_(ids))
.all())
session.query(models.DagRun) \
.filter(models.DagRun.id.in_(ids)) \
.delete(synchronize_session='fetch')
session.commit()
dirty_ids = []
for row in deleted:
dirty_ids.append(row.dag_id)
models.DagStat.update(dirty_ids, dirty_only=False, session=session)
@action('set_running', "Set state to 'running'", None)
@provide_session
def action_set_running(self, ids, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
for dr in session.query(DR).filter(DR.id.in_(ids)).all():
dirty_ids.append(dr.dag_id)
count += 1
dr.state = State.RUNNING
dr.start_date = timezone.utcnow()
models.DagStat.update(dirty_ids, session=session)
flash(
"{count} dag runs were set to running".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
@action('set_failed', "Set state to 'failed'",
"All running task instances would also be marked as failed, are you sure?")
@provide_session
def action_set_failed(self, ids, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
altered_tis = []
for dr in session.query(DR).filter(DR.id.in_(ids)).all():
dirty_ids.append(dr.dag_id)
count += 1
altered_tis += \
set_dag_run_state_to_failed(dagbag.get_dag(dr.dag_id),
dr.execution_date,
commit=True,
session=session)
models.DagStat.update(dirty_ids, session=session)
altered_ti_count = len(altered_tis)
flash(
"{count} dag runs and {altered_ti_count} task instances "
"were set to failed".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
@action('set_success', "Set state to 'success'",
"All task instances would also be marked as success, are you sure?")
@provide_session
def action_set_success(self, ids, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
altered_tis = []
for dr in session.query(DR).filter(DR.id.in_(ids)).all():
dirty_ids.append(dr.dag_id)
count += 1
altered_tis += \
set_dag_run_state_to_success(dagbag.get_dag(dr.dag_id),
dr.execution_date,
commit=True,
session=session)
models.DagStat.update(dirty_ids, session=session)
altered_ti_count = len(altered_tis)
flash(
"{count} dag runs and {altered_ti_count} task instances "
"were set to success".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
# Called after editing DagRun model in the UI.
@provide_session
def after_model_change(self, form, dagrun, is_created, session=None):
altered_tis = []
if dagrun.state == State.SUCCESS:
altered_tis = set_dag_run_state_to_success(
dagbag.get_dag(dagrun.dag_id),
dagrun.execution_date,
commit=True)
elif dagrun.state == State.FAILED:
altered_tis = set_dag_run_state_to_failed(
dagbag.get_dag(dagrun.dag_id),
dagrun.execution_date,
commit=True,
session=session)
elif dagrun.state == State.RUNNING:
altered_tis = set_dag_run_state_to_running(
dagbag.get_dag(dagrun.dag_id),
dagrun.execution_date,
commit=True,
session=session)
altered_ti_count = len(altered_tis)
models.DagStat.update([dagrun.dag_id], session=session)
flash(
"1 dag run and {altered_ti_count} task instances "
"were set to '{dagrun.state}'".format(**locals()))
class LogModelView(ModelViewOnly):
verbose_name_plural = "logs"
verbose_name = "log"
column_display_actions = False
column_default_sort = ('dttm', True)
column_filters = ('dag_id', 'task_id', 'execution_date', 'extra')
filter_converter = wwwutils.UtcFilterConverter()
column_formatters = dict(
dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
class TaskInstanceModelView(ModelViewOnly):
verbose_name_plural = "task instances"
verbose_name = "task instance"
column_filters = (
'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator', 'start_date', 'end_date')
filter_converter = wwwutils.UtcFilterConverter()
named_filter_urls = True
column_formatters = dict(
log_url=log_url_formatter,
task_id=task_instance_link,
hostname=nobr_f,
state=state_f,
execution_date=datetime_f,
start_date=datetime_f,
end_date=datetime_f,
queued_dttm=datetime_f,
dag_id=dag_link,
run_id=dag_run_link,
duration=duration_f)
column_searchable_list = ('dag_id', 'task_id', 'state')
column_default_sort = ('job_id', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
column_list = (
'state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
'pool', 'log_url')
page_size = PAGE_SIZE
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_task_instance_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_task_instance_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_task_instance_state(ids, State.SUCCESS)
@action('set_retry', "Set state to 'up_for_retry'", None)
def action_set_retry(self, ids):
self.set_task_instance_state(ids, State.UP_FOR_RETRY)
@provide_session
@action('clear',
lazy_gettext('Clear'),
lazy_gettext(
'Are you sure you want to clear the state of the selected task instance(s)'
' and set their dagruns to the running state?'))
def action_clear(self, ids, session=None):
try:
TI = models.TaskInstance
dag_to_task_details = {}
dag_to_tis = {}
# Collect dags upfront as dagbag.get_dag() will reset the session
for id_str in ids:
task_id, dag_id, execution_date = iterdecode(id_str)
dag = dagbag.get_dag(dag_id)
task_details = dag_to_task_details.setdefault(dag, [])
task_details.append((task_id, execution_date))
for dag, task_details in dag_to_task_details.items():
for task_id, execution_date in task_details:
execution_date = parse_execution_date(execution_date)
ti = session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag.dag_id,
TI.execution_date == execution_date).one()
tis = dag_to_tis.setdefault(dag, [])
tis.append(ti)
for dag, tis in dag_to_tis.items():
models.clear_task_instances(tis, session, dag=dag)
session.commit()
flash("{0} task instances have been cleared".format(len(ids)))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to clear task instances', 'error')
@provide_session
def set_task_instance_state(self, ids, target_state, session=None):
try:
TI = models.TaskInstance
count = len(ids)
for id in ids:
task_id, dag_id, execution_date = iterdecode(id)
execution_date = parse_execution_date(execution_date)
ti = session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag_id,
TI.execution_date == execution_date).one()
ti.state = target_state
session.commit()
flash(
"{count} task instances were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
def get_one(self, id):
"""
As a workaround for AIRFLOW-252, this method overrides Flask-Admin's ModelView.get_one().
TODO: this method should be removed once the below bug is fixed on Flask-Admin side.
https://github.com/flask-admin/flask-admin/issues/1226
"""
task_id, dag_id, execution_date = iterdecode(id)
execution_date = pendulum.parse(execution_date)
return self.session.query(self.model).get((task_id, dag_id, execution_date))
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
create_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
list_template = 'airflow/conn_list.html'
form_columns = (
'conn_id',
'conn_type',
'host',
'schema',
'login',
'password',
'port',
'extra',
'extra__jdbc__drv_path',
'extra__jdbc__drv_clsname',
'extra__google_cloud_platform__project',
'extra__google_cloud_platform__key_path',
'extra__google_cloud_platform__keyfile_dict',
'extra__google_cloud_platform__scope',
)
verbose_name = "Connection"
verbose_name_plural = "Connections"
column_default_sort = ('conn_id', False)
column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',)
form_overrides = dict(_password=PasswordField, _extra=TextAreaField)
form_widget_args = {
'is_extra_encrypted': {'disabled': True},
'is_encrypted': {'disabled': True},
}
    # Used to customize the form; the form elements get rendered
    # and the results are stored in the extra field as JSON. All of these
    # need to be prefixed with extra__ and then the conn_type, as in
    # extra__{conn_type}__name. You can also hide form elements and rename
    # others from the connection_form.js file.
form_extra_fields = {
'extra__jdbc__drv_path': StringField('Driver Path'),
'extra__jdbc__drv_clsname': StringField('Driver Class'),
'extra__google_cloud_platform__project': StringField('Project Id'),
'extra__google_cloud_platform__key_path': StringField('Keyfile Path'),
'extra__google_cloud_platform__keyfile_dict': PasswordField('Keyfile JSON'),
'extra__google_cloud_platform__scope': StringField('Scopes (comma separated)'),
}
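    # Hypothetical extension (field name invented for illustration): a new
    # connection type would follow the same naming scheme, e.g.
    #     'extra__myservice__api_token': PasswordField('API Token'),
    # and 'myservice' would also need to be added to the conn_type list in
    # on_model_change below for the value to be serialized into `extra`.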
form_choices = {
'conn_type': models.Connection._types
}
def on_model_change(self, form, model, is_created):
formdata = form.data
if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']:
extra = {
key: formdata[key]
for key in self.form_extra_fields.keys() if key in formdata}
model.extra = json.dumps(extra)
@classmethod
def alert_fernet_key(cls):
fk = None
try:
fk = conf.get('core', 'fernet_key')
except:
pass
return fk is None
@classmethod
def is_secure(cls):
"""
Used to display a message in the Connection list view making it clear
that the passwords and `extra` field can't be encrypted.
"""
is_secure = False
try:
import cryptography
conf.get('core', 'fernet_key')
is_secure = True
except:
pass
return is_secure
def on_form_prefill(self, form, id):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception:
d = {}
for field in list(self.form_extra_fields.keys()):
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "User"
verbose_name_plural = "Users"
column_default_sort = 'username'
class VersionView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def version(self):
# Look at the version from setup.py
try:
airflow_version = pkg_resources.require("apache-airflow")[0].version
except Exception as e:
airflow_version = None
logging.error(e)
# Get the Git repo and git hash
git_version = None
try:
with open(os.path.join(*[settings.AIRFLOW_HOME, 'airflow', 'git_version'])) as f:
git_version = f.readline()
except Exception as e:
logging.error(e)
# Render information
title = "Version Info"
return self.render('airflow/version.html',
title=title,
airflow_version=airflow_version,
git_version=git_version)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def conf(self):
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = conf.AIRFLOW_CONFIG
if conf.getboolean("webserver", "expose_config"):
with open(conf.AIRFLOW_CONFIG, 'r') as f:
config = f.read()
table = [(section, key, value, source)
for section, parameters in conf.as_dict(True, True).items()
for key, (value, source) in parameters.items()]
else:
config = (
"# Your Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
table = None
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/config.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle,
table=table)
class DagModelView(wwwutils.SuperUserMixin, ModelView):
column_list = ('dag_id', 'owners')
column_editable_list = ('is_paused',)
form_excluded_columns = ('is_subdag', 'is_active')
column_searchable_list = ('dag_id',)
column_filters = (
'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
'last_scheduler_run', 'last_expired')
filter_converter = wwwutils.UtcFilterConverter()
form_widget_args = {
'last_scheduler_run': {'disabled': True},
'fileloc': {'disabled': True},
'is_paused': {'disabled': True},
'last_pickled': {'disabled': True},
'pickle_id': {'disabled': True},
'last_loaded': {'disabled': True},
'last_expired': {'disabled': True},
'pickle_size': {'disabled': True},
'scheduler_lock': {'disabled': True},
'owners': {'disabled': True},
}
column_formatters = dict(
dag_id=dag_link,
)
can_delete = False
can_create = False
page_size = PAGE_SIZE
list_template = 'airflow/list_dags.html'
named_filter_urls = True
def get_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
|
apache-2.0
|
alown/chromium
|
mothership/tools/tiledialog.py
|
4
|
4700
|
# Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
#
# Authors:
# Brian Paul
"""The TileDialog class is a dialog used to edit a list of tiles for
a server/network node. If the server node is an N-instance node the
dialog will display a spin control [1 .. N] to edit the tile list for
any of the N instances.
"""
from wxPython.wx import *
from wxPython.gizmos import *
import crutils
class TileDialog(wxDialog):
def __init__(self, parent, id, title, numLists, hosts=[""], message=""):
"""parent, id, and title are the standard wxDialog parameters.
"""
assert numLists >= 1
wxDialog.__init__(self, parent, id, title, pos=wxPoint(-1,-1),
style = wxDEFAULT_DIALOG_STYLE|wxRESIZE_BORDER)
id_OK = 1
id_CANCEL = 2
id_INSTANCE = 3
outerSizer = wxBoxSizer(wxVERTICAL)
if numLists > 1:
# spin box to choose node instance
box = wxStaticBox(parent=self, id=-1, label="Node Instance")
innerSizer = wxStaticBoxSizer(box, wxHORIZONTAL)
outerSizer.Add(innerSizer, 0, wxGROW|wxALL, 4)
label = wxStaticText(parent=self, id=-1, label="Instance:")
innerSizer.Add(label, flag=wxALIGN_CENTRE_VERTICAL|wxALL, border=2)
self.instanceCtrl = wxSpinCtrl(parent=self, id=id_INSTANCE,
size=wxSize(50,25),
min=1, max=numLists, value="1")
EVT_SPINCTRL(self.instanceCtrl, id_INSTANCE, self._onInstance)
self.hostLabel = wxStaticText(parent=self, id=-1,
label="Hostname: %s" % hosts[0])
innerSizer.Add(self.instanceCtrl,
flag=wxALIGN_CENTRE_VERTICAL|wxALL, border=2)
innerSizer.Add(self.hostLabel, flag=wxALIGN_CENTRE_VERTICAL|wxALL,
border=6)
# editable list of tile tuples
box = wxStaticBox(parent=self, id=-1, label="Edit Tile List")
innerSizer = wxStaticBoxSizer(box, wxVERTICAL)
outerSizer.Add(innerSizer, 1, wxALL|wxGROW, 4)
self.listBox = wxEditableListBox(parent=self, id=-1,
label="Tiles (x, y, width, height)",
size=(300, 200))
innerSizer.Add(self.listBox, 1, wxGROW|wxALL, 2)
# OK / Cancel buttons
rowSizer = wxGridSizer(rows=1, cols=2, vgap=4, hgap=20)
self.OkButton = wxButton(parent=self, id=id_OK, label="OK")
rowSizer.Add(self.OkButton, 0, wxALIGN_CENTER)
self.CancelButton = wxButton(parent=self, id=id_CANCEL, label="Cancel")
rowSizer.Add(self.CancelButton, 0, wxALIGN_CENTER)
outerSizer.Add(rowSizer, 0, wxGROW|wxALL, 4)
EVT_BUTTON(self.OkButton, id_OK, self._onOK)
EVT_BUTTON(self.CancelButton, id_CANCEL, self._onCancel)
min = outerSizer.GetMinSize()
self.SetSizer(outerSizer)
self.SetAutoLayout(true)
self.SetSizeHints(minW=min[0], minH=min[1])
self.SetSize(min)
self.TileListList = [] # array [numLists] of array of (x, y, w, h)
self.NumLists = numLists
for i in range(numLists):
self.TileListList.append( [] )
self.OldInstance = 1
self.Hosts = hosts
def __LoadWidget(self, i):
"""Load the widget with the ith tile list."""
strings = []
if i < len(self.TileListList):
for tile in self.TileListList[i]:
tileString = "(%d, %d, %d, %d)" % tile
strings.append(tileString)
self.listBox.SetStrings(strings)
def __ReadWidget(self, i):
"""Get the strings from the listBox and update the ith tile list."""
assert i >= 0
assert i < self.NumLists
strings = self.listBox.GetStrings()
tiles = []
for s in strings:
# parse "(x,y,w,h)" to get tuple (x,y,w,h)
# XXX probably need an exception handler
tile = eval(s)
if tile and len(tile) == 4:
tiles.append(tile)
self.TileListList[i] = tiles
def _onInstance(self, event):
"""Called when the instance spin control changes."""
self.__ReadWidget(self.OldInstance - 1)
i = self.instanceCtrl.GetValue()
assert i >= 1
self.__LoadWidget(i - 1)
if i - 1 < len(self.Hosts):
self.hostLabel.SetLabel("Hostname: %s" % self.Hosts[i - 1])
else:
# use last hostname
self.hostLabel.SetLabel("Hostname: %s" % self.Hosts[-1])
self.OldInstance = i
def _onOK(self, event):
"""Called by OK button"""
self.EndModal(wxID_OK)
def _onCancel(self, event):
"""Called by Cancel button"""
self.EndModal(wxID_CANCEL)
def SetTileLists(self, tiles):
"""Specify list of list of tiles (x,y,w,h) to edit."""
self.TileListList = tiles
while len(self.TileListList) < self.NumLists:
self.TileListList.append( [] )
self.__LoadWidget(0)
if self.NumLists > 1:
self.instanceCtrl.SetValue(1)
def GetTileLists(self):
"""Return list of list of tiles (x,y,w,h)."""
if self.NumLists > 1:
i = self.instanceCtrl.GetValue() - 1
else:
i = 0
self.__ReadWidget(i)
return self.TileListList
|
bsd-3-clause
|
bbuchalter/python_koans
|
python3/koans/about_lists.py
|
30
|
3202
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutArrays in the Ruby Koans
#
from runner.koan import *
class AboutLists(Koan):
def test_creating_lists(self):
empty_list = list()
self.assertEqual(list, type(empty_list))
self.assertEqual(__, len(empty_list))
def test_list_literals(self):
nums = list()
self.assertEqual([], nums)
nums[0:] = [1]
self.assertEqual([1], nums)
nums[1:] = [2]
self.assertListEqual([1, __], nums)
nums.append(333)
self.assertListEqual([1, 2, __], nums)
def test_accessing_list_elements(self):
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual(__, noms[0])
self.assertEqual(__, noms[3])
self.assertEqual(__, noms[-1])
self.assertEqual(__, noms[-3])
def test_slicing_lists(self):
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual(__, noms[0:1])
self.assertEqual(__, noms[0:2])
self.assertEqual(__, noms[2:2])
self.assertEqual(__, noms[2:20])
self.assertEqual(__, noms[4:0])
self.assertEqual(__, noms[4:100])
self.assertEqual(__, noms[5:0])
def test_slicing_to_the_edge(self):
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual(__, noms[2:])
self.assertEqual(__, noms[:2])
def test_lists_and_ranges(self):
self.assertEqual(range, type(range(5)))
self.assertNotEqual([1, 2, 3, 4, 5], range(1,6))
self.assertEqual(__, list(range(5)))
self.assertEqual(__, list(range(5, 9)))
def test_ranges_with_steps(self):
self.assertEqual(__, list(range(0, 8, 2)))
self.assertEqual(__, list(range(1, 8, 3)))
self.assertEqual(__, list(range(5, -7, -4)))
self.assertEqual(__, list(range(5, -8, -4)))
def test_insertions(self):
knight = ['you', 'shall', 'pass']
knight.insert(2, 'not')
self.assertEqual(__, knight)
knight.insert(0, 'Arthur')
self.assertEqual(__, knight)
def test_popping_lists(self):
stack = [10, 20, 30, 40]
stack.append('last')
self.assertEqual(__, stack)
popped_value = stack.pop()
self.assertEqual(__, popped_value)
self.assertEqual(__, stack)
popped_value = stack.pop(1)
self.assertEqual(__, popped_value)
self.assertEqual(__, stack)
# Notice that there is a "pop" but no "push" in python?
# Part of the Python philosophy is that there ideally should be one and
# only one way of doing anything. A 'push' is the same as an 'append'.
# To learn more about this try typing "import this" from the python
# console... ;)
def test_making_queues(self):
queue = [1, 2]
queue.append('last')
self.assertEqual(__, queue)
popped_value = queue.pop(0)
self.assertEqual(__, popped_value)
self.assertEqual(__, queue)
# Note: popping from the left-hand side of a list is inefficient in both
# Python 2 and Python 3 (every remaining element has to shift down).
# Use collections.deque for queue-like access instead.
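# A minimal sketch of the deque-based queue mentioned above (illustration
# only, not part of the koan): deque.popleft() is O(1), while list.pop(0)
# must shift every remaining element.
#
# from collections import deque
# queue = deque([1, 2])
# queue.append('last')    # deque([1, 2, 'last'])
# queue.popleft()         # returns 1 without shifting the rest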
|
mit
|
mananam/pelican-prajna
|
prajna/readers.py
|
1
|
2368
|
# -*- coding: utf-8 -*-
"""Readers for Sloka content."""
from __future__ import unicode_literals, print_function
import json
import logging
from pelican.readers import BaseReader
logger = logging.getLogger(__name__)
class SlokaReader(BaseReader):
"""A commonmarkdown based reader for sanskrit verses.
Uses following additional markup to extract special context:
<!-- Verse Metadata -->
Key: Value
<!-- Verse -->
~~~sloka
line1
line2
line3
~~~
~~~padachhed
word1 word2
word2a word2b
word3a
~~~
~~~anvaya
word2a word1 word3a word2 word2b
~~~
TODO sloka, padachhed, anvaya are styled differently.
"""
enabled = True
file_extensions = ['md']
extensions = None
def __init__(self, *args, **kwargs):
"""Create an instance of SlokaReader."""
logger.debug("SlokaReader: Initialize")
super(SlokaReader, self).__init__(*args, **kwargs)
def read(self, source_path):
"""Parse the json content in a file.
Extract metadata and content from a JSON file.
Returns:
string, dict: content of the file as string, dictionary of metadata
"""
logger.debug("SlokaReader: Read: %s", source_path)
import CommonMark
metadata = {}
content = {}
with open(source_path) as f:
# parse the metadata
line = f.readline()
keyval = line.rstrip().split(":", 1)
while len(keyval) == 2:
metadata[keyval[0]] = keyval[1]
line = f.readline()
keyval = line.rstrip().split(":", 1)
parser = CommonMark.Parser()
ast = parser.parse(line + f.read())
walker = ast.walker()
event = walker.nxt()
while event is not None:
node = event["node"]
if self._is_valid_node(node):
content[node.info] = node.literal.rstrip()
event = walker.nxt()
json_content = json.dumps(content)
return json_content, metadata
# def process_metadata(self, name, value):
# return value
def _is_valid_node(self, node):
if node.t == "code_block":
return True
return False
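# Usage sketch (hypothetical path and settings; assumes a pelican settings
# object as accepted by BaseReader and a source file laid out as in the
# class docstring above):
#
# reader = SlokaReader(settings)
# body, metadata = reader.read('content/sloka-001.md')
# data = json.loads(body)
# data['sloka'], data['padachhed'], data['anvaya']  # fenced blocks keyed by info string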
|
mit
|
fengshao0907/fbthrift
|
thrift/lib/py/server/TServer.py
|
14
|
13984
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import sys
import os
import threading
if sys.version_info[0] >= 3:
import queue
Queue = queue
else:
import Queue
import warnings
from thrift.Thrift import TProcessor, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol.THeaderProtocol import THeaderProtocolFactory
class TConnectionContext:
def getPeerName(self):
"""Gets the address of the client.
Returns:
The equivalent value of socket.getpeername() on the client socket
"""
raise NotImplementedError
class TRpcConnectionContext(TConnectionContext):
"""Connection context class for thrift RPC calls"""
def __init__(self, client_socket, iprot=None, oprot=None):
"""Initializer.
Arguments:
client_socket: the TSocket to the client
"""
self._client_socket = client_socket
self.iprot = iprot
self.oprot = oprot
def setProtocols(self, iprot, oprot):
self.iprot = iprot
self.oprot = oprot
def getPeerName(self):
"""Gets the address of the client.
Returns:
Same value as socket.peername() for the TSocket
"""
return self._client_socket.getPeerName()
class TServerEventHandler:
"""Event handler base class.
Override selected methods on this class to implement custom event handling
"""
def preServe(self, address):
"""Called before the server begins.
Arguments:
address: the address that the server is listening on
"""
pass
def newConnection(self, context):
"""Called when a client has connected and is about to begin processing.
Arguments:
context: instance of TRpcConnectionContext
"""
pass
def clientBegin(self, iprot, oprot):
"""Deprecated: Called when a new connection is made to the server.
For all servers other than TNonblockingServer, this function is called
whenever newConnection is called and vice versa. This is the old-style
for event handling and is not supported for TNonblockingServer. New
code should always use the newConnection method.
"""
pass
def connectionDestroyed(self, context):
"""Called when a client has finished request-handling.
Arguments:
context: instance of TRpcConnectionContext
"""
pass
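# A minimal sketch of a custom event handler (hypothetical name; it only
# overrides the callbacks documented above and can be attached to any
# server via setServerEventHandler()):
#
# class LoggingServerEventHandler(TServerEventHandler):
#     def preServe(self, address):
#         logging.info("listening on %s", address)
#     def newConnection(self, context):
#         logging.info("client connected: %s", context.getPeerName())
#     def connectionDestroyed(self, context):
#         logging.info("client disconnected")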
class TServer:
"""Base interface for a server, which must have a serve method."""
""" constructors for all servers:
1) (processor, serverTransport)
2) (processor, serverTransport, transportFactory, protocolFactory)
3) (processor, serverTransport,
inputTransportFactory, outputTransportFactory,
inputProtocolFactory, outputProtocolFactory)
Optionally, the handler can be passed instead of the processor,
and a processor will be created automatically:
4) (handler, serverTransport)
5) (handler, serverTransport, transportFactory, protocolFactory)
6) (handler, serverTransport,
inputTransportFactory, outputTransportFactory,
inputProtocolFactory, outputProtocolFactory)
The attribute serverEventHandler (default: a no-op TServerEventHandler)
receives callbacks for various events in the server lifecycle. It should
be set to an instance of TServerEventHandler. A short usage sketch of the
constructor forms follows this class definition.
"""
def __init__(self, *args):
if (len(args) == 2):
self.__initArgs__(args[0], args[1],
TTransport.TTransportFactoryBase(),
TTransport.TTransportFactoryBase(),
TBinaryProtocol.TBinaryProtocolFactory(),
TBinaryProtocol.TBinaryProtocolFactory())
elif (len(args) == 4):
self.__initArgs__(args[0], args[1], args[2], args[2], args[3],
args[3])
elif (len(args) == 6):
self.__initArgs__(args[0], args[1], args[2], args[3], args[4],
args[5])
def __initArgs__(self, processor, serverTransport,
inputTransportFactory, outputTransportFactory,
inputProtocolFactory, outputProtocolFactory):
self.processor = self._getProcessor(processor)
self.serverTransport = serverTransport
self.inputTransportFactory = inputTransportFactory
self.outputTransportFactory = outputTransportFactory
self.inputProtocolFactory = inputProtocolFactory
self.outputProtocolFactory = outputProtocolFactory
self.serverEventHandler = TServerEventHandler()
def _getProcessor(self, processor):
""" Check if a processor is really a processor, or if it is a handler
auto create a processor for it """
if isinstance(processor, TProcessor):
return processor
elif hasattr(processor, "_processor_type"):
handler = processor
return handler._processor_type(handler)
else:
raise TApplicationException(
message="Could not detect processor type")
def setServerEventHandler(self, handler):
self.serverEventHandler = handler
def _clientBegin(self, context, iprot, oprot):
self.serverEventHandler.newConnection(context)
self.serverEventHandler.clientBegin(iprot, oprot)
def handle(self, client):
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
if isinstance(self.inputProtocolFactory, THeaderProtocolFactory):
oprot = iprot
else:
oprot = self.outputProtocolFactory.getProtocol(otrans)
context = TRpcConnectionContext(client, iprot, oprot)
self._clientBegin(context, iprot, oprot)
try:
while True:
self.processor.process(iprot, oprot, context)
except TTransport.TTransportException as tx:
pass
except Exception as x:
logging.exception(x)
self.serverEventHandler.connectionDestroyed(context)
itrans.close()
otrans.close()
def serve(self):
pass
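# Usage sketch for the constructor forms documented above (hypothetical
# names: MyService is thrift-generated code and MyHandler implements its
# interface; TServerSocket is assumed to be available as in Apache Thrift):
#
# from thrift.transport import TSocket
# handler = MyHandler()
# processor = MyService.Processor(handler)         # or pass the handler itself (form 4)
# transport = TSocket.TServerSocket(port=9090)
# server = TThreadedServer(processor, transport)   # form (1): default factories
# server.setServerEventHandler(TServerEventHandler())
# server.serve()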
class TSimpleServer(TServer):
"""Simple single-threaded server that just pumps around one transport."""
def __init__(self, *args):
warnings.warn("TSimpleServer is deprecated. Please use one of "
"Nonblocking, Twisted, or Gevent server instead.",
DeprecationWarning)
TServer.__init__(self, *args)
def serve(self):
self.serverTransport.listen()
for name in self.serverTransport.getSocketNames():
self.serverEventHandler.preServe(name)
while True:
client = self.serverTransport.accept()
self.handle(client)
class TThreadedServer(TServer):
"""Threaded server that spawns a new thread per each connection."""
def __init__(self, *args, **kwargs):
TServer.__init__(self, *args)
self.daemon = kwargs.get("daemon", False)
def serve(self):
self.serverTransport.listen()
for name in self.serverTransport.getSocketNames():
self.serverEventHandler.preServe(name)
while True:
try:
client = self.serverTransport.accept()
t = threading.Thread(target=self.handle, args=(client,))
t.daemon = self.daemon
t.start()
except KeyboardInterrupt:
raise
except Exception as x:
logging.exception(x)
class TThreadPoolServer(TServer):
"""Server with a fixed size pool of threads which service requests."""
def __init__(self, *args, **kwargs):
warnings.warn("TThreadPoolServer is deprecated. Please use one of "
"Nonblocking, Twisted, or Gevent server instead.",
DeprecationWarning)
TServer.__init__(self, *args)
queue_size = kwargs.get("queueSize", 0)
self.clients = Queue.Queue(queue_size)
self.threads = 10
self.daemon = kwargs.get("daemon", False)
self.timeout = kwargs.get("timeout", None)
def setNumThreads(self, num):
"""Set the number of worker threads that should be created"""
self.threads = num
def serveThread(self):
"""
Loop around getting clients from the shared queue and process them.
"""
while True:
try:
client = self.clients.get()
if self.timeout:
client.setTimeout(self.timeout)
self.handle(client)
except Exception as x:
logging.exception(x)
def serve(self):
"""
Start a fixed number of worker threads and put client into a queue
"""
for i in range(self.threads):
try:
t = threading.Thread(target=self.serveThread)
t.daemon = self.daemon
t.start()
except Exception as x:
logging.exception(x)
# Pump the socket for clients
self.serverTransport.listen()
for name in self.serverTransport.getSocketNames():
self.serverEventHandler.preServe(name)
while True:
client = None
try:
client = self.serverTransport.accept()
self.clients.put(client)
except Exception as x:
logging.exception(x)
if client:
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
itrans.close()
otrans.close()
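# Usage sketch (hypothetical processor/transport, as in the sketch after
# TServer above; queueSize, timeout and daemon are the kwargs read in
# __init__):
#
# server = TThreadPoolServer(processor, transport, queueSize=128, timeout=30)
# server.setNumThreads(32)
# server.serve()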
class TForkingServer(TServer):
"""A Thrift server that forks a new process for each request"""
"""
This is more scalable than the threaded server as it does not cause
GIL contention.
Note that this has different semantics from the threading server.
Specifically, updates to shared variables will no longer be shared.
It will also not work on windows.
This code is heavily inspired by SocketServer.ForkingMixIn in the
Python stdlib.
"""
def __init__(self, *args):
TServer.__init__(self, *args)
self.children = []
def serve(self):
def tryClose(file):
try:
file.close()
except IOError as e:
logging.warning(e, exc_info=True)
self.serverTransport.listen()
for name in self.serverTransport.getSocketNames():
self.serverEventHandler.preServe(name)
while True:
client = self.serverTransport.accept()
try:
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
if isinstance(self.inputProtocolFactory,
THeaderProtocolFactory):
oprot = iprot
else:
oprot = self.outputProtocolFactory.getProtocol(otrans)
context = TRpcConnectionContext(client, iprot, oprot)
self._clientBegin(context, iprot, oprot)
pid = os.fork()
if pid: # parent
# add before collect, otherwise you race w/ waitpid
self.children.append(pid)
self._collectChildren()
# Parent must close socket or the connection may not get
# closed promptly
tryClose(itrans)
tryClose(otrans)
else:
ecode = 0
try:
try:
while True:
self.processor.process(iprot, oprot, context)
except TTransport.TTransportException as tx:
pass
except Exception as e:
logging.exception(e)
ecode = 1
finally:
self.serverEventHandler.connectionDestroyed(context)
tryClose(itrans)
tryClose(otrans)
os._exit(ecode)
except TTransport.TTransportException as tx:
pass
except Exception as x:
logging.exception(x)
def _collectChildren(self):
while self.children:
try:
pid, status = os.waitpid(0, os.WNOHANG)
except os.error:
pid = None
if pid:
self.children.remove(pid)
else:
break
|
apache-2.0
|
onestarshang/flask_super_config
|
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/__init__.py
|
482
|
2055
|
"""
urllib3 - Thread-safe connection pooling and re-using.
"""
__author__ = 'Andrey Petrov ([email protected])'
__license__ = 'MIT'
__version__ = '1.10.4'
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
"""
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
"""
# This method needs to be in this __init__.py to get the __name__ correct
# even if urllib3 is vendored within another package.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.setLevel(level)
logger.debug('Added a stderr logging handler to logger: %s' % __name__)
return handler
# ... Clean up.
del NullHandler
import warnings
# SecurityWarning's always go off by default.
warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
append=True)
def disable_warnings(category=exceptions.HTTPWarning):
"""
Helper for quickly disabling all urllib3 warnings.
"""
warnings.simplefilter('ignore', category)
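# Usage sketch (uses only the names exported above; the URL is a placeholder):
#
# import logging
# add_stderr_logger(logging.INFO)   # verbose logging while debugging
# disable_warnings()                # silence every urllib3 HTTPWarning
# http = PoolManager()
# r = http.request('GET', 'http://example.com/')
# r.status, r.data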
|
gpl-2.0
|
weizhenwei/wireshark
|
tools/wireshark_gen.py
|
11
|
100064
|
# -*- python -*-
#
# wireshark_gen.py (part of idl2wrs)
#
# Author : Frank Singleton ([email protected])
#
# Copyright (C) 2001 Frank Singleton, Ericsson Inc.
#
# This file is a backend to "omniidl", used to generate "Wireshark"
# dissectors from CORBA IDL descriptions. The output language generated
# is "C". It will generate code to use the GIOP/IIOP get_CDR_XXX API.
#
# Please see packet-giop.h in Wireshark distro for API description.
# Wireshark is available at http://www.wireshark.org/
#
# Omniidl is part of the OmniOrb distribution, and is available at
# http://omniorb.sourceforge.net
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
#
# Description:
#
# Omniidl Back-end which parses an IDL list of "Operation" nodes
# passed from wireshark_be2.py and generates "C" code for compiling
# as a plugin for the Wireshark IP Protocol Analyser.
#
#
# Strategy (sneaky but ...)
#
# problem: I don't know what variables to declare until AFTER the helper functions
# have been built, so ...
#
# There are 2 passes through genHelpers, the first one is there just to
# make sure the fn_hash data struct is populated properly.
# The second pass is the real thing, generating code and declaring
# variables (from the 1st pass) properly.
#
"""Wireshark IDL compiler back-end."""
from omniidl import idlast, idltype, idlutil, output
import sys, string
import tempfile
#
# Output class, generates "C" src code for the sub-dissector
#
# in:
#
#
# self - me
# st - output stream
# node - a reference to an Operations object.
# name - scoped name (Module::Module::Interface:: .. ::Operation
#
#
# TODO -- FS
#
# 1. generate hf[] data for searchable fields (but what is searchable?) [done, could be improved]
# 2. add item instead of add_text() [done]
# 3. sequence handling [done]
# 4. User Exceptions [done]
# 5. Fix arrays, and structs containing arrays [done]
# 6. Handle pragmas.
# 7. Exception can be common to many operations, so handle them outside the
# operation helper functions [done]
# 8. Automatic variable declaration [done, improve; still get some collisions. Add a variable delegator function]
# For example, multidimensional arrays.
# 9. wchar and wstring handling [giop API needs improving]
# 10. Support Fixed [done]
# 11. Support attributes (get/set) [started, needs language mapping option, perhaps wireshark GUI option
# to set the attribute function prefix or suffix ? ] For now the prefix is "_get" and "_set"
# eg: attribute string apple => _get_apple and _set_apple
#
# 12. Implement IDL "union" code [done]
# 13. Implement support for plugins [done]
# 14. Don't generate code for empty operations (cf: exceptions without members)
# 15. Generate code to display Enums numerically and symbolically [done]
# 16. Place structs/unions in subtrees
# 17. Recursive struct and union handling [done]
# 18. Improve variable naming for display (eg: structs, unions etc) [done]
#
# Also test, Test, TEST
#
#
# Strategy:
# For every operation and attribute do
# For return val and all parameters do
# find basic IDL type for each parameter
# output get_CDR_xxx
# output exception handling code
# output attribute handling code
#
#
class wireshark_gen_C:
#
# Turn DEBUG stuff on/off
#
DEBUG = 0
#
# Some string constants for our templates
#
c_u_octet8 = "guint64 u_octet8;"
c_s_octet8 = "gint64 s_octet8;"
c_u_octet4 = "guint32 u_octet4;"
c_s_octet4 = "gint32 s_octet4;"
c_u_octet2 = "guint16 u_octet2;"
c_s_octet2 = "gint16 s_octet2;"
c_u_octet1 = "guint8 u_octet1;"
c_s_octet1 = "gint8 s_octet1;"
c_float = "gfloat my_float;"
c_double = "gdouble my_double;"
c_seq = "const gchar *seq = NULL;" # pointer to buffer of gchars
c_i = "guint32 i_"; # loop index
c_i_lim = "guint32 u_octet4_loop_"; # loop limit
c_u_disc = "guint32 disc_u_"; # unsigned int union discriminant variable name (enum)
c_s_disc = "gint32 disc_s_"; # signed int union discriminant variable name (other cases, except Enum)
#
# Constructor
#
def __init__(self, st, protocol_name, dissector_name ,description):
self.st = output.Stream(tempfile.TemporaryFile(),4) # for first pass only
self.st_save = st # where 2nd pass should go
self.protoname = protocol_name # Protocol Name (eg: ECHO)
self.dissname = dissector_name # Dissector name (eg: echo)
self.description = description # Detailed Protocol description (eg: Echo IDL Example)
self.exlist = [] # list of exceptions used in operations.
#self.curr_sname # scoped name of current opnode or exnode I am visiting, used for generating "C" var declares
self.fn_hash = {} # top level hash to contain key = function/exception and val = list of variable declarations
# ie a hash of lists
self.fn_hash_built = 0 # flag to indicate the 1st pass is complete, and the fn_hash is correctly
# populated with operations/vars and exceptions/vars
#
# genCode()
#
# Main entry point, controls sequence of
# generated code.
#
#
def genCode(self,oplist, atlist, enlist, stlist, unlist): # operation,attribute,enums,struct and union lists
self.genHelpers(oplist,stlist,unlist) # sneaky .. call it now, to populate the fn_hash
# so when I come to that operation later, I have the variables to
# declare already.
self.genExceptionHelpers(oplist) # sneaky .. call it now, to populate the fn_hash
# so when I come to that exception later, I have the variables to
# declare already.
self.genAttributeHelpers(atlist) # sneaky .. call it now, to populate the fn_hash
# so when I come to that exception later, I have the variables to
# declare already.
self.fn_hash_built = 1 # DONE, so now I know , see genOperation()
self.st = self.st_save
self.genHeader() # initial dissector comments
self.genEthCopyright() # Wireshark Copyright comments.
self.genGPL() # GPL license
self.genIncludes()
self.genPrototype()
self.genProtocol()
self.genDeclares(oplist,atlist,enlist,stlist,unlist)
if (len(atlist) > 0):
self.genAtList(atlist) # string constant declares for Attributes
if (len(enlist) > 0):
self.genEnList(enlist) # string constant declares for Enums
self.genExceptionHelpers(oplist) # helper function to decode user exceptions that have members
self.genExceptionDelegator(oplist) # finds the helper function to decode a user exception
if (len(atlist) > 0):
self.genAttributeHelpers(atlist) # helper function to decode "attributes"
self.genHelpers(oplist,stlist,unlist) # operation, struct and union decode helper functions
self.genMainEntryStart(oplist)
self.genOpDelegator(oplist)
self.genAtDelegator(atlist)
self.genMainEntryEnd()
self.gen_proto_register(oplist, atlist, stlist, unlist)
self.gen_proto_reg_handoff(oplist)
# All the dissectors are now built-in
#self.gen_plugin_register()
#self.dumpvars() # debug
self.genModelines();
#
# genHeader
#
# Generate Standard Wireshark Header Comments
#
#
def genHeader(self):
self.st.out(self.template_Header,dissector_name=self.dissname)
if self.DEBUG:
print "XXX genHeader"
#
# genEthCopyright
#
# Wireshark Copyright Info
#
#
def genEthCopyright(self):
if self.DEBUG:
print "XXX genEthCopyright"
self.st.out(self.template_wireshark_copyright)
#
# genModelines
#
# Modelines info
#
#
def genModelines(self):
if self.DEBUG:
print "XXX genModelines"
self.st.out(self.template_Modelines)
#
# genGPL
#
# GPL license
#
#
def genGPL(self):
if self.DEBUG:
print "XXX genGPL"
self.st.out(self.template_GPL)
#
# genIncludes
#
# GPL license
#
#
def genIncludes(self):
if self.DEBUG:
print "XXX genIncludes"
self.st.out(self.template_Includes)
#
# genOpDeclares()
#
# Generate hf variables for operation filters
#
# in: opnode ( an operation node)
#
def genOpDeclares(self, op):
if self.DEBUG:
print "XXX genOpDeclares"
print "XXX return type = " , op.returnType().kind()
sname = self.namespace(op, "_")
rt = op.returnType()
if (rt.kind() != idltype.tk_void):
if (rt.kind() == idltype.tk_alias): # a typedef return value possibly?
#self.get_CDR_alias(rt, rt.name() )
if (rt.unalias().kind() == idltype.tk_sequence):
self.st.out(self.template_hf, name=sname + "_return_loop")
if (self.isSeqNativeType(rt.unalias().seqType())):
self.st.out(self.template_hf, name=sname + "_return")
elif ((rt.unalias().kind() != idltype.tk_struct) and \
(rt.unalias().kind() != idltype.tk_objref) and \
(rt.unalias().kind() != idltype.tk_any)):
self.st.out(self.template_hf, name=sname + "_return")
elif ((rt.kind() != idltype.tk_struct) and \
(rt.kind() != idltype.tk_objref) and \
(rt.kind() != idltype.tk_union) and \
(rt.kind() != idltype.tk_any)):
self.st.out(self.template_hf, name=sname + "_return")
for p in op.parameters():
if (p.paramType().unalias().kind() == idltype.tk_sequence):
self.st.out(self.template_hf, name=sname + "_" + p.identifier() + "_loop")
if (self.isSeqNativeType(p.paramType().unalias().seqType())):
self.st.out(self.template_hf, name=sname + "_" + p.identifier())
elif ((p.paramType().unalias().kind() != idltype.tk_any) and \
(p.paramType().unalias().kind() != idltype.tk_struct) and \
(p.paramType().unalias().kind() != idltype.tk_objref) and \
(p.paramType().unalias().kind() != idltype.tk_union)):
self.st.out(self.template_hf, name=sname + "_" + p.identifier())
#
# genAtDeclares()
#
# Generate hf variables for attributes
#
# in: at ( an attribute)
#
def genAtDeclares(self, at):
if self.DEBUG:
print "XXX genAtDeclares"
for decl in at.declarators():
sname = self.namespace(decl, "_")
self.st.out(self.template_hf, name="get" + "_" + sname + "_" + decl.identifier())
if not at.readonly():
self.st.out(self.template_hf, name="set" + "_" + sname + "_" + decl.identifier())
#
# genStDeclares()
#
# Generate hf variables for structs
#
# in: st ( a struct)
#
def genStDeclares(self, st):
if self.DEBUG:
print "XXX genStDeclares"
sname = self.namespace(st, "_")
for m in st.members():
if ((self.isSeqNativeType(m.memberType())) or (m.memberType().unalias().kind() == idltype.tk_sequence)):
for decl in m.declarators():
if (m.memberType().unalias().kind() == idltype.tk_sequence):
self.st.out(self.template_hf, name=sname + "_" + decl.identifier() + "_loop")
if (self.isSeqNativeType(m.memberType().unalias().seqType())):
self.st.out(self.template_hf, name=sname + "_" + decl.identifier())
else:
self.st.out(self.template_hf, name=sname + "_" + decl.identifier())
#
# genExDeclares()
#
# Generate hf variables for user exception filters
#
# in: exnode ( an exception node)
#
def genExDeclares(self,ex):
if self.DEBUG:
print "XXX genExDeclares"
sname = self.namespace(ex, "_")
for m in ex.members():
for decl in m.declarators():
if (m.memberType().unalias().kind() == idltype.tk_sequence):
self.st.out(self.template_hf, name=sname + "_" + decl.identifier() + "_loop")
else:
self.st.out(self.template_hf, name=sname + "_" + decl.identifier())
#
# genUnionDeclares()
#
# Generate hf variables for union filters
#
# in: un (a union)
#
def genUnionDeclares(self,un):
if self.DEBUG:
print "XXX genUnionDeclares"
sname = self.namespace(un, "_")
self.st.out(self.template_hf, name=sname + "_" + un.identifier())
for uc in un.cases(): # for all UnionCase objects in this union
for cl in uc.labels(): # for all Caselabel objects in this UnionCase
if (uc.caseType().unalias().kind() == idltype.tk_sequence):
self.st.out(self.template_hf, name=sname + "_" + uc.declarator().identifier() + "_loop")
if (self.isSeqNativeType(uc.caseType().unalias().seqType())):
self.st.out(self.template_hf, name=sname + "_" + uc.declarator().identifier())
elif (self.isSeqNativeType(uc.caseType())):
self.st.out(self.template_hf, name=sname + "_" + uc.declarator().identifier())
#
# genExpertInfoDeclares()
#
# Generate ei variables for expert info filters
#
def genExpertInfoDeclares(self):
if self.DEBUG:
print "XXX genExpertInfoDeclares"
self.st.out(self.template_proto_register_ei_filters, dissector_name=self.dissname)
#
# genDeclares
#
# generate function prototypes if required
#
# Currently this is used for struct and union helper function declarations.
#
def genDeclares(self,oplist,atlist,enlist,stlist,unlist):
if self.DEBUG:
print "XXX genDeclares"
# prototype for operation filters
self.st.out(self.template_hf_operations)
#operation specific filters
if (len(oplist) > 0):
self.st.out(self.template_proto_register_op_filter_comment)
for op in oplist:
self.genOpDeclares(op)
#attribute filters
if (len(atlist) > 0):
self.st.out(self.template_proto_register_at_filter_comment)
for at in atlist:
self.genAtDeclares(at)
#struct filters
if (len(stlist) > 0):
self.st.out(self.template_proto_register_st_filter_comment)
for st in stlist:
self.genStDeclares(st)
# exception List filters
exlist = self.get_exceptionList(oplist) # grab list of exception nodes
if (len(exlist) > 0):
self.st.out(self.template_proto_register_ex_filter_comment)
for ex in exlist:
if (ex.members()): # only if has members
self.genExDeclares(ex)
#union filters
if (len(unlist) > 0):
self.st.out(self.template_proto_register_un_filter_comment)
for un in unlist:
self.genUnionDeclares(un)
#expert info filters
self.genExpertInfoDeclares()
# prototype for start_dissecting()
self.st.out(self.template_prototype_start_dissecting)
# struct prototypes
if len(stlist):
self.st.out(self.template_prototype_struct_start)
for st in stlist:
#print st.repoId()
sname = self.namespace(st, "_")
self.st.out(self.template_prototype_struct_body, stname=st.repoId(),name=sname)
self.st.out(self.template_prototype_struct_end)
# union prototypes
if len(unlist):
self.st.out(self.template_prototype_union_start)
for un in unlist:
sname = self.namespace(un, "_")
self.st.out(self.template_prototype_union_body, unname=un.repoId(),name=sname)
self.st.out(self.template_prototype_union_end)
#
# genPrototype
#
#
def genPrototype(self):
self.st.out(self.template_prototype, dissector_name=self.dissname)
#
# genProtocol
#
#
def genProtocol(self):
self.st.out(self.template_protocol, dissector_name=self.dissname)
self.st.out(self.template_init_boundary)
#
# genMainEntryStart
#
def genMainEntryStart(self,oplist):
self.st.out(self.template_main_dissector_start, dissname=self.dissname, disprot=self.protoname)
self.st.inc_indent()
self.st.out(self.template_main_dissector_switch_msgtype_start)
self.st.out(self.template_main_dissector_switch_msgtype_start_request_reply)
self.st.inc_indent()
#
# genMainEntryEnd
#
def genMainEntryEnd(self):
self.st.out(self.template_main_dissector_switch_msgtype_end_request_reply)
self.st.dec_indent()
self.st.out(self.template_main_dissector_switch_msgtype_all_other_msgtype)
self.st.dec_indent()
self.st.out(self.template_main_dissector_end)
#
# genAtList
#
# in: atlist
#
# out: C code for IDL attribute declarations.
#
# NOTE: Mapping of attributes to operation(function) names is tricky.
#
# The actual accessor function names are language-mapping specific. The attribute name
# is subject to OMG IDL's name scoping rules; the accessor function names are
# guaranteed not to collide with any legal operation names specifiable in OMG IDL.
#
# eg:
#
# static const char get_Penguin_Echo_get_width_at[] = "get_width" ;
# static const char set_Penguin_Echo_set_width_at[] = "set_width" ;
#
# or:
#
# static const char get_Penguin_Echo_get_width_at[] = "_get_width" ;
# static const char set_Penguin_Echo_set_width_at[] = "_set_width" ;
#
# TODO: Implement some language-dependent templates to handle naming conventions
# language <=> attribute. for C, C++. Java etc
#
# OR, just add a runtime GUI option to select language binding for attributes -- FS
#
#
#
# ie: def genAtlist(self,atlist,language)
#
def genAtList(self,atlist):
self.st.out(self.template_comment_attributes_start)
for n in atlist:
for i in n.declarators(): #
sname = self.namespace(i, "_")
atname = i.identifier()
self.st.out(self.template_attributes_declare_Java_get, sname=sname, atname=atname)
if not n.readonly():
self.st.out(self.template_attributes_declare_Java_set, sname=sname, atname=atname)
self.st.out(self.template_comment_attributes_end)
#
# genEnList
#
# in: enlist
#
# out: C code for IDL Enum declarations using "static const value_string" template
#
def genEnList(self,enlist):
self.st.out(self.template_comment_enums_start)
for enum in enlist:
sname = self.namespace(enum, "_")
self.st.out(self.template_comment_enum_comment, ename=enum.repoId())
self.st.out(self.template_value_string_start, valstringname=sname)
for enumerator in enum.enumerators():
self.st.out(self.template_value_string_entry, intval=str(self.valFromEnum(enum,enumerator)), description=enumerator.identifier())
#atname = n.identifier()
self.st.out(self.template_value_string_end, valstringname=sname)
self.st.out(self.template_comment_enums_end)
#
# genExceptionDelegator
#
# in: oplist
#
# out: C code for User exception delegator
#
# eg:
#
#
def genExceptionDelegator(self,oplist):
self.st.out(self.template_main_exception_delegator_start)
self.st.inc_indent()
exlist = self.get_exceptionList(oplist) # grab list of ALL UNIQUE exception nodes
for ex in exlist:
if self.DEBUG:
print "XXX Exception " , ex.repoId()
print "XXX Exception Identifier" , ex.identifier()
print "XXX Exception Scoped Name" , ex.scopedName()
if (ex.members()): # only if has members
sname = self.namespace(ex, "_")
exname = ex.repoId()
self.st.out(self.template_ex_delegate_code, sname=sname, exname=ex.repoId())
self.st.dec_indent()
self.st.out(self.template_main_exception_delegator_end)
#
# genAttributeHelpers()
#
# Generate private helper functions to decode Attributes.
#
# in: atlist
#
# For readonly attribute - generate get_xxx()
# If NOT readonly attribute - also generate set_xxx()
#
def genAttributeHelpers(self,atlist):
if self.DEBUG:
print "XXX genAttributeHelpers: atlist = ", atlist
self.st.out(self.template_attribute_helpers_start)
for attrib in atlist:
for decl in attrib.declarators():
self.genAtHelper(attrib,decl,"get") # get accessor
if not attrib.readonly():
self.genAtHelper(attrib,decl,"set") # set accessor
self.st.out(self.template_attribute_helpers_end)
#
# genAtHelper()
#
# Generate private helper functions to decode an attribute
#
# in: at - attribute node
# in: decl - declarator belonging to this attribute
# in: order - to generate a "get" or "set" helper
def genAtHelper(self,attrib,decl,order):
if self.DEBUG:
print "XXX genAtHelper"
sname = order + "_" + self.namespace(decl, "_") # must use set or get prefix to avoid collision
self.curr_sname = sname # update current opnode/exnode scoped name
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.st.out(self.template_attribute_helper_function_start, sname=sname, atname=decl.repoId())
self.st.inc_indent()
if (len(self.fn_hash[sname]) > 0):
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end )
self.getCDR(attrib.attrType(), sname + "_" + decl.identifier() )
self.st.dec_indent()
self.st.out(self.template_attribute_helper_function_end)
#
# genExceptionHelpers()
#
# Generate private helper functions to decode Exceptions used
# within operations
#
# in: oplist
#
def genExceptionHelpers(self,oplist):
exlist = self.get_exceptionList(oplist) # grab list of exception nodes
if self.DEBUG:
print "XXX genExceptionHelpers: exlist = ", exlist
self.st.out(self.template_exception_helpers_start)
for ex in exlist:
if (ex.members()): # only if has members
#print "XXX Exception = " + ex.identifier()
self.genExHelper(ex)
self.st.out(self.template_exception_helpers_end)
#
# genExHelper()
#
# Generate private helper functions to decode User Exceptions
#
# in: exnode ( an exception node)
#
def genExHelper(self,ex):
if self.DEBUG:
print "XXX genExHelper"
sname = self.namespace(ex, "_")
self.curr_sname = sname # update current opnode/exnode scoped name
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.st.out(self.template_exception_helper_function_start, sname=sname, exname=ex.repoId())
self.st.inc_indent()
if (len(self.fn_hash[sname]) > 0):
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end )
for m in ex.members():
if self.DEBUG:
print "XXX genExhelper, member = ", m, "member type = ", m.memberType()
for decl in m.declarators():
if self.DEBUG:
print "XXX genExhelper, d = ", decl
if decl.sizes(): # an array
indices = self.get_indices_from_sizes(decl.sizes())
string_indices = '%i ' % indices # convert int to string
self.st.out(self.template_get_CDR_array_comment, aname=decl.identifier(), asize=string_indices)
self.st.out(self.template_get_CDR_array_start, aname=decl.identifier(), aval=string_indices)
self.addvar(self.c_i + decl.identifier() + ";")
self.st.inc_indent()
self.getCDR(m.memberType(), sname + "_" + decl.identifier() )
self.st.dec_indent()
self.st.out(self.template_get_CDR_array_end)
else:
self.getCDR(m.memberType(), sname + "_" + decl.identifier() )
self.st.dec_indent()
self.st.out(self.template_exception_helper_function_end)
#
# genHelpers()
#
# Generate private helper functions for each IDL operation.
# Generate private helper functions for each IDL struct.
# Generate private helper functions for each IDL union.
#
#
# in: oplist, stlist, unlist
#
def genHelpers(self,oplist,stlist,unlist):
for op in oplist:
self.genOperation(op)
for st in stlist:
self.genStructHelper(st)
for un in unlist:
self.genUnionHelper(un)
#
# genOperation()
#
# Generate private helper functions for a specific IDL operation.
#
# in: opnode
#
def genOperation(self,opnode):
if self.DEBUG:
print "XXX genOperation called"
sname = self.namespace(opnode, "_")
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.curr_sname = sname # update current opnode's scoped name
opname = opnode.identifier()
self.st.out(self.template_helper_function_comment, repoid=opnode.repoId() )
self.st.out(self.template_helper_function_start, sname=sname)
self.st.inc_indent()
if (len(self.fn_hash[sname]) > 0):
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end )
self.st.out(self.template_helper_switch_msgtype_start)
self.st.out(self.template_helper_switch_msgtype_request_start)
self.st.inc_indent()
self.genOperationRequest(opnode)
self.st.out(self.template_helper_switch_msgtype_request_end)
self.st.dec_indent()
self.st.out(self.template_helper_switch_msgtype_reply_start)
self.st.inc_indent()
self.st.out(self.template_helper_switch_rep_status_start)
self.st.out(self.template_helper_switch_msgtype_reply_no_exception_start)
self.st.inc_indent()
self.genOperationReply(opnode)
self.st.out(self.template_helper_switch_msgtype_reply_no_exception_end)
self.st.dec_indent()
self.st.out(self.template_helper_switch_msgtype_reply_user_exception_start)
self.st.inc_indent()
self.genOpExceptions(opnode)
self.st.out(self.template_helper_switch_msgtype_reply_user_exception_end)
self.st.dec_indent()
self.st.out(self.template_helper_switch_msgtype_reply_default_start, dissector_name=self.dissname)
self.st.out(self.template_helper_switch_msgtype_reply_default_end)
self.st.out(self.template_helper_switch_rep_status_end)
self.st.dec_indent()
self.st.out(self.template_helper_switch_msgtype_default_start, dissector_name=self.dissname)
self.st.out(self.template_helper_switch_msgtype_default_end)
self.st.out(self.template_helper_switch_msgtype_end)
self.st.dec_indent()
self.st.out(self.template_helper_function_end, sname=sname)
#
# Decode function parameters for a GIOP request message
#
#
def genOperationRequest(self,opnode):
for p in opnode.parameters():
if p.is_in():
if self.DEBUG:
print "XXX parameter = " ,p
print "XXX parameter type = " ,p.paramType()
print "XXX parameter type kind = " ,p.paramType().kind()
self.getCDR(p.paramType(), self.curr_sname + "_" + p.identifier())
#
# Decode function parameters for a GIOP reply message
#
def genOperationReply(self,opnode):
rt = opnode.returnType() # get return type
if self.DEBUG:
print "XXX genOperationReply"
print "XXX opnode = " , opnode
print "XXX return type = " , rt
print "XXX return type.unalias = " , rt.unalias()
print "XXX return type.kind() = " , rt.kind();
sname = self.namespace(opnode, "_")
if (rt.kind() == idltype.tk_alias): # a typedef return value possibly?
#self.getCDR(rt.decl().alias().aliasType(),"dummy") # return value maybe a typedef
self.get_CDR_alias(rt, sname + "_return" )
#self.get_CDR_alias(rt, rt.name() )
else:
self.getCDR(rt, sname + "_return") # return value is NOT an alias
for p in opnode.parameters():
if p.is_out(): # out or inout
self.getCDR(p.paramType(), self.curr_sname + "_" + p.identifier())
#self.st.dec_indent()
def genOpExceptions(self,opnode):
for ex in opnode.raises():
if ex.members():
#print ex.members()
for m in ex.members():
t=0
#print m.memberType(), m.memberType().kind()
#
# Delegator for Operations
#
def genOpDelegator(self,oplist):
for op in oplist:
iname = "/".join(op.scopedName()[:-1])
opname = op.identifier()
sname = self.namespace(op, "_")
self.st.out(self.template_op_delegate_code, interface=iname, sname=sname, opname=opname)
#
# Delegator for Attributes
#
def genAtDelegator(self,atlist):
for a in atlist:
for i in a.declarators():
atname = i.identifier()
sname = self.namespace(i, "_")
self.st.out(self.template_at_delegate_code_get, sname=sname)
if not a.readonly():
self.st.out(self.template_at_delegate_code_set, sname=sname)
#
# Add a variable declaration to the hash of list
#
def addvar(self, var):
if not ( var in self.fn_hash[self.curr_sname] ):
self.fn_hash[self.curr_sname].append(var)
#
# Print the variable declaration from the hash of list
#
def dumpvars(self):
for fn in self.fn_hash.keys():
print "FN = " + fn
for v in self.fn_hash[fn]:
print "-> " + v
#
# Print the "C" variable declaration from the hash of list
# for a given scoped operation name (eg: tux_penguin_eat)
#
def dumpCvars(self, sname):
for v in self.fn_hash[sname]:
self.st.out(v)
#
# Given an enum node and an enumerator node, return
# the enumerator's numerical value.
#
# eg: enum Color {red,green,blue} should return
# val = 1 for green
#
def valFromEnum(self,enumNode, enumeratorNode):
if self.DEBUG:
print "XXX valFromEnum, enumNode = ", enumNode, " from ", enumNode.repoId()
print "XXX valFromEnum, enumeratorNode = ", enumeratorNode, " from ", enumeratorNode.repoId()
if isinstance(enumeratorNode,idlast.Enumerator):
value = enumNode.enumerators().index(enumeratorNode)
return value
## tk_null = 0
## tk_void = 1
## tk_short = 2
## tk_long = 3
## tk_ushort = 4
## tk_ulong = 5
## tk_float = 6
## tk_double = 7
## tk_boolean = 8
## tk_char = 9
## tk_octet = 10
## tk_any = 11
## tk_TypeCode = 12
## tk_Principal = 13
## tk_objref = 14
## tk_struct = 15
## tk_union = 16
## tk_enum = 17
## tk_string = 18
## tk_sequence = 19
## tk_array = 20
## tk_alias = 21
## tk_except = 22
## tk_longlong = 23
## tk_ulonglong = 24
## tk_longdouble = 25
## tk_wchar = 26
## tk_wstring = 27
## tk_fixed = 28
## tk_value = 29
## tk_value_box = 30
## tk_native = 31
## tk_abstract_interface = 32
#
# isSeqNativeType()
#
# Return true for "native" datatypes that will generate a direct proto_tree_add_xxx
# call for a sequence. Used to determine if a separate hf variable is needed for
# the loop over the sequence
def isSeqNativeType(self,type):
pt = type.unalias().kind() # param CDR type
if self.DEBUG:
print "XXX isSeqNativeType: kind = " , pt
if pt == idltype.tk_ulong:
return 1
elif pt == idltype.tk_longlong:
return 1
elif pt == idltype.tk_ulonglong:
return 1
elif pt == idltype.tk_short:
return 1
elif pt == idltype.tk_long:
return 1
elif pt == idltype.tk_ushort:
return 1
elif pt == idltype.tk_float:
return 1
elif pt == idltype.tk_double:
return 1
elif pt == idltype.tk_boolean:
return 1
elif pt == idltype.tk_octet:
return 1
elif pt == idltype.tk_enum:
return 1
elif pt == idltype.tk_string:
return 1
elif pt == idltype.tk_wstring:
return 1
elif pt == idltype.tk_wchar:
return 1
elif pt == idltype.tk_char:
return 1
else:
return 0
#
# getCDR()
#
# This is the main "iterator" function. It takes a node, and tries to output
# a get_CDR_XXX accessor method(s). It can call itself multiple times
# if I find nested structures etc.
#
def getCDR(self,type,name="fred"):
pt = type.unalias().kind() # param CDR type
pn = name # param name
if self.DEBUG:
print "XXX getCDR: kind = " , pt
print "XXX getCDR: name = " , pn
if pt == idltype.tk_ulong:
self.get_CDR_ulong(pn)
elif pt == idltype.tk_longlong:
self.get_CDR_longlong(pn)
elif pt == idltype.tk_ulonglong:
self.get_CDR_ulonglong(pn)
elif pt == idltype.tk_void:
self.get_CDR_void(pn)
elif pt == idltype.tk_short:
self.get_CDR_short(pn)
elif pt == idltype.tk_long:
self.get_CDR_long(pn)
elif pt == idltype.tk_ushort:
self.get_CDR_ushort(pn)
elif pt == idltype.tk_float:
self.get_CDR_float(pn)
elif pt == idltype.tk_double:
self.get_CDR_double(pn)
elif pt == idltype.tk_fixed:
self.get_CDR_fixed(type.unalias(),pn)
elif pt == idltype.tk_boolean:
self.get_CDR_boolean(pn)
elif pt == idltype.tk_char:
self.get_CDR_char(pn)
elif pt == idltype.tk_octet:
self.get_CDR_octet(pn)
elif pt == idltype.tk_any:
self.get_CDR_any(pn)
elif pt == idltype.tk_string:
self.get_CDR_string(pn)
elif pt == idltype.tk_wstring:
self.get_CDR_wstring(pn)
elif pt == idltype.tk_wchar:
self.get_CDR_wchar(pn)
elif pt == idltype.tk_enum:
#print type.decl()
self.get_CDR_enum(pn,type)
#self.get_CDR_enum(pn)
elif pt == idltype.tk_struct:
self.get_CDR_struct(type,pn)
elif pt == idltype.tk_TypeCode: # will I ever get here ?
self.get_CDR_TypeCode(pn)
elif pt == idltype.tk_sequence:
if type.unalias().seqType().kind() == idltype.tk_octet:
self.get_CDR_sequence_octet(type,pn)
else:
self.get_CDR_sequence(type,pn)
elif pt == idltype.tk_objref:
self.get_CDR_objref(type,pn)
elif pt == idltype.tk_array:
pn = pn # Supported elsewhere
elif pt == idltype.tk_union:
self.get_CDR_union(type,pn)
elif pt == idltype.tk_alias:
if self.DEBUG:
print "XXXXX Alias type XXXXX " , type
self.get_CDR_alias(type,pn)
else:
self.genWARNING("Unknown typecode = " + '%i ' % pt) # put comment in source code
#
# get_CDR_XXX methods are here ..
#
#
def get_CDR_ulong(self,pn):
self.st.out(self.template_get_CDR_ulong, hfname=pn)
def get_CDR_short(self,pn):
self.st.out(self.template_get_CDR_short, hfname=pn)
def get_CDR_void(self,pn):
self.st.out(self.template_get_CDR_void, hfname=pn)
def get_CDR_long(self,pn):
self.st.out(self.template_get_CDR_long, hfname=pn)
def get_CDR_ushort(self,pn):
self.st.out(self.template_get_CDR_ushort, hfname=pn)
def get_CDR_float(self,pn):
self.st.out(self.template_get_CDR_float, hfname=pn)
def get_CDR_double(self,pn):
self.st.out(self.template_get_CDR_double, hfname=pn)
def get_CDR_longlong(self,pn):
self.st.out(self.template_get_CDR_longlong, hfname=pn)
def get_CDR_ulonglong(self,pn):
self.st.out(self.template_get_CDR_ulonglong, hfname=pn)
def get_CDR_boolean(self,pn):
self.st.out(self.template_get_CDR_boolean, hfname=pn)
def get_CDR_fixed(self,type,pn):
if self.DEBUG:
print "XXXX calling get_CDR_fixed, type = ", type
print "XXXX calling get_CDR_fixed, type.digits() = ", type.digits()
print "XXXX calling get_CDR_fixed, type.scale() = ", type.scale()
string_digits = '%i ' % type.digits() # convert int to string
string_scale = '%i ' % type.scale() # convert int to string
string_length = '%i ' % self.dig_to_len(type.digits()) # how many octets to highlight for a number of digits
self.st.out(self.template_get_CDR_fixed, varname=pn, digits=string_digits, scale=string_scale, length=string_length )
self.addvar(self.c_seq)
def get_CDR_char(self,pn):
self.st.out(self.template_get_CDR_char, hfname=pn)
def get_CDR_octet(self,pn):
self.st.out(self.template_get_CDR_octet, hfname=pn)
def get_CDR_any(self,pn):
self.st.out(self.template_get_CDR_any, varname=pn)
def get_CDR_enum(self,pn,type):
#self.st.out(self.template_get_CDR_enum, hfname=pn)
sname = self.namespace(type.unalias(), "_")
self.st.out(self.template_get_CDR_enum_symbolic, valstringarray=sname,hfname=pn)
self.addvar(self.c_u_octet4)
def get_CDR_string(self,pn):
self.st.out(self.template_get_CDR_string, hfname=pn)
def get_CDR_wstring(self,pn):
self.st.out(self.template_get_CDR_wstring, varname=pn)
self.addvar(self.c_u_octet4)
self.addvar(self.c_seq)
def get_CDR_wchar(self,pn):
self.st.out(self.template_get_CDR_wchar, varname=pn)
self.addvar(self.c_s_octet1)
self.addvar(self.c_seq)
def get_CDR_TypeCode(self,pn):
self.st.out(self.template_get_CDR_TypeCode, varname=pn)
self.addvar(self.c_u_octet4)
def get_CDR_objref(self,type,pn):
self.st.out(self.template_get_CDR_object)
def get_CDR_union(self,type,pn):
if self.DEBUG:
print "XXX Union type =" , type, " pn = ",pn
print "XXX Union type.decl()" , type.decl()
print "XXX Union Scoped Name" , type.scopedName()
# If I am a typedef union {..}; node then find the union node
if isinstance(type.decl(), idlast.Declarator):
ntype = type.decl().alias().aliasType().decl()
else:
ntype = type.decl() # I am a union node
if self.DEBUG:
print "XXX Union ntype =" , ntype
sname = self.namespace(ntype, "_")
self.st.out(self.template_union_start, name=sname )
# Output a call to the union helper function so I can handle recursive union also.
self.st.out(self.template_decode_union,name=sname)
self.st.out(self.template_union_end, name=sname )
#
# getCDR_hf()
#
# This takes a node, and tries to output the appropriate item for the
# hf array.
#
def getCDR_hf(self,type,desc,filter,hf_name="fred"):
pt = type.unalias().kind() # param CDR type
pn = hf_name # param name
if self.DEBUG:
print "XXX getCDR_hf: kind = " , pt
print "XXX getCDR_hf: name = " , pn
if pt == idltype.tk_ulong:
self.get_CDR_ulong_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_longlong:
self.get_CDR_longlong_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_ulonglong:
self.get_CDR_ulonglong_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_void:
pt = pt # no hf_ variables needed
elif pt == idltype.tk_short:
self.get_CDR_short_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_long:
self.get_CDR_long_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_ushort:
self.get_CDR_ushort_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_float:
self.get_CDR_float_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_double:
self.get_CDR_double_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_fixed:
pt = pt # no hf_ variables needed
elif pt == idltype.tk_boolean:
self.get_CDR_boolean_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_char:
self.get_CDR_char_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_octet:
self.get_CDR_octet_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_any:
pt = pt # no hf_ variables needed
elif pt == idltype.tk_string:
self.get_CDR_string_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_wstring:
self.get_CDR_wstring_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_wchar:
self.get_CDR_wchar_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_enum:
self.get_CDR_enum_hf(pn, type, desc, filter, self.dissname)
elif pt == idltype.tk_struct:
pt = pt # no hf_ variables needed (should be already contained in struct members)
elif pt == idltype.tk_TypeCode: # will I ever get here ?
self.get_CDR_TypeCode_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_sequence:
if type.unalias().seqType().kind() == idltype.tk_octet:
self.get_CDR_sequence_octet_hf(type, pn, desc, filter, self.dissname)
else:
self.get_CDR_sequence_hf(type, pn, desc, filter, self.dissname)
elif pt == idltype.tk_objref:
pt = pt # no object specific hf_ variables used, use generic ones from giop dissector
elif pt == idltype.tk_array:
pt = pt # Supported elsewhere
elif pt == idltype.tk_union:
pt = pt # no hf_ variables needed (should be already contained in union members)
elif pt == idltype.tk_alias:
if self.DEBUG:
print "XXXXX Alias type hf XXXXX " , type
self.get_CDR_alias_hf(type,pn)
else:
self.genWARNING("Unknown typecode = " + '%i ' % pt) # put comment in source code
#
# get_CDR_XXX_hf methods are here ..
#
#
def get_CDR_ulong_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_ulong_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_short_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_short_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_long_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_long_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_ushort_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_ushort_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_float_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_float_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_double_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_double_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_longlong_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_longlong_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_ulonglong_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_ulonglong_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_boolean_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_boolean_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_char_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_char_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_octet_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_octet_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_enum_hf(self,pn,type,desc,filter,diss):
sname = self.namespace(type.unalias(), "_")
self.st.out(self.template_get_CDR_enum_symbolic_hf, valstringarray=sname,hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_string_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_string_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_wstring_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_wstring_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
# self.addvar(self.c_u_octet4)
# self.addvar(self.c_seq)
def get_CDR_wchar_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_wchar_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
# self.addvar(self.c_s_octet1)
# self.addvar(self.c_seq)
def get_CDR_TypeCode_hf(self,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_TypeCode_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_sequence_octet_hf(self,type,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_sequence_octet_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_sequence_hf(self,type,pn,desc,filter,diss):
self.st.out(self.template_get_CDR_sequence_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
if (self.isSeqNativeType(type.unalias().seqType())):
self.getCDR_hf(type.unalias().seqType(),desc,filter,pn)
def get_CDR_alias_hf(self,type,pn):
if self.DEBUG:
print "XXX get_CDR_alias_hf, type = " ,type , " pn = " , pn
print "XXX get_CDR_alias_hf, type.decl() = " ,type.decl()
print "XXX get_CDR_alias_hf, type.decl().alias() = " ,type.decl().alias()
decl = type.decl() # get declarator object
if (decl.sizes()): # a typedef array
#indices = self.get_indices_from_sizes(decl.sizes())
#string_indices = '%i ' % indices # convert int to string
#self.st.out(self.template_get_CDR_array_comment, aname=pn, asize=string_indices)
#self.st.out(self.template_get_CDR_array_start, aname=pn, aval=string_indices)
#self.addvar(self.c_i + pn + ";")
#self.st.inc_indent()
self.getCDR_hf(type.decl().alias().aliasType(), pn )
#self.st.dec_indent()
#self.st.out(self.template_get_CDR_array_end)
else: # a simple typedef
if self.DEBUG:
print "XXX get_CDR_alias_hf, type = " ,type , " pn = " , pn
print "XXX get_CDR_alias_hf, type.decl() = " ,type.decl()
self.getCDR_hf(type, decl.identifier() )
#
# Code to generate Union Helper functions
#
# in: un - a union node
#
#
def genUnionHelper(self,un):
if self.DEBUG:
print "XXX genUnionHelper called"
print "XXX Union type =" , un
print "XXX Union type.switchType()" , un.switchType()
print "XXX Union Scoped Name" , un.scopedName()
sname = self.namespace(un, "_")
self.curr_sname = sname # update current opnode/exnode/stnode/unnode scoped name
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.st.out(self.template_union_helper_function_start, sname=sname, unname=un.repoId())
self.st.inc_indent()
if (len(self.fn_hash[sname]) > 0):
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end )
st = un.switchType().unalias() # may be typedef switch type, so find real type
self.st.out(self.template_comment_union_code_start, uname=un.repoId() )
self.getCDR(st, sname + "_" + un.identifier());
# Depending on what kind of discriminant I come across (enum, integer, char,
# short, boolean), make sure I cast the return value of the get_XXX accessor
# to an appropriate value. Omniidl idlast.CaseLabel.value() accessor will
# return an integer, or an Enumerator object that is then converted to its
# integer equivalent.
#
#
# NOTE - May be able to skip some of this stuff, but leave it in for now -- FS
#
if (st.kind() == idltype.tk_enum):
std = st.decl()
self.st.out(self.template_comment_union_code_discriminant, uname=std.repoId() )
#count the number of cases to ensure variable is needed
num = 0
num_defaults = 0
for uc in un.cases(): # for all UnionCase objects in this union
num += len(uc.labels())
for cl in uc.labels():
if cl.default():
num_defaults += 1
if ((num != 1) or (num_defaults != 1)):
self.st.out(self.template_union_code_save_discriminant_enum, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
elif (st.kind() == idltype.tk_long):
self.st.out(self.template_union_code_save_discriminant_long, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
elif (st.kind() == idltype.tk_ulong):
self.st.out(self.template_union_code_save_discriminant_ulong, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
elif (st.kind() == idltype.tk_short):
self.st.out(self.template_union_code_save_discriminant_short, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
elif (st.kind() == idltype.tk_ushort):
self.st.out(self.template_union_code_save_discriminant_ushort, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
elif (st.kind() == idltype.tk_boolean):
self.st.out(self.template_union_code_save_discriminant_boolean, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
elif (st.kind() == idltype.tk_char):
self.st.out(self.template_union_code_save_discriminant_char, discname=un.identifier() )
self.addvar(self.c_s_disc + un.identifier() + ";")
else:
print "XXX Unknown st.kind() = ", st.kind()
#
# Loop over all cases in this union
#
for uc in un.cases(): # for all UnionCase objects in this union
for cl in uc.labels(): # for all Caselabel objects in this UnionCase
# get integer value, even if discriminant is
# an Enumerator node
if isinstance(cl.value(),idlast.Enumerator):
if self.DEBUG:
print "XXX clv.identifier()", cl.value().identifier()
print "XXX clv.repoId()", cl.value().repoId()
print "XXX clv.scopedName()", cl.value().scopedName()
# find index of enumerator in enum declaration
# eg: RED is index 0 in enum Colors { RED, BLUE, GREEN }
clv = self.valFromEnum(std,cl.value())
else:
clv = cl.value()
#print "XXX clv = ",clv
#
# if char, don't convert to int, but put it inside single quotes so that it is understood by C.
# eg: if (disc == 'b')..
#
# TODO : handle \xxx chars generically from a function or table lookup rather than
# a whole bunch of "if" statements. -- FS
if (st.kind() == idltype.tk_char):
if (clv == '\n'): # newline
string_clv = "'\\n'"
elif (clv == '\t'): # tab
string_clv = "'\\t'"
else:
string_clv = "'" + clv + "'"
else:
string_clv = '%i ' % clv
#
# If default case, then skip the comparison with the discriminant
#
if not cl.default():
self.st.out(self.template_comment_union_code_label_compare_start, discname=un.identifier(),labelval=string_clv )
self.st.inc_indent()
else:
self.st.out(self.template_comment_union_code_label_default_start )
self.getCDR(uc.caseType(),sname + "_" + uc.declarator().identifier())
if not cl.default():
self.st.dec_indent()
self.st.out(self.template_comment_union_code_label_compare_end )
else:
self.st.out(self.template_comment_union_code_label_default_end )
self.st.dec_indent()
self.st.out(self.template_union_helper_function_end)
#
# Currently, get_CDR_alias is geared to finding typedefs
#
def get_CDR_alias(self,type,pn):
if self.DEBUG:
print "XXX get_CDR_alias, type = " ,type , " pn = " , pn
print "XXX get_CDR_alias, type.decl() = " ,type.decl()
print "XXX get_CDR_alias, type.decl().alias() = " ,type.decl().alias()
decl = type.decl() # get declarator object
if (decl.sizes()): # a typedef array
indices = self.get_indices_from_sizes(decl.sizes())
string_indices = '%i ' % indices # convert int to string
self.st.out(self.template_get_CDR_array_comment, aname=pn, asize=string_indices)
self.st.out(self.template_get_CDR_array_start, aname=pn, aval=string_indices)
self.addvar(self.c_i + pn + ";")
self.st.inc_indent()
self.getCDR(type.decl().alias().aliasType(), pn )
self.st.dec_indent()
self.st.out(self.template_get_CDR_array_end)
else: # a simple typedef
self.getCDR(type, pn )
#
# Handle structs, including recursive
#
def get_CDR_struct(self,type,pn):
# If I am a typedef struct {..}; node then find the struct node
if isinstance(type.decl(), idlast.Declarator):
ntype = type.decl().alias().aliasType().decl()
else:
ntype = type.decl() # I am a struct node
sname = self.namespace(ntype, "_")
self.st.out(self.template_structure_start, name=sname )
# Output a call to the struct helper function so I can handle recursive structs also.
self.st.out(self.template_decode_struct,name=sname)
self.st.out(self.template_structure_end, name=sname )
#
# genStructHelper()
#
# Generate private helper functions to decode a struct
#
# in: stnode ( a struct node)
#
def genStructHelper(self,st):
if self.DEBUG:
print "XXX genStructHelper"
sname = self.namespace(st, "_")
self.curr_sname = sname # update current opnode/exnode/stnode scoped name
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.st.out(self.template_struct_helper_function_start, sname=sname, stname=st.repoId())
self.st.inc_indent()
if (len(self.fn_hash[sname]) > 0):
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end )
for m in st.members():
for decl in m.declarators():
if decl.sizes(): # an array
indices = self.get_indices_from_sizes(decl.sizes())
string_indices = '%i ' % indices # convert int to string
self.st.out(self.template_get_CDR_array_comment, aname=decl.identifier(), asize=string_indices)
self.st.out(self.template_get_CDR_array_start, aname=decl.identifier(), aval=string_indices)
self.addvar(self.c_i + decl.identifier() + ";")
self.st.inc_indent()
self.getCDR(m.memberType(), sname + "_" + decl.identifier() )
self.st.dec_indent()
self.st.out(self.template_get_CDR_array_end)
else:
self.getCDR(m.memberType(), sname + "_" + decl.identifier() )
self.st.dec_indent()
self.st.out(self.template_struct_helper_function_end)
#
# Generate code to access a sequence of a type
#
def get_CDR_sequence(self,type,pn):
self.st.out(self.template_get_CDR_sequence_length, seqname=pn )
self.st.out(self.template_get_CDR_sequence_loop_start, seqname=pn )
self.addvar(self.c_i_lim + pn + ";" )
self.addvar(self.c_i + pn + ";")
self.st.inc_indent()
self.getCDR(type.unalias().seqType(), pn ) # and start all over with the type
self.st.dec_indent()
self.st.out(self.template_get_CDR_sequence_loop_end)
#
# Generate code to access a sequence of octet
#
def get_CDR_sequence_octet(self,type, pn):
self.st.out(self.template_get_CDR_sequence_length, seqname=pn)
self.st.out(self.template_get_CDR_sequence_octet, seqname=pn)
self.addvar(self.c_i_lim + pn + ";")
self.addvar("gchar * binary_seq_" + pn + ";")
self.addvar("gchar * text_seq_" + pn + ";")
#
# namespace()
#
# in - op node
#
# out - scoped operation name, using sep character instead of "::"
#
# eg: Penguin::Echo::echoWString => Penguin_Echo_echoWString if sep = "_"
#
#
def namespace(self,node,sep):
sname = string.replace(idlutil.ccolonName(node.scopedName()), '::', sep)
#print "XXX namespace: sname = " + sname
return sname
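#
# Illustrative sketch only (not used by the generator): the same "::" to
# separator mapping that namespace() performs, shown on a literal scoped name.
#
def _namespace_example():
    scoped = "Penguin::Echo::echoWString"
    return scoped.replace("::", "_")  # -> "Penguin_Echo_echoWString"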
#
# generate code for plugin initialisation
#
def gen_plugin_register(self):
self.st.out(self.template_plugin_register, description=self.description, protocol_name=self.protoname, dissector_name=self.dissname)
#
# generate register_giop_user_module code, and register only
# unique interfaces that contain operations. Also output
# a heuristic register in case we want to use that.
#
# TODO - make this a command line option
#
# -e explicit
# -h heuristic
#
def gen_proto_reg_handoff(self, oplist):
self.st.out(self.template_proto_reg_handoff_start, dissector_name=self.dissname)
self.st.inc_indent()
for iname in self.get_intlist(oplist):
self.st.out(self.template_proto_reg_handoff_body, dissector_name=self.dissname, protocol_name=self.protoname, interface=iname )
self.st.out(self.template_proto_reg_handoff_heuristic, dissector_name=self.dissname, protocol_name=self.protoname)
self.st.dec_indent()
self.st.out(self.template_proto_reg_handoff_end)
#
# generate hf_ array element for operation, attribute, enums, struct and union lists
#
def genOp_hf(self,op):
sname = self.namespace(op, "_")
opname = sname[string.find(sname, "_")+1:]
opname = opname[:string.find(opname, "_")]
rt = op.returnType()
if (rt.kind() != idltype.tk_void):
if (rt.kind() == idltype.tk_alias): # a typdef return val possibly ?
self.getCDR_hf(rt, rt.name(),\
opname + "." + op.identifier() + ".return", sname + "_return")
else:
self.getCDR_hf(rt, "Return value",\
opname + "." + op.identifier() + ".return", sname + "_return")
for p in op.parameters():
self.getCDR_hf(p.paramType(), p.identifier(),\
opname + "." + op.identifier() + "." + p.identifier(), sname + "_" + p.identifier())
def genAt_hf(self,at):
for decl in at.declarators():
sname = self.namespace(decl, "_")
atname = sname[string.find(sname, "_")+1:]
atname = atname[:string.find(atname, "_")]
self.getCDR_hf(at.attrType(), decl.identifier(),\
atname + "." + decl.identifier() + ".get", "get" + "_" + sname + "_" + decl.identifier())
if not at.readonly():
self.getCDR_hf(at.attrType(), decl.identifier(),\
atname + "." + decl.identifier() + ".set", "set" + "_" + sname + "_" + decl.identifier())
def genSt_hf(self,st):
sname = self.namespace(st, "_")
stname = sname[string.find(sname, "_")+1:]
stname = stname[:string.find(stname, "_")]
for m in st.members():
for decl in m.declarators():
self.getCDR_hf(m.memberType(), st.identifier() + "_" + decl.identifier(),\
st.identifier() + "." + decl.identifier(), sname + "_" + decl.identifier())
def genEx_hf(self,ex):
sname = self.namespace(ex, "_")
exname = sname[string.find(sname, "_")+1:]
exname = exname[:string.find(exname, "_")]
for m in ex.members():
for decl in m.declarators():
self.getCDR_hf(m.memberType(), ex.identifier() + "_" + decl.identifier(),\
exname + "." + ex.identifier() + "_" + decl.identifier(), sname + "_" + decl.identifier())
def genUnion_hf(self,un):
sname = self.namespace(un, "_")
unname = sname[:string.rfind(sname, "_")]
unname = string.replace(unname, "_", ".")
self.getCDR_hf(un.switchType().unalias(), un.identifier(),\
unname + "." + un.identifier(), sname + "_" + un.identifier())
for uc in un.cases(): # for all UnionCase objects in this union
for cl in uc.labels(): # for all Caselabel objects in this UnionCase
self.getCDR_hf(uc.caseType(), un.identifier() + "_" + uc.declarator().identifier(),\
unname + "." + un.identifier() + "." + uc.declarator().identifier(),\
sname + "_" + uc.declarator().identifier())
#
# generate proto_register_<protoname> code,
#
# in - oplist[], atlist[], stlist[], unlist[]
#
def gen_proto_register(self, oplist, atlist, stlist, unlist):
self.st.out(self.template_proto_register_start, dissector_name=self.dissname)
#operation specific filters
self.st.out(self.template_proto_register_op_filter_comment)
for op in oplist:
self.genOp_hf(op)
#attribute filters
self.st.out(self.template_proto_register_at_filter_comment)
for at in atlist:
self.genAt_hf(at)
#struct filters
self.st.out(self.template_proto_register_st_filter_comment)
for st in stlist:
if (st.members()): # only if has members
self.genSt_hf(st)
# exception List filters
exlist = self.get_exceptionList(oplist) # grab list of exception nodes
self.st.out(self.template_proto_register_ex_filter_comment)
for ex in exlist:
if (ex.members()): # only if has members
self.genEx_hf(ex)
# Union filters
self.st.out(self.template_proto_register_un_filter_comment)
for un in unlist:
self.genUnion_hf(un)
self.st.out(self.template_proto_register_end, description=self.description, protocol_name=self.protoname, dissector_name=self.dissname)
#
# in - oplist[]
#
# out - a list of unique interface names. This will be used in
# register_giop_user_module(dissect_giop_auto, "TEST IDL", "Penguin/Echo" ); so the operation
# name must be removed from the scope. And we also only want unique interfaces.
#
def get_intlist(self,oplist):
int_hash = {} # holds a hash of unique interfaces
for op in oplist:
sc = op.scopedName() # eg: penguin,tux,bite
sc1 = sc[:-1] # drop last entry
sn = idlutil.slashName(sc1) # penguin/tux
if not int_hash.has_key(sn):
int_hash[sn] = 0; # dummy val, but at least key is unique
ret = int_hash.keys()
ret.sort()
return ret
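#
# Hedged illustration (not called by the generator): how an operation's scoped
# name is reduced to its interface, mirroring idlutil.slashName() on a plain list.
#
def _interface_name_example():
    sc = ["Penguin", "Echo", "echoString"]  # operation scoped name
    return "/".join(sc[:-1])  # -> "Penguin/Echo"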
#
# in - oplist[]
#
# out - a list of exception nodes (unique). This will be used
# to generate dissect_exception_XXX functions.
#
def get_exceptionList(self,oplist):
ex_hash = {} # holds a hash of unique exceptions.
for op in oplist:
for ex in op.raises():
if not ex_hash.has_key(ex):
ex_hash[ex] = 0; # dummy val, but at least key is unique
if self.DEBUG:
print "XXX Exception = " + ex.identifier()
ret = ex_hash.keys()
ret.sort()
return ret
#
# Simple function to take a list of array sizes and find the
# total number of elements
#
#
# eg: temp[4][3] = 12 elements
#
def get_indices_from_sizes(self,sizelist):
val = 1;
for i in sizelist:
val = val * i
return val
#
# Determine how many octets contain the requested number
# of digits for a "fixed" IDL type "on the wire"
#
def dig_to_len(self,dignum):
return (dignum/2) + 1
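#
# Hedged example: CDR packs two decimal digits per octet with the sign in the
# final half-octet, so an IDL fixed<5,2> needs dig_to_len(5) == 3 octets.
#
def _fixed_length_example():
    digits = 5  # e.g. fixed<5,2>
    return (digits // 2) + 1  # 3 octets on the wire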
#
# Output some TODO comment
#
def genTODO(self,message):
self.st.out(self.template_debug_TODO, message=message)
#
# Output some WARNING comment
#
def genWARNING(self,message):
self.st.out(self.template_debug_WARNING, message=message)
#
# Templates for C code
#
template_helper_function_comment = """\
/*
* @repoid@
*/"""
template_helper_function_vars_start = """\
/* Operation specific Variable declarations Begin */"""
template_helper_function_vars_end = """\
/* Operation specific Variable declarations End */
(void)item; /* Avoid coverity param_set_but_unused parse warning */
"""
template_helper_function_start = """\
static void
decode_@sname@(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{"""
template_helper_function_end = """\
}
"""
#
# proto_reg_handoff() templates
#
template_proto_reg_handoff_start = """\
/* register me as handler for these interfaces */
void proto_reg_handoff_giop_@dissector_name@(void)
{"""
template_proto_reg_handoff_body = """\
/* Register for Explicit Dissection */
register_giop_user_module(dissect_@dissector_name@, \"@protocol_name@\", \"@interface@\", proto_@dissector_name@ ); /* explicit dissector */
"""
template_proto_reg_handoff_heuristic = """\
/* Register for Heuristic Dissection */
register_giop_user(dissect_@dissector_name@, \"@protocol_name@\" ,proto_@dissector_name@); /* heuristic dissector */
"""
template_proto_reg_handoff_end = """\
}
"""
#
# Prototype
#
template_prototype = """
void proto_register_giop_@dissector_name@(void);
void proto_reg_handoff_giop_@dissector_name@(void);"""
#
# Initialize the protocol
#
template_protocol = """
/* Initialise the protocol and subtree pointers */
static int proto_@dissector_name@ = -1;
static gint ett_@dissector_name@ = -1;
"""
#
# Initialize the boundary Alignment
#
template_init_boundary = """
/* Initialise the initial Alignment */
static guint32 boundary = GIOP_HEADER_SIZE; /* initial value */"""
#
# plugin_register and plugin_reg_handoff templates
#
template_plugin_register = """
#if 0
WS_DLL_PUBLIC_DEF void
plugin_register(void)
{
if (proto_@dissector_name@ == -1) {
proto_register_giop_@dissector_name@();
}
}
WS_DLL_PUBLIC_DEF void
plugin_reg_handoff(void){
proto_register_handoff_giop_@dissector_name@();
}
#endif
"""
#
# proto_register_<dissector name>(void) templates
#
template_proto_register_start = """
/* Register the protocol with Wireshark */
void proto_register_giop_@dissector_name@(void)
{
/* setup list of header fields */
static hf_register_info hf[] = {
/* field that indicates the currently ongoing request/reply exchange */
{&hf_operationrequest, {"Request_Operation","giop-@[email protected]_Operation",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_proto_register_end = """
};
static ei_register_info ei[] = {
{ &ei_@dissector_name@_unknown_giop_msg, { "giop-@[email protected]_giop_msg", PI_PROTOCOL, PI_WARN, "Unknown GIOP message", EXPFILL }},
{ &ei_@dissector_name@_unknown_exception, { "giop-@[email protected]_exception", PI_PROTOCOL, PI_WARN, "Unknown exception", EXPFILL }},
{ &ei_@dissector_name@_unknown_reply_status, { "giop-@[email protected]_reply_status", PI_PROTOCOL, PI_WARN, "Unknown reply status", EXPFILL }},
};
/* setup protocol subtree array */
static gint *ett[] = {
&ett_@dissector_name@,
};
expert_module_t* expert_@dissector_name@;
/* Register the protocol name and description */
proto_@dissector_name@ = proto_register_protocol(\"@description@\" , \"@protocol_name@\", \"giop-@dissector_name@\" );
proto_register_field_array(proto_@dissector_name@, hf, array_length(hf));
proto_register_subtree_array(ett, array_length(ett));
expert_@dissector_name@ = expert_register_protocol(proto_@dissector_name@);
expert_register_field_array(expert_@dissector_name@, ei, array_length(ei));
}
"""
template_proto_register_op_filter_comment = """\
/* Operation filters */"""
template_proto_register_at_filter_comment = """\
/* Attribute filters */"""
template_proto_register_st_filter_comment = """\
/* Struct filters */"""
template_proto_register_ex_filter_comment = """\
/* User exception filters */"""
template_proto_register_un_filter_comment = """\
/* Union filters */"""
template_proto_register_ei_filters = """\
/* Expert info filters */
static expert_field ei_@dissector_name@_unknown_giop_msg = EI_INIT;
static expert_field ei_@dissector_name@_unknown_exception = EI_INIT;
static expert_field ei_@dissector_name@_unknown_reply_status = EI_INIT;
"""
#
# template for delegation code
#
template_op_delegate_code = """\
if (strcmp(operation, "@opname@") == 0
&& (!idlname || strcmp(idlname, \"@interface@\") == 0)) {
item = process_RequestOperation(tvb, pinfo, ptree, header, operation); /* fill-up Request_Operation field & info column */
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_@sname@(tvb, pinfo, tree, item, offset, header, operation, stream_is_big_endian);
return TRUE;
}
"""
#
# Templates for the helper functions
#
#
#
template_helper_switch_msgtype_start = """\
switch(header->message_type) {"""
template_helper_switch_msgtype_default_start = """\
default:
/* Unknown GIOP Message */
expert_add_info_format(pinfo, item, &ei_@dissector_name@_unknown_giop_msg, "Unknown GIOP message %d", header->message_type);"""
template_helper_switch_msgtype_default_end = """\
break;"""
template_helper_switch_msgtype_end = """\
} /* switch(header->message_type) */"""
template_helper_switch_msgtype_request_start = """\
case Request:"""
template_helper_switch_msgtype_request_end = """\
break;"""
template_helper_switch_msgtype_reply_start = """\
case Reply:"""
template_helper_switch_msgtype_reply_no_exception_start = """\
case NO_EXCEPTION:"""
template_helper_switch_msgtype_reply_no_exception_end = """\
break;"""
template_helper_switch_msgtype_reply_user_exception_start = """\
case USER_EXCEPTION:"""
template_helper_switch_msgtype_reply_user_exception_end = """\
break;"""
template_helper_switch_msgtype_reply_default_start = """\
default:
/* Unknown Exception */
expert_add_info_format(pinfo, item, &ei_@dissector_name@_unknown_exception, "Unknown exception %d", header->rep_status);"""
template_helper_switch_msgtype_reply_default_end = """\
break;"""
template_helper_switch_msgtype_reply_end = """\
break;"""
template_helper_switch_rep_status_start = """\
switch(header->rep_status) {"""
template_helper_switch_rep_status_default_start = """\
default:
/* Unknown Reply Status */
expert_add_info_format(pinfo, item, &ei_@dissector_name@_unknown_reply_status, "Unknown reply status %d", header->rep_status);"""
template_helper_switch_rep_status_default_end = """\
break;"""
template_helper_switch_rep_status_end = """\
} /* switch(header->rep_status) */
break;"""
#
# Templates for get_CDR_xxx accessors
#
template_get_CDR_ulong = """\
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-4, 4, get_CDR_ulong(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_short = """\
proto_tree_add_int(tree, hf_@hfname@, tvb, *offset-2, 2, get_CDR_short(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_void = """\
/* Function returns void */
"""
template_get_CDR_long = """\
proto_tree_add_int(tree, hf_@hfname@, tvb, *offset-4, 4, get_CDR_long(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_ushort = """\
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-2, 2, get_CDR_ushort(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_float = """\
proto_tree_add_float(tree, hf_@hfname@, tvb, *offset-4, 4, get_CDR_float(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_double = """\
proto_tree_add_double(tree, hf_@hfname@, tvb, *offset-8, 8, get_CDR_double(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_longlong = """\
proto_tree_add_int64(tree, hf_@hfname@, tvb, *offset-8, 8, get_CDR_long_long(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_ulonglong = """\
proto_tree_add_uint64(tree, hf_@hfname@, tvb, *offset-8, 8, get_CDR_ulong_long(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_boolean = """\
proto_tree_add_boolean(tree, hf_@hfname@, tvb, *offset-1, 1, get_CDR_boolean(tvb,offset));
"""
template_get_CDR_char = """\
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-1, 1, get_CDR_char(tvb,offset));
"""
template_get_CDR_octet = """\
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-1, 1, get_CDR_octet(tvb,offset));
"""
template_get_CDR_any = """\
get_CDR_any(tvb, pinfo, tree, item, offset, stream_is_big_endian, boundary, header);
"""
template_get_CDR_fixed = """\
get_CDR_fixed(tvb, pinfo, item, &seq, offset, @digits@, @scale@);
proto_tree_add_text(tree,tvb,*offset-@length@, @length@, "@varname@ < @digits@, @scale@> = %s",seq);
"""
template_get_CDR_enum_symbolic = """\
u_octet4 = get_CDR_enum(tvb,offset,stream_is_big_endian, boundary);
/* coverity[returned_pointer] */
item = proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-4, 4, u_octet4);
"""
template_get_CDR_string = """\
giop_add_CDR_string(tree, tvb, offset, stream_is_big_endian, boundary, hf_@hfname@);
"""
template_get_CDR_wstring = """\
u_octet4 = get_CDR_wstring(tvb, &seq, offset, stream_is_big_endian, boundary, header);
proto_tree_add_text(tree,tvb,*offset-u_octet4,u_octet4,"@varname@ (%u) = %s",
u_octet4, (u_octet4 > 0) ? seq : \"\");
"""
template_get_CDR_wchar = """\
s_octet1 = get_CDR_wchar(tvb, &seq, offset, header);
if (tree) {
if (s_octet1 > 0)
proto_tree_add_text(tree,tvb,*offset-1-s_octet1,1,"length = %u",s_octet1);
if (s_octet1 < 0)
s_octet1 = -s_octet1;
if (s_octet1 > 0)
proto_tree_add_text(tree,tvb,*offset-s_octet1,s_octet1,"@varname@ = %s",seq);
}
"""
template_get_CDR_TypeCode = """\
u_octet4 = get_CDR_typeCode(tvb, pinfo, tree, offset, stream_is_big_endian, boundary, header);
"""
template_get_CDR_object = """\
get_CDR_object(tvb, pinfo, tree, offset, stream_is_big_endian, boundary);
"""
template_get_CDR_sequence_length = """\
u_octet4_loop_@seqname@ = get_CDR_ulong(tvb, offset, stream_is_big_endian, boundary);
/* coverity[returned_pointer] */
item = proto_tree_add_uint(tree, hf_@seqname@_loop, tvb,*offset-4, 4, u_octet4_loop_@seqname@);
"""
template_get_CDR_sequence_loop_start = """\
for (i_@seqname@=0; i_@seqname@ < u_octet4_loop_@seqname@; i_@seqname@++) {
"""
template_get_CDR_sequence_loop_end = """\
}
"""
template_get_CDR_sequence_octet = """\
if (u_octet4_loop_@seqname@ > 0 && tree) {
get_CDR_octet_seq(tvb, &binary_seq_@seqname@, offset,
u_octet4_loop_@seqname@);
text_seq_@seqname@ = make_printable_string(binary_seq_@seqname@,
u_octet4_loop_@seqname@);
proto_tree_add_text(tree, tvb, *offset - u_octet4_loop_@seqname@,
u_octet4_loop_@seqname@, \"@seqname@: %s\", text_seq_@seqname@);
}
"""
template_get_CDR_array_start = """\
for (i_@aname@=0; i_@aname@ < @aval@; i_@aname@++) {
"""
template_get_CDR_array_end = """\
}
"""
template_get_CDR_array_comment = """\
/* Array: @aname@[ @asize@] */
"""
template_structure_start = """\
/* Begin struct \"@name@\" */"""
template_structure_end = """\
/* End struct \"@name@\" */"""
template_union_start = """\
/* Begin union \"@name@\" */"""
template_union_end = """\
/* End union \"@name@\" */"""
#
# Templates for get_CDR_xxx_hf accessors
#
template_get_CDR_ulong_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_short_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT16,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_long_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_ushort_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT16,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_float_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_FLOAT,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_double_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_DOUBLE,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_longlong_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT64,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_ulonglong_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT64,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_boolean_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_BOOLEAN,8,NULL,0x01,NULL,HFILL}},"""
template_get_CDR_char_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT8,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_octet_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT8,BASE_HEX,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_enum_symbolic_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,VALS(@valstringarray@),0x0,NULL,HFILL}},"""
template_get_CDR_string_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_wstring_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_wchar_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT16,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_TypeCode_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_sequence_hf = """\
{&hf_@hfname@_loop, {"Seq length of @descname@","giop-@dissector_name@.@[email protected]",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_sequence_octet_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT8,BASE_HEX,NULL,0x0,NULL,HFILL}},"""
#
# Program Header Template
#
template_Header = """\
/* packet-@[email protected]
*
* Routines for IDL dissection
*
* Autogenerated from idl2wrs
* Copyright 2001 Frank Singleton <frank.singleton@@ericsson.com>
*/
"""
template_wireshark_copyright = """\
/*
* Wireshark - Network traffic analyzer
* By Gerald Combs
* Copyright 1999 - 2012 Gerald Combs
*/
"""
#
# GPL Template
#
template_GPL = """\
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
"""
#
# Modelines Template
#
template_Modelines = """\
/*
* Editor modelines
*
* Local Variables:
* c-basic-offset: 4
* tab-width: 8
* indent-tabs-mode: nil
* End:
*
* ex: set shiftwidth=4 tabstop=8 expandtab:
* :indentSize=4:tabSize=8:noTabs=true:
*/"""
#
# Includes template
#
template_Includes = """\
#include "config.h"
#include <gmodule.h>
#include <string.h>
#include <glib.h>
#include <epan/packet.h>
#include <epan/proto.h>
#include <epan/dissectors/packet-giop.h>
#include <epan/expert.h>
#ifdef _MSC_VER
/* disable warning: "unreferenced local variable" */
#pragma warning(disable:4101)
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
#endif"""
#
# Main dissector entry templates
#
template_main_dissector_start = """\
/*
* Called once we accept the packet as being for us; it sets the
* Protocol and Info columns and creates the top-level protocol
* tree item.
*/
static proto_tree *
start_dissecting(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset)
{
proto_item *ti = NULL;
proto_tree *tree = NULL; /* init later, inside if(tree) */
col_set_str(pinfo->cinfo, COL_PROTOCOL, \"@disprot@\");
/*
* Do not clear COL_INFO, as nothing is being written there by
* this dissector yet. So leave it as is from the GIOP dissector.
* TODO: add something useful to COL_INFO
* col_clear(pinfo->cinfo, COL_INFO);
*/
if (ptree) {
ti = proto_tree_add_item(ptree, proto_@dissname@, tvb, *offset, -1, ENC_NA);
tree = proto_item_add_subtree(ti, ett_@dissname@);
}
return tree;
}
static proto_item*
process_RequestOperation(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, MessageHeader *header, const gchar *operation)
{
proto_item *pi;
if(header->message_type == Reply) {
/* fill-up info column */
col_append_fstr(pinfo->cinfo, COL_INFO, " op = %s",operation);
}
/* fill-up the field */
pi=proto_tree_add_string(ptree, hf_operationrequest, tvb, 0, 0, operation);
PROTO_ITEM_SET_GENERATED(pi);
return pi;
}
static gboolean
dissect_@dissname@(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset, MessageHeader *header, const gchar *operation, gchar *idlname)
{
proto_item *item _U_;
proto_tree *tree _U_;
gboolean stream_is_big_endian = is_big_endian(header); /* get endianess */
/* If we have a USER Exception, then decode it and return */
if ((header->message_type == Reply) && (header->rep_status == USER_EXCEPTION)) {
return decode_user_exception(tvb, pinfo, ptree, offset, header, operation, stream_is_big_endian);
}
"""
template_main_dissector_switch_msgtype_start = """\
switch(header->message_type) {
"""
template_main_dissector_switch_msgtype_start_request_reply = """\
case Request:
case Reply:
"""
template_main_dissector_switch_msgtype_end_request_reply = """\
break;
"""
template_main_dissector_switch_msgtype_all_other_msgtype = """\
case CancelRequest:
case LocateRequest:
case LocateReply:
case CloseConnection:
case MessageError:
case Fragment:
return FALSE; /* not handled yet */
default:
return FALSE; /* not handled yet */
} /* switch */
"""
template_main_dissector_end = """\
return FALSE;
} /* End of main dissector */
"""
#-------------------------------------------------------------#
# Exception handling templates #
#-------------------------------------------------------------#
template_exception_helpers_start = """\
/* Begin Exception Helper Functions */
"""
template_exception_helpers_end = """\
/* End Exception Helper Functions */
"""
#
# template for Main delegator for exception handling
#
template_main_exception_delegator_start = """\
/*
* Main delegator for exception handling
*
*/
static gboolean
decode_user_exception(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *ptree _U_, int *offset _U_, MessageHeader *header, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
proto_tree *tree _U_;
if (!header->exception_id)
return FALSE;
"""
#
# template for exception delegation code body
#
template_ex_delegate_code = """\
if (strcmp(header->exception_id, "@exname@") == 0) {
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_ex_@sname@(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian); /* @exname@ */
return TRUE;
}
"""
#
# End of Main delegator for exception handling
#
template_main_exception_delegator_end = """
return FALSE; /* user exception not found */
}
"""
#
# template for exception helper code
#
template_exception_helper_function_start = """\
/* Exception = @exname@ */
static void
decode_ex_@sname@(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
proto_item *item _U_;
"""
template_exception_helper_function_end = """\
}
"""
#
# template for struct helper code
#
template_struct_helper_function_start = """\
/* Struct = @stname@ */
static void
decode_@sname@_st(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
"""
template_struct_helper_function_end = """\
}
"""
#
# template for union helper code
#
template_union_helper_function_start = """\
/* Union = @unname@ */
static void
decode_@sname@_un(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
proto_item* item _U_;
"""
template_union_helper_function_end = """\
}
"""
#-------------------------------------------------------------#
# Value string templates #
#-------------------------------------------------------------#
template_value_string_start = """\
static const value_string @valstringname@[] = {
"""
template_value_string_entry = """\
{ @intval@, \"@description@\" },"""
template_value_string_end = """\
{ 0, NULL },
};
"""
#-------------------------------------------------------------#
# Enum handling templates #
#-------------------------------------------------------------#
template_comment_enums_start = """\
/*
* IDL Enums Start
*/
"""
template_comment_enums_end = """\
/*
* IDL Enums End
*/
"""
template_comment_enum_comment = """\
/*
* Enum = @ename@
*/"""
#-------------------------------------------------------------#
# Attribute handling templates #
#-------------------------------------------------------------#
template_comment_attributes_start = """\
/*
* IDL Attributes Start
*/
"""
#
# get/set accessor method names are language mapping dependent.
#
template_attributes_declare_Java_get = """static const char get_@sname@_at[] = \"_get_@atname@\" ;"""
template_attributes_declare_Java_set = """static const char set_@sname@_at[] = \"_set_@atname@\" ;"""
template_comment_attributes_end = """
/*
* IDL Attributes End
*/
"""
#
# template for Attribute delegation code
#
# Note: _get_xxx() should only be called for Reply with NO_EXCEPTION
# Note: _set_xxx() should only be called for Request
#
#
template_at_delegate_code_get = """\
if (strcmp(operation, get_@sname@_at) == 0 && (header->message_type == Reply) && (header->rep_status == NO_EXCEPTION) ) {
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_get_@sname@_at(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian);
return TRUE;
}
"""
template_at_delegate_code_set = """\
if (strcmp(operation, set_@sname@_at) == 0 && (header->message_type == Request) ) {
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_set_@sname@_at(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian);
return TRUE;
}
"""
template_attribute_helpers_start = """\
/* Begin Attribute Helper Functions */
"""
template_attribute_helpers_end = """\
/* End Attribute Helper Functions */
"""
#
# template for attribute helper code
#
template_attribute_helper_function_start = """\
/* Attribute = @atname@ */
static void
decode_@sname@_at(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
proto_item* item _U_;
"""
template_attribute_helper_function_end = """\
}
"""
#-------------------------------------------------------------#
# Debugging templates #
#-------------------------------------------------------------#
#
# Template for outputting TODO "C" comments
# so the user knows I need to improve something.
#
template_debug_TODO = """\
/* TODO - @message@ */
"""
#
# Template for outputting WARNING "C" comments
# so the user knows if I have found a problem.
#
template_debug_WARNING = """\
/* WARNING - @message@ */
"""
#-------------------------------------------------------------#
# IDL Union templates #
#-------------------------------------------------------------#
template_comment_union_code_start = """\
/*
* IDL Union Start - @uname@
*/
"""
template_comment_union_code_end = """
/*
* IDL union End - @uname@
*/
"""
template_comment_union_code_discriminant = """\
/*
* IDL Union - Discriminant - @uname@
*/
"""
#
# Cast Union types to something appropriate
# Enum value cast to guint32, all others cast to gint32
# as omniidl accessor returns integer or Enum.
#
template_union_code_save_discriminant_enum = """\
disc_s_@discname@ = (gint32) u_octet4; /* save Enum Value discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_long = """\
disc_s_@discname@ = (gint32) s_octet4; /* save gint32 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_ulong = """\
disc_s_@discname@ = (gint32) u_octet4; /* save guint32 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_short = """\
disc_s_@discname@ = (gint32) s_octet2; /* save gint16 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_ushort = """\
disc_s_@discname@ = (gint32) u_octet2; /* save guint16 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_char = """\
disc_s_@discname@ = (gint32) u_octet1; /* save guint8 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_boolean = """\
disc_s_@discname@ = (gint32) u_octet1; /* save guint8 discriminant and cast to gint32 */
"""
template_comment_union_code_label_compare_start = """\
if (disc_s_@discname@ == @labelval@) {
"""
template_comment_union_code_label_compare_end = """\
return; /* End Compare for this discriminant type */
}
"""
template_comment_union_code_label_default_start = """
/* Default Union Case Start */
"""
template_comment_union_code_label_default_end = """\
/* Default Union Case End */
"""
#
# Templates for function prototypes.
# This is used in genDeclares() for declaring function prototypes
# for structs and union helper functions.
#
template_hf_operations = """
static int hf_operationrequest = -1;/* Request_Operation field */
"""
template_hf = """\
static int hf_@name@ = -1;"""
template_prototype_start_dissecting = """
static proto_tree *start_dissecting(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset);
"""
template_prototype_struct_start = """\
/* Struct prototype declaration Start */
"""
template_prototype_struct_end = """\
/* Struct prototype declaration End */
"""
template_prototype_struct_body = """\
/* Struct = @stname@ */
static void decode_@name@_st(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_);
"""
template_decode_struct = """\
decode_@name@_st(tvb, pinfo, tree, item, offset, header, operation, stream_is_big_endian);"""
template_prototype_union_start = """\
/* Union prototype declaration Start */"""
template_prototype_union_end = """\
/* Union prototype declaration End */"""
template_prototype_union_body = """
/* Union = @unname@ */
static void decode_@name@_un(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_);
"""
template_decode_union = """
decode_@name@_un(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian);
"""
#
# Editor modelines - http://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 expandtab:
# :indentSize=4:noTabs=true:
#
|
gpl-2.0
|
Peddle/hue
|
desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/fields.py
|
163
|
8292
|
from boto.dynamodb2.types import STRING
class BaseSchemaField(object):
"""
An abstract class for defining schema fields.
Contains most of the core functionality for the field. Subclasses must
define an ``attr_type`` to pass to DynamoDB.
"""
attr_type = None
def __init__(self, name, data_type=STRING):
"""
Creates a Python schema field, to represent the data to pass to
DynamoDB.
Requires a ``name`` parameter, which should be a string name of the
field.
Optionally accepts a ``data_type`` parameter, which should be a
constant from ``boto.dynamodb2.types``. (Default: ``STRING``)
"""
self.name = name
self.data_type = data_type
def definition(self):
"""
Returns the attribute definition structure DynamoDB expects.
Example::
>>> field.definition()
{
'AttributeName': 'username',
'AttributeType': 'S',
}
"""
return {
'AttributeName': self.name,
'AttributeType': self.data_type,
}
def schema(self):
"""
Returns the schema structure DynamoDB expects.
Example::
>>> field.schema()
{
'AttributeName': 'username',
'KeyType': 'HASH',
}
"""
return {
'AttributeName': self.name,
'KeyType': self.attr_type,
}
class HashKey(BaseSchemaField):
"""
An field representing a hash key.
Example::
>>> from boto.dynamodb2.types import NUMBER
>>> HashKey('username')
>>> HashKey('date_joined', data_type=NUMBER)
"""
attr_type = 'HASH'
class RangeKey(BaseSchemaField):
"""
A field representing a range key.
Example::
>>> from boto.dynamodb2.types import NUMBER
>>> RangeKey('username')
>>> RangeKey('date_joined', data_type=NUMBER)
"""
attr_type = 'RANGE'
class BaseIndexField(object):
"""
An abstract class for defining schema indexes.
Contains most of the core functionality for the index. Subclasses must
define a ``projection_type`` to pass to DynamoDB.
"""
def __init__(self, name, parts):
self.name = name
self.parts = parts
def definition(self):
"""
Returns the attribute definition structure DynamoDB expects.
Example::
>>> index.definition()
{
'AttributeName': 'username',
'AttributeType': 'S',
}
"""
definition = []
for part in self.parts:
definition.append({
'AttributeName': part.name,
'AttributeType': part.data_type,
})
return definition
def schema(self):
"""
Returns the schema structure DynamoDB expects.
Example::
>>> index.schema()
{
'IndexName': 'LastNameIndex',
'KeySchema': [
{
'AttributeName': 'username',
'KeyType': 'HASH',
},
],
'Projection': {
'ProjectionType': 'KEYS_ONLY',
}
}
"""
key_schema = []
for part in self.parts:
key_schema.append(part.schema())
return {
'IndexName': self.name,
'KeySchema': key_schema,
'Projection': {
'ProjectionType': self.projection_type,
}
}
class AllIndex(BaseIndexField):
"""
An index signifying all fields should be in the index.
Example::
>>> AllIndex('MostRecentlyJoined', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ])
"""
projection_type = 'ALL'
class KeysOnlyIndex(BaseIndexField):
"""
An index signifying only key fields should be in the index.
Example::
>>> KeysOnlyIndex('MostRecentlyJoined', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ])
"""
projection_type = 'KEYS_ONLY'
class IncludeIndex(BaseIndexField):
"""
An index signifying only certain fields should be in the index.
Example::
>>> IncludeIndex('GenderIndex', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ], includes=['gender'])
"""
projection_type = 'INCLUDE'
def __init__(self, *args, **kwargs):
self.includes_fields = kwargs.pop('includes', [])
super(IncludeIndex, self).__init__(*args, **kwargs)
def schema(self):
schema_data = super(IncludeIndex, self).schema()
schema_data['Projection']['NonKeyAttributes'] = self.includes_fields
return schema_data
class GlobalBaseIndexField(BaseIndexField):
"""
An abstract class for defining global indexes.
Contains most of the core functionality for the index. Subclasses must
define a ``projection_type`` to pass to DynamoDB.
"""
throughput = {
'read': 5,
'write': 5,
}
def __init__(self, *args, **kwargs):
throughput = kwargs.pop('throughput', None)
if throughput is not None:
self.throughput = throughput
super(GlobalBaseIndexField, self).__init__(*args, **kwargs)
def schema(self):
"""
Returns the schema structure DynamoDB expects.
Example::
>>> index.schema()
{
'IndexName': 'LastNameIndex',
'KeySchema': [
{
'AttributeName': 'username',
'KeyType': 'HASH',
},
],
'Projection': {
'ProjectionType': 'KEYS_ONLY',
},
'ProvisionedThroughput': {
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
}
"""
schema_data = super(GlobalBaseIndexField, self).schema()
schema_data['ProvisionedThroughput'] = {
'ReadCapacityUnits': int(self.throughput['read']),
'WriteCapacityUnits': int(self.throughput['write']),
}
return schema_data
class GlobalAllIndex(GlobalBaseIndexField):
"""
An index signifying all fields should be in the index.
Example::
>>> GlobalAllIndex('MostRecentlyJoined', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ],
... throughput={
... 'read': 2,
... 'write': 1,
... })
"""
projection_type = 'ALL'
class GlobalKeysOnlyIndex(GlobalBaseIndexField):
"""
An index signifying only key fields should be in the index.
Example::
>>> GlobalKeysOnlyIndex('MostRecentlyJoined', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ],
... throughput={
... 'read': 2,
... 'write': 1,
... })
"""
projection_type = 'KEYS_ONLY'
class GlobalIncludeIndex(GlobalBaseIndexField, IncludeIndex):
"""
An index signifying only certain fields should be in the index.
Example::
>>> GlobalIncludeIndex('GenderIndex', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ],
... includes=['gender'],
... throughput={
... 'read': 2,
... 'write': 1,
... })
"""
projection_type = 'INCLUDE'
def __init__(self, *args, **kwargs):
throughput = kwargs.pop('throughput', None)
IncludeIndex.__init__(self, *args, **kwargs)
if throughput:
kwargs['throughput'] = throughput
GlobalBaseIndexField.__init__(self, *args, **kwargs)
def schema(self):
# Pick up the includes.
schema_data = IncludeIndex.schema(self)
# Also the throughput.
schema_data.update(GlobalBaseIndexField.schema(self))
return schema_data
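# A minimal, hedged usage sketch using only names defined in this module; the
# field and index names are illustrative, not part of boto's own examples.
if __name__ == '__main__':
    from boto.dynamodb2.types import NUMBER
    index = GlobalAllIndex('MostRecentlyJoined',
                           parts=[HashKey('username'),
                                  RangeKey('date_joined', data_type=NUMBER)],
                           throughput={'read': 2, 'write': 1})
    print(index.definition())  # attribute definitions for both key parts
    print(index.schema())      # KeySchema, Projection and ProvisionedThroughput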
|
apache-2.0
|
PaulKinlan/cli-caniuse
|
site/app/scripts/bower_components/jsrepl-build/extern/python/unclosured/lib/python2.7/functools.py
|
259
|
4478
|
"""functools.py - Tools for working with functions and callable objects
"""
# Python module wrapper for _functools C module
# to allow utilities written in Python to be added
# to the functools module.
# Written by Nick Coghlan <ncoghlan at gmail.com>
# Copyright (C) 2006 Python Software Foundation.
# See C source code for _functools credits/copyright
from _functools import partial, reduce
# update_wrapper() and wraps() are tools to help write
# wrapper functions that can handle naive introspection
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper,
wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Update a wrapper function to look like the wrapped function
wrapper is the function to be updated
wrapped is the original function
assigned is a tuple naming the attributes assigned directly
from the wrapped function to the wrapper function (defaults to
functools.WRAPPER_ASSIGNMENTS)
updated is a tuple naming the attributes of the wrapper that
are updated with the corresponding attribute from the wrapped
function (defaults to functools.WRAPPER_UPDATES)
"""
for attr in assigned:
setattr(wrapper, attr, getattr(wrapped, attr))
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
# Return the wrapper so this can be used as a decorator via partial()
return wrapper
def wraps(wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Decorator factory to apply update_wrapper() to a wrapper function
Returns a decorator that invokes update_wrapper() with the decorated
function as the wrapper argument and the arguments to wraps() as the
remaining arguments. Default arguments are as for update_wrapper().
This is a convenience function to simplify applying partial() to
update_wrapper().
"""
return partial(update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
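# A small, hedged usage sketch (not part of the stdlib module): wraps() keeps
# the wrapped function's metadata; _logged and _greet are illustrative names.
def _logged(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        print('calling %s' % func.__name__)
        return func(*args, **kwargs)
    return wrapper

@_logged
def _greet(name):
    """Return a greeting."""
    return 'Hello, %s' % name

# _greet.__name__ == '_greet' and _greet.__doc__ == 'Return a greeting.',
# because update_wrapper() copied them from the original function.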
def total_ordering(cls):
"""Class decorator that fills in missing ordering methods"""
convert = {
'__lt__': [('__gt__', lambda self, other: not (self < other or self == other)),
('__le__', lambda self, other: self < other or self == other),
('__ge__', lambda self, other: not self < other)],
'__le__': [('__ge__', lambda self, other: not self <= other or self == other),
('__lt__', lambda self, other: self <= other and not self == other),
('__gt__', lambda self, other: not self <= other)],
'__gt__': [('__lt__', lambda self, other: not (self > other or self == other)),
('__ge__', lambda self, other: self > other or self == other),
('__le__', lambda self, other: not self > other)],
'__ge__': [('__le__', lambda self, other: (not self >= other) or self == other),
('__gt__', lambda self, other: self >= other and not self == other),
('__lt__', lambda self, other: not self >= other)]
}
roots = set(dir(cls)) & set(convert)
if not roots:
raise ValueError('must define at least one ordering operation: < > <= >=')
root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
for opname, opfunc in convert[root]:
if opname not in roots:
opfunc.__name__ = opname
opfunc.__doc__ = getattr(int, opname).__doc__
setattr(cls, opname, opfunc)
return cls
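# Hedged illustration (not part of the module): with total_ordering, a class
# only needs __eq__ plus one ordering method; the rest are filled in.
@total_ordering
class _Version(object):
    def __init__(self, number):
        self.number = number
    def __eq__(self, other):
        return self.number == other.number
    def __lt__(self, other):
        return self.number < other.number

# _Version(1) <= _Version(2) and _Version(2) >= _Version(1) both hold, even
# though only __eq__ and __lt__ were written by hand.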
def cmp_to_key(mycmp):
"""Convert a cmp= function into a key= function"""
class K(object):
__slots__ = ['obj']
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
def __hash__(self):
raise TypeError('hash not implemented')
return K
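# Hedged usage sketch: adapting an old-style three-way cmp function so it can
# be passed as key= to sorted(); _reverse_cmp is an illustrative name.
def _reverse_cmp(a, b):
    return (b > a) - (b < a)  # descending three-way comparison

# sorted([3, 1, 2], key=cmp_to_key(_reverse_cmp)) -> [3, 2, 1]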
|
apache-2.0
|
chuckgu/Alphabeta
|
tensorflow/json_txt.py
|
1
|
1272
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 9 00:35:54 2016
@author: chuckgu
"""
import json,os
from nltk.tokenize import sent_tokenize,word_tokenize
from konlpy.tag import Twitter
import numpy as np
import sys
reload(sys)
sys.setdefaultencoding('utf8')
twitter=Twitter()
txt=[]
checklist=['Exclamation','Alpha','URL','Punctuation','Foreign','Unknown','Hashtag','ScreenName','Josa']
'''
currdir = os.getcwd()
os.chdir('%s/' % currdir)
print currdir
with open("text8", 'r') as f:
for line in f:
sentences.append(line[:100])
print sentences
'''
with open("/home/chuckgu/Desktop/project/preprocessing/x-project/word2vec/namuwiki160229/namuwiki_20160229.json") as json_file:
json_data = json.load(json_file)
for i,j in enumerate(json_data):
print i
sentences=sent_tokenize(j["text"])
if len(sentences)>5:
for line in sentences:
line=line.decode('utf-8')
#txt.append(' '.join(twitter.morphs(line)))
txt.extend([s[0]for s in twitter.pos(line,norm=True) if s[1] not in checklist])
if i==120000: break
#np.savetxt("namu.txt",txt,fmt='%s')
import cPickle as pkl
f = open('namu_wo_josa.pkl', 'wb')
pkl.dump(txt, f, -1)
f.close()
print 'saved'
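# Hedged sketch (not part of the original script): the token list written
# above can be read back later with the same cPickle module.
def load_tokens(path='namu_wo_josa.pkl'):
    f = open(path, 'rb')
    try:
        return pkl.load(f)
    finally:
        f.close()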
|
gpl-3.0
|
trhd/meson
|
setup.py
|
4
|
4010
|
#!/usr/bin/env python3
# Copyright 2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from mesonbuild.coredata import version
if sys.version_info[0] < 3:
print('Tried to install with Python 2, Meson only supports Python 3.')
sys.exit(1)
# We need to support Python installations that have nothing but the basic
# Python installation. Use setuptools when possible and fall back to
# plain distutils when setuptools is not available.
try:
from setuptools import setup
from setuptools.command.install_scripts import install_scripts as orig
except ImportError:
from distutils.core import setup
from distutils.command.install_scripts import install_scripts as orig
class install_scripts(orig):
def run(self):
if sys.platform == 'win32':
super().run()
return
if not self.skip_build:
self.run_command('build_scripts')
self.outfiles = []
if not self.dry_run:
self.mkpath(self.install_dir)
# We want the files to be installed without a suffix on Unix
for infile in self.get_inputs():
infile = os.path.basename(infile)
in_built = os.path.join(self.build_dir, infile)
in_stripped = infile[:-3] if infile.endswith('.py') else infile
outfile = os.path.join(self.install_dir, in_stripped)
# NOTE: Mode is preserved by default
self.copy_file(in_built, outfile)
self.outfiles.append(outfile)
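# Illustrative helper only (not used by setup() below): the suffix rule the
# install_scripts class applies when installing scripts on Unix-like systems.
def _strip_py_suffix(infile):
    return infile[:-3] if infile.endswith('.py') else infile

# _strip_py_suffix('meson.py') -> 'meson'; _strip_py_suffix('wraptool') -> 'wraptool'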
setup(name='meson',
version=version,
description='A high performance build system',
author='Jussi Pakkanen',
author_email='[email protected]',
url='http://mesonbuild.com',
license=' Apache License, Version 2.0',
packages=['mesonbuild',
'mesonbuild.backend',
'mesonbuild.compilers',
'mesonbuild.dependencies',
'mesonbuild.modules',
'mesonbuild.scripts',
'mesonbuild.wrap'],
scripts=['meson.py',
'mesonconf.py',
'mesontest.py',
'mesonintrospect.py',
'wraptool.py'],
cmdclass={'install_scripts': install_scripts},
data_files=[('share/man/man1', ['man/meson.1',
'man/mesonconf.1',
'man/mesonintrospect.1',
'man/mesontest.1',
'man/wraptool.1'])],
classifiers=['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Software Development :: Build Tools',
],
long_description='''Meson is a cross-platform build system designed to be both as
fast and as user friendly as possible. It supports many languages and compilers, including
GCC, Clang and Visual Studio. Its build definitions are written in a simple non-turing
complete DSL.''')
|
apache-2.0
|
JuliaSprenger/python-neo
|
neo/io/neuroshareapiio.py
|
5
|
19948
|
"""
Class for "reading" data from Neuroshare compatible files (check neuroshare.org)
It runs through the whole file and searches for: analog signals, spike cutouts,
and trigger events (without duration)
Depends on: Neuroshare API 0.9.1, numpy 1.6.1, quantities 0.10.1
Supported: Read
Author: Andre Maia Chagas
"""
# note neo.core needs only numpy and quantities
import numpy as np
import quantities as pq
import os
# check to see if the neuroshare bindings are properly imported
try:
import neuroshare as ns
except ImportError as err:
print(err)
# print('\n neuroshare library not found, loading data will not work!' )
# print('\n be sure to install the library found at:')
# print('\n www.http://pythonhosted.org/neuroshare/')
else:
pass
# print('neuroshare library successfully imported')
# import BaseIO
from neo.io.baseio import BaseIO
# import objects from neo.core
from neo.core import Segment, AnalogSignal, SpikeTrain, Event, Epoch
# create an object based on BaseIO
class NeuroshareapiIO(BaseIO):
# setting some class parameters
is_readable = True # This class can only read data
is_writable = False # write is not supported
supported_objects = [Segment, AnalogSignal, SpikeTrain, Event, Epoch]
has_header = False
is_streameable = False
readable_objects = [Segment, AnalogSignal, SpikeTrain, Event, Epoch]
# This class is not able to write objects
writeable_objects = []
# # This is for GUI stuff : a definition for parameters when reading.
# # This dict should be keyed by object (`Block`). Each entry is a list
# # of tuple. The first entry in each tuple is the parameter name. The
# # second entry is a dict with keys 'value' (for default value),
# # and 'label' (for a descriptive name).
# # Note that if the highest-level object requires parameters,
# # common_io_test will be skipped.
read_params = {
Segment: [
("segment_duration", {"value": 0., "label": "Segment size (s.)"}),
("t_start", {"value": 0., "label": "start reading (s.)"}),
# ("num_analogsignal",
# {'value" : 8, "label" : "Number of recording points"}),
# ("num_spiketrain_by_channel',
# {"value" : 3, "label" : "Num of spiketrains"}),
],
}
#
# do not supported write so no GUI stuff
write_params = None
name = "Neuroshare"
extensions = []
# This object operates on neuroshare files
mode = "file"
def __init__(self, filename=None, dllpath=None):
"""
Arguments:
filename : the filename
The init function will run automatically upon calling of the class, as
        in: test = NeuroshareapiIO(filename='filetoberead.mcd'), therefore the first
        operations with the file are set here, so that the user doesn't have to
        remember to use another method than the ones defined in the NEO library
"""
BaseIO.__init__(self)
self.filename = filename
# set the flags for each event type
eventID = 1
analogID = 2
epochID = 3
# if a filename was given, create a dictionary with information that will
# be needed later on.
if self.filename is not None:
if dllpath is not None:
name = os.path.splitext(os.path.basename(dllpath))[0]
library = ns.Library(name, dllpath)
else:
library = None
self.fd = ns.File(self.filename, library=library)
# get all the metadata from file
self.metadata = self.fd.metadata_raw
# get sampling rate
self.metadata["sampRate"] = 1. / self.metadata["TimeStampResolution"] # hz
# create lists and array for electrode, spike cutouts and trigger channels
self.metadata["elecChannels"] = list()
self.metadata["elecChanId"] = list()
self.metadata["num_analogs"] = 0
self.metadata["spkChannels"] = list()
self.metadata["spkChanId"] = list()
self.metadata["num_spkChans"] = 0
self.metadata["triggers"] = list()
self.metadata["triggersId"] = list()
self.metadata["num_trigs"] = 0
self.metadata["digital epochs"] = list()
self.metadata["digiEpochId"] = list()
self.metadata["num_digiEpochs"] = 0
# loop through all entities in file to get the indexes for each entity
# type, so that one can run through the indexes later, upon reading the
# segment
for entity in self.fd.entities:
# if entity is analog and not the digital line recording
# (stored as analog in neuroshare files)
if entity.entity_type == analogID and entity.label[0:4] != "digi":
# get the electrode number
self.metadata["elecChannels"].append(entity.label[-4:])
# get the electrode index
self.metadata["elecChanId"].append(entity.id)
# increase the number of electrodes found
self.metadata["num_analogs"] += 1
                # if the entity is an event entity and a trigger
if entity.entity_type == eventID and entity.label[0:4] == "trig":
# get the digital bit/trigger number
self.metadata["triggers"].append(entity.label[0:4] + entity.label[-4:])
# get the digital bit index
self.metadata["triggersId"].append(entity.id)
# increase the number of triggers found
self.metadata["num_trigs"] += 1
# if the entity is non triggered digital values with duration
if entity.entity_type == eventID and entity.label[0:4] == "digi":
# get the digital bit number
self.metadata["digital epochs"].append(entity.label[-5:])
# get the digital bit index
self.metadata["digiEpochId"].append(entity.id)
# increase the number of triggers found
self.metadata["num_digiEpochs"] += 1
# if the entity is spike cutouts
if entity.entity_type == epochID and entity.label[0:4] == "spks":
self.metadata["spkChannels"].append(entity.label[-4:])
self.metadata["spkChanId"].append(entity.id)
self.metadata["num_spkChans"] += 1
# function to create a block and read in a segment
# def create_block(self,
#
# ):
#
# blk=Block(name = self.fileName+"_segment:",
# file_datetime = str(self.metadata_raw["Time_Day"])+"/"+
# str(self.metadata_raw["Time_Month"])+"/"+
# str(self.metadata_raw["Time_Year"])+"_"+
# str(self.metadata_raw["Time_Hour"])+":"+
# str(self.metadata_raw["Time_Min"]))
#
# blk.rec_datetime = blk.file_datetime
# return blk
# create function to read segment
def read_segment(self,
lazy=False,
# all following arguments are decided by this IO and are free
t_start=0.,
segment_duration=0.,
):
"""
Return a Segment containing all analog and spike channels, as well as
all trigger events.
Parameters:
            segment_duration : is the size in seconds of the segment.
num_analogsignal : number of AnalogSignal in this segment
num_spiketrain : number of SpikeTrain in this segment
"""
assert not lazy, 'Do not support lazy'
# if no segment duration is given, use the complete file
if segment_duration == 0.:
segment_duration = float(self.metadata["TimeSpan"])
# if the segment duration is bigger than file, use the complete file
if segment_duration >= float(self.metadata["TimeSpan"]):
segment_duration = float(self.metadata["TimeSpan"])
# if the time sum of start point and segment duration is bigger than
# the file time span, cap it at the end
if segment_duration + t_start > float(self.metadata["TimeSpan"]):
segment_duration = float(self.metadata["TimeSpan"]) - t_start
# create an empty segment
seg = Segment(name="segment from the NeuroshareapiIO")
        # read nested analogsignal
if self.metadata["num_analogs"] == 0:
print("no analog signals in this file!")
else:
# run through the number of analog channels found at the __init__ function
for i in range(self.metadata["num_analogs"]):
# create an analog signal object for each channel found
ana = self.read_analogsignal(channel_index=self.metadata["elecChanId"][i],
segment_duration=segment_duration, t_start=t_start)
# add analog signal read to segment object
seg.analogsignals += [ana]
# read triggers (in this case without any duration)
for i in range(self.metadata["num_trigs"]):
# create event object for each trigger/bit found
eva = self.read_event(channel_index=self.metadata["triggersId"][i],
segment_duration=segment_duration,
t_start=t_start, )
# add event object to segment
seg.events += [eva]
# read epochs (digital events with duration)
for i in range(self.metadata["num_digiEpochs"]):
# create event object for each trigger/bit found
epa = self.read_epoch(channel_index=self.metadata["digiEpochId"][i],
segment_duration=segment_duration,
t_start=t_start, )
# add event object to segment
seg.epochs += [epa]
# read nested spiketrain
# run through all spike channels found
for i in range(self.metadata["num_spkChans"]):
# create spike object
sptr = self.read_spiketrain(channel_index=self.metadata["spkChanId"][i],
segment_duration=segment_duration,
t_start=t_start)
# add the spike object to segment
seg.spiketrains += [sptr]
seg.create_many_to_one_relationship()
return seg
"""
With this IO AnalogSignal can be accessed directly with its channel number
"""
def read_analogsignal(self,
lazy=False,
# channel index as given by the neuroshare API
channel_index=0,
# time in seconds to be read
segment_duration=0.,
# time in seconds to start reading from
t_start=0.,
):
assert not lazy, 'Do not support lazy'
# some controls:
# if no segment duration is given, use the complete file
if segment_duration == 0.:
segment_duration = float(self.metadata["TimeSpan"])
# if the segment duration is bigger than file, use the complete file
if segment_duration >= float(self.metadata["TimeSpan"]):
segment_duration = float(self.metadata["TimeSpan"])
# get the analog object
sig = self.fd.get_entity(channel_index)
# get the units (V, mV etc)
sigUnits = sig.units
# get the electrode number
chanName = sig.label[-4:]
# transform t_start into index (reading will start from this index)
startat = int(t_start * self.metadata["sampRate"])
# get the number of bins to read in
bins = int(segment_duration * self.metadata["sampRate"])
# if the number of bins to read is bigger than
# the total number of bins, read only till the end of analog object
if startat + bins > sig.item_count:
bins = sig.item_count - startat
# read the data from the sig object
sig, _, _ = sig.get_data(index=startat, count=bins)
# store it to the 'AnalogSignal' object
anasig = AnalogSignal(sig, units=sigUnits, sampling_rate=self.metadata["sampRate"] * pq.Hz,
t_start=t_start * pq.s,
t_stop=(t_start + segment_duration) * pq.s,
channel_index=channel_index)
# annotate from which electrode the signal comes from
anasig.annotate(info="signal from channel %s" % chanName)
return anasig
# function to read spike trains
def read_spiketrain(self,
lazy=False,
channel_index=0,
segment_duration=0.,
t_start=0.):
"""
Function to read in spike trains. This API still does not support read in of
        specific channels as they are recorded. Rather, the function gets the entity set
by 'channel_index' which is set in the __init__ function (all spike channels)
"""
assert not lazy, 'Do not support lazy'
# sampling rate
sr = self.metadata["sampRate"]
# create a list to store spiketrain times
times = list()
# get the spike data from a specific channel index
tempSpks = self.fd.get_entity(channel_index)
# transform t_start into index (reading will start from this index)
startat = tempSpks.get_index_by_time(t_start, 0) # zero means closest index to value
# get the last index to read, using segment duration and t_start
# -1 means last index before time
endat = tempSpks.get_index_by_time(float(segment_duration + t_start), -1)
numIndx = endat - startat
# get the end point using segment duration
# create a numpy empty array to store the waveforms
waveforms = np.array(np.zeros([numIndx, tempSpks.max_sample_count]))
# loop through the data from the specific channel index
for i in range(startat, endat, 1):
# get cutout, timestamp, cutout duration, and spike unit
tempCuts, timeStamp, duration, unit = tempSpks.get_data(i)
# save the cutout in the waveform matrix
waveforms[i] = tempCuts[0]
# append time stamp to list
times.append(timeStamp)
# create a spike train object
spiketr = SpikeTrain(times, units=pq.s,
t_stop=t_start + segment_duration,
t_start=t_start * pq.s,
name="spikes from electrode" + tempSpks.label[-3:],
waveforms=waveforms * pq.volt,
sampling_rate=sr * pq.Hz,
file_origin=self.filename,
annotate=("channel_index:" + str(channel_index)))
return spiketr
def read_event(self, lazy=False, channel_index=0,
t_start=0.,
segment_duration=0.):
"""function to read digital timestamps. this function only reads the event
onset. to get digital event durations, use the epoch function (to be implemented)."""
assert not lazy, 'Do not support lazy'
# create temporary empty lists to store data
tempNames = list()
tempTimeStamp = list()
# get entity from file
trigEntity = self.fd.get_entity(channel_index)
# transform t_start into index (reading will start from this index)
startat = trigEntity.get_index_by_time(t_start, 0) # zero means closest index to value
# get the last index to read, using segment duration and t_start
endat = trigEntity.get_index_by_time(
float(segment_duration + t_start), -1) # -1 means last index before time
# numIndx = endat-startat
# run through specified intervals in entity
for i in range(startat, endat + 1, 1): # trigEntity.item_count):
# get in which digital bit was the trigger detected
tempNames.append(trigEntity.label[-8:])
# get the time stamps of onset events
tempData, onOrOff = trigEntity.get_data(i)
# if this was an onset event, save it to the list
# on triggered recordings it seems that only onset events are
# recorded. On continuous recordings both onset(==1)
# and offset(==255) seem to be recorded
if onOrOff == 1:
                # append the time stamp to the empty list
tempTimeStamp.append(tempData)
# create an event array
eva = Event(labels=np.array(tempNames, dtype="U"),
times=np.array(tempTimeStamp) * pq.s,
file_origin=self.filename,
description="the trigger events (without durations)")
return eva
def read_epoch(self, lazy=False,
channel_index=0,
t_start=0.,
segment_duration=0.):
"""function to read digital timestamps. this function reads the event
onset and offset and outputs onset and duration. to get only onsets use
the event array function"""
assert not lazy, 'Do not support lazy'
# create temporary empty lists to store data
tempNames = list()
tempTimeStamp = list()
durations = list()
# get entity from file
digEntity = self.fd.get_entity(channel_index)
# transform t_start into index (reading will start from this index)
startat = digEntity.get_index_by_time(t_start, 0) # zero means closest index to value
# get the last index to read, using segment duration and t_start
# -1 means last index before time
endat = digEntity.get_index_by_time(float(segment_duration + t_start), -1)
# run through entity using only odd "i"s
for i in range(startat, endat + 1, 1):
if i % 2 == 1:
# get in which digital bit was the trigger detected
tempNames.append(digEntity.label[-8:])
# get the time stamps of even events
tempData, onOrOff = digEntity.get_data(i - 1)
# if this was an onset event, save it to the list
# on triggered recordings it seems that only onset events are
# recorded. On continuous recordings both onset(==1)
# and offset(==255) seem to be recorded
# if onOrOff == 1:
                # append the time stamp to the empty list
tempTimeStamp.append(tempData)
# get time stamps of odd events
tempData1, onOrOff = digEntity.get_data(i)
# if onOrOff == 255:
# pass
durations.append(tempData1 - tempData)
epa = Epoch(file_origin=self.filename,
times=np.array(tempTimeStamp) * pq.s,
durations=np.array(durations) * pq.s,
labels=np.array(tempNames, dtype="U"),
description="digital events with duration")
return epa
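# --- Illustrative usage sketch, not part of the original module ---
# Shows how this IO class is typically driven; the file name below is
# hypothetical and the neuroshare bindings are assumed to be installed.
if __name__ == "__main__":
    io = NeuroshareapiIO(filename="example_recording.mcd")
    seg = io.read_segment(t_start=0., segment_duration=1.)
    print("analog signals: %d, spike trains: %d, events: %d, epochs: %d"
          % (len(seg.analogsignals), len(seg.spiketrains),
             len(seg.events), len(seg.epochs)))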
|
bsd-3-clause
|
syndicate-storage/syndicate-core
|
python/syndicate/observer/sync.py
|
2
|
12422
|
#!/usr/bin/env python
"""
Copyright 2014 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import json
import time
import traceback
import base64
from collections import namedtuple
from Crypto.Hash import SHA256 as HashAlg
from Crypto.PublicKey import RSA as CryptoKey
from Crypto import Random
from Crypto.Signature import PKCS1_PSS as CryptoSigner
import logging
from logging import Logger
logging.basicConfig(format='[%(levelname)s] [%(module)s:%(lineno)d] %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
import syndicate.observer.core as observer_core
import syndicate.observer.cred as observer_cred
import syndicate.observer.push as observer_push
CONFIG = observer_core.get_config()
# objects expected by these methods
SyndicatePrincipal = namedtuple("SyndicatePrincipal", ["principal_id", "public_key_pem", "sealed_private_key"])
Volume = namedtuple("Volume", ["name", "owner_id", "description", "blocksize", "private", "archive", "cap_read_data", "cap_write_data", "cap_host_data", "slice_id"])
VolumeAccessRight = namedtuple("VolumeAccessRight", ["owner_id", "volume", "cap_read_data", "cap_write_data", "cap_host_data"])
SliceSecret = namedtuple("SliceSecret", ["slice_id", "secret"])
VolumeSlice = namedtuple("VolumeSlice", ["volume_id", "slice_id", "cap_read_data", "cap_write_data", "cap_host_data", "UG_portnum", "RG_portnum", "credentials_blob"])
#-------------------------------
def sync_volume_record(volume):
"""
Synchronize a Volume record with Syndicate.
"""
logger.info("Sync Volume = %s\n\n" % volume.name)
principal_id = volume.owner_id.email
config = observer_core.get_config()
max_UGs = None
max_RGs = None
volume_principal_id = observer_core.make_volume_principal_id(principal_id, volume.name)
# get the observer secret
try:
max_UGs = CONFIG.SYNDICATE_UG_QUOTA
max_RGs = CONFIG.SYNDICATE_RG_QUOTA
observer_secret = observer_core.get_syndicate_observer_secret(config.SYNDICATE_OBSERVER_SECRET)
except Exception, e:
traceback.print_exc()
logger.error("config is missing SYNDICATE_OBSERVER_SECRET, SYNDICATE_UG_QUOTA, SYNDICATE_RG_QUOTA")
raise e
# volume owner must exist as a Syndicate user...
try:
rc, user = observer_core.ensure_principal_exists(volume_principal_id, observer_secret, is_admin=False, max_UGs=max_UGs, max_RGs=max_RGs)
assert rc == True, "Failed to create or read volume principal '%s'" % volume_principal_id
except Exception, e:
traceback.print_exc()
logger.error("Failed to ensure principal '%s' exists" % volume_principal_id)
raise e
# volume must exist
# create or update the Volume
try:
new_volume = observer_core.ensure_volume_exists(volume_principal_id, volume, user=user)
except Exception, e:
traceback.print_exc()
logger.error("Failed to ensure volume '%s' exists" % volume.name)
raise e
# did we create the Volume?
if new_volume is not None:
# we're good
pass
# otherwise, just update it
else:
try:
rc = observer_core.update_volume(volume)
except Exception, e:
traceback.print_exc()
logger.error("Failed to update volume '%s', exception = %s" % (volume.name, e.message))
raise e
return True
#-------------------------------
def delete_volume_record(volume):
"""
Delete a volume from Syndicate.
"""
logger.info("Delete Volume =%s\n\n" % volume.name)
volume_name = volume.name
config = observer_core.get_config()
# delete the Volume on Syndicate.
try:
rc = observer_core.ensure_volume_absent(volume_name)
except Exception, e:
traceback.print_exc()
logger.error("Failed to delete volume %s", volume_name)
raise e
return rc
#-------------------------------
def sync_volumeaccessright_record(vac):
"""
Synchronize a volume access record
"""
syndicate_caps = "UNKNOWN" # for exception handling
# get arguments
config = observer_core.get_config()
principal_id = vac.owner_id.email
volume_name = vac.volume.name
syndicate_caps = observer_core.opencloud_caps_to_syndicate_caps(vac.cap_read_data, vac.cap_write_data, vac.cap_host_data)
logger.info("Sync VolumeAccessRight for (%s, %s)" % (principal_id, volume_name))
# validate config
try:
observer_secret = observer_core.get_syndicate_observer_secret(config.SYNDICATE_OBSERVER_SECRET)
except Exception, e:
traceback.print_exc()
logger.error("syndicatelib config is missing SYNDICATE_RG_DEFAULT_PORT, SYNDICATE_OBSERVER_SECRET")
raise e
# ensure the user exists and has credentials
try:
rc, user = observer_core.ensure_principal_exists(principal_id, observer_secret, is_admin=False, max_UGs=1100, max_RGs=1)
assert rc is True, "Failed to ensure principal %s exists (rc = %s,%s)" % (principal_id, rc, user)
except Exception, e:
traceback.print_exc()
logger.error("Failed to ensure user '%s' exists" % principal_id)
raise e
# grant the slice-owning user the ability to provision UGs in this Volume
try:
rc = observer_core.ensure_volume_access_right_exists(principal_id, volume_name, syndicate_caps)
assert rc is True, "Failed to set up Volume access right for slice %s in %s" % (principal_id, volume_name)
except Exception, e:
traceback.print_exc()
logger.error("Failed to set up Volume access right for slice %s in %s" % (principal_id, volume_name))
raise e
except Exception, e:
traceback.print_exc()
logger.error("Faoed to ensure user %s can access Volume %s with rights %s" % (principal_id, volume_name, syndicate_caps))
raise e
return True
#-------------------------------
def delete_volumeaccessright_record(vac):
"""
Ensure that a principal no longer has access to a particular volume.
"""
principal_id = vac.owner_id.email
volume_name = vac.volume.name
try:
observer_core.ensure_volume_access_right_absent(principal_id, volume_name)
except Exception, e:
traceback.print_exc()
logger.error("Failed to revoke access from %s to %s" % (principal_id, volume_name))
raise e
return True
#-------------------------------
def sync_volumeslice_record(vs):
"""
Synchronize a VolumeSlice record
"""
logger.info("Sync VolumeSlice for (%s, %s)" % (vs.volume_id.name, vs.slice_id.name))
# extract arguments...
principal_id = vs.slice_id.creator.email
slice_name = vs.slice_id.name
volume_name = vs.volume_id.name
syndicate_caps = observer_core.opencloud_caps_to_syndicate_caps(vs.cap_read_data, vs.cap_write_data, vs.cap_host_data)
RG_port = vs.RG_portnum
UG_port = vs.UG_portnum
slice_secret = None
gateway_name_prefix = None
config = observer_core.get_config()
try:
observer_secret = observer_core.get_syndicate_observer_secret(config.SYNDICATE_OBSERVER_SECRET)
RG_closure = config.SYNDICATE_RG_CLOSURE
observer_pkey_path = config.SYNDICATE_OBSERVER_PRIVATE_KEY
syndicate_url = config.SYNDICATE_SMI_URL
gateway_name_prefix = config.SYNDICATE_GATEWAY_NAME_PREFIX
except Exception, e:
traceback.print_exc()
logger.error("syndicatelib config is missing one or more of the following: SYNDICATE_OBSERVER_SECRET, SYNDICATE_RG_CLOSURE, SYNDICATE_OBSERVER_PRIVATE_KEY, SYNDICATE_SMI_URL")
raise e
# get secrets...
try:
observer_pkey_pem = observer_core.get_observer_private_key_pem(observer_pkey_path)
assert observer_pkey_pem is not None, "Failed to load Observer private key"
# get/create the slice secret
slice_secret = observer_core.get_or_create_slice_secret(observer_pkey_pem, slice_name)
assert slice_secret is not None, "Failed to get or create slice secret for %s" % slice_name
except Exception, e:
traceback.print_exc()
logger.error("Failed to load secret credentials")
raise e
# make sure there's a slice-controlled Syndicate user account for the slice owner
slice_principal_id = observer_core.make_slice_principal_id(principal_id, slice_name)
try:
rc, user = observer_core.ensure_principal_exists(slice_principal_id, observer_secret, is_admin=False, max_UGs=1100, max_RGs=1)
assert rc is True, "Failed to ensure principal %s exists (rc = %s,%s)" % (slice_principal_id, rc, user)
except Exception, e:
traceback.print_exc()
logger.error('Failed to ensure slice user %s exists' % slice_principal_id)
raise e
# grant the slice-owning user the ability to provision UGs in this Volume
try:
rc = observer_core.ensure_volume_access_right_exists(slice_principal_id, volume_name, syndicate_caps)
assert rc is True, "Failed to set up Volume access right for slice %s in %s" % (slice_principal_id, volume_name)
except Exception, e:
traceback.print_exc()
logger.error("Failed to set up Volume access right for slice %s in %s" % (slice_principal_id, volume_name))
raise e
# provision for the user the (single) RG the slice will instantiate in each VM.
try:
rc = observer_core.setup_global_RG(slice_principal_id, volume_name, gateway_name_prefix, slice_secret, RG_port, RG_closure)
except Exception, e:
logger.exception(e)
return False
# generate and save slice credentials....
try:
slice_cred = observer_core.save_slice_credentials(observer_pkey_pem, syndicate_url, slice_principal_id, volume_name, slice_name, observer_secret, slice_secret,
instantiate_UG=True, run_UG=True, UG_port=UG_port, UG_closure=None,
instantiate_RG=None, run_RG=True, RG_port=RG_port, RG_closure=None, RG_global_hostname="localhost",
instantiate_AG=None, run_AG=None, AG_port=0, AG_closure=None,
gateway_name_prefix=gateway_name_prefix,
existing_user=user)
assert slice_cred is not None, "Failed to generate slice credential for %s in %s" % (slice_principal_id, volume_name)
except Exception, e:
traceback.print_exc()
logger.error("Failed to generate slice credential for %s in %s" % (slice_principal_id, volume_name))
raise e
# ... and push them all out.
try:
rc = observer_push.push_credentials_to_slice(slice_name, slice_cred)
assert rc is True, "Failed to push credentials to slice %s for volume %s" % (slice_name, volume_name)
except Exception, e:
traceback.print_exc()
logger.error("Failed to push slice credentials to %s for volume %s" % (slice_name, volume_name))
raise e
return True
#-------------------------------
def delete_volumeslice_record(vs):
"""
Unmount a volume from a slice.
That is, prevent the slice from mounting it, by revoking the slice's principal's permissions and deleting its gateways.
"""
principal_id = vs.slice_id.creator.email
slice_name = vs.slice_id.name
volume_name = vs.volume_id.name
slice_principal_id = observer_core.make_slice_principal_id(principal_id, slice_name)
try:
observer_core.revoke_volume_access(slice_principal_id, volume_name)
except Exception, e:
traceback.print_exc()
logger.error("Failed to remove slice principal %s from %s" % (slice_principal_id, volume_name))
raise e
return True
|
apache-2.0
|
anirudhSK/chromium
|
tools/site_compare/operators/equals_with_mask.py
|
189
|
1589
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Compare two images for equality, subject to a mask."""
from PIL import Image
from PIL import ImageChops
import os.path
def Compare(file1, file2, **kwargs):
"""Compares two images to see if they're identical subject to a mask.
An optional directory containing masks is supplied. If a mask exists
which matches file1's name, areas under the mask where it's black
are ignored.
Args:
file1: path to first image to compare
file2: path to second image to compare
kwargs: ["maskdir"] contains the directory holding the masks
Returns:
None if the images are identical
A tuple of (errorstring, image) if they're not
"""
maskdir = None
if "maskdir" in kwargs:
maskdir = kwargs["maskdir"]
im1 = Image.open(file1)
im2 = Image.open(file2)
if im1.size != im2.size:
return ("The images are of different size (%r vs %r)" %
(im1.size, im2.size), im1)
diff = ImageChops.difference(im1, im2)
if maskdir:
maskfile = os.path.join(maskdir, os.path.basename(file1))
if os.path.exists(maskfile):
mask = Image.open(maskfile)
if mask.size != im1.size:
return ("The mask is of a different size than the images (%r vs %r)" %
(mask.size, im1.size), mask)
diff = ImageChops.multiply(diff, mask.convert(diff.mode))
if max(diff.getextrema()) != (0, 0):
return ("The images differ", diff)
else:
return None
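# --- Illustrative usage sketch, not part of the original module ---
# The image and mask paths below are hypothetical; Compare() returns None when
# the images match under the mask, or an (errorstring, image) tuple otherwise.
if __name__ == "__main__":
  result = Compare("expected.png", "actual.png", maskdir="masks")
  if result is None:
    print("images are identical under the mask")
  else:
    print(result[0])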
|
bsd-3-clause
|
jevinw/rec_utilities
|
babel_util/scripts/arxivmd_to_leveldb.py
|
1
|
1158
|
#!/usr/bin/env python3
import leveldb
import msgpack
import csv
from util.misc import Benchmark, open_file
REQUIRED_KEYS = {'title', 'paper_id', 'date'}
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Creates a LevelDB of TSV metadata in infile")
parser.add_argument('infile')
parser.add_argument('leveldb_path')
parser.add_argument('--benchmark-freq', default=100000, type=int)
parser.add_argument('--delimiter', '-d', default='\t')
args = parser.parse_args()
db = leveldb.LevelDB(args.leveldb_path,
write_buffer_size=100 << 20, # 100MB
block_cache_size=400 << 20) # 400MB
with open_file(args.infile) as ifs:
b = Benchmark(args.benchmark_freq)
reader = csv.DictReader(ifs, delimiter=args.delimiter)
for row in reader:
if not REQUIRED_KEYS.issubset(row.keys()):
print(row)
raise KeyError("Not all required keys present")
db.Put(row["paper_id"].encode(), msgpack.packb(row))
b.increment()
b.print_freq()
print(db.GetStats())
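    # --- Illustrative addition, not part of the original script ---
    # Minimal round-trip check: read the last inserted record back and decode
    # it with msgpack (assumes the input file contained at least one row).
    last_key = row["paper_id"].encode()
    print(msgpack.unpackb(bytes(db.Get(last_key))))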
|
agpl-3.0
|
hayd/contracts
|
src/contracts/library/__init__.py
|
3
|
1109
|
from .suggester import create_suggester
from .dummy import Any, Never
from .separate_context import SeparateContext
from .types_misc import Type, CheckType, Number
from .strings import *
from .lists import List
from .seq import Seq
from .tuple import Tuple
from .dicts import Dict
from .map import Map
from .sets import *
from .attributes import Attr
from .comparison import CheckOrder
from .arithmetic import Binary, Unary
from .compositions import OR, And, composite_contract, or_contract
from .variables import (BindVariable, VariableRef, misc_variables_contract,
int_variables_contract, misc_variables_ref,
int_variables_ref, scoped_variables_ref)
from .simple_values import EqualTo, SimpleRValue
try:
import numpy
except ImportError: # pragma: no cover
pass
else:
from .array import (ShapeContract, Shape, Array, ArrayConstraint, DType,
dtype, ArrayOR, ArrayAnd)
from .extensions import (identifier_expression, identifier_contract,
Extension, CheckCallable)
from .isinstance_imp import *
|
lgpl-3.0
|
EDUlib/edx-platform
|
lms/djangoapps/program_enrollments/admin.py
|
5
|
4269
|
"""
Admin tool for the Program Enrollments models
"""
from django.contrib import admin
from django.urls import reverse
from django.utils.html import format_html
from lms.djangoapps.program_enrollments.models import (
CourseAccessRoleAssignment,
ProgramCourseEnrollment,
ProgramEnrollment
)
class ProgramEnrollmentAdmin(admin.ModelAdmin):
"""
Admin tool for the ProgramEnrollment model
"""
# Config for instance listing.
list_display = (
'id',
'status',
'user',
'external_user_key',
'program_uuid',
'curriculum_uuid',
)
list_filter = ('status',)
search_fields = ('user__username', 'external_user_key', 'program_uuid')
# Config for instance editor.
raw_id_fields = ('user',)
def _pce_pe_id(pce):
"""
Generate a link to edit program enrollment, with ID and status in link text.
"""
pe = pce.program_enrollment
if not pe:
return None
link_url = reverse(
"admin:program_enrollments_programenrollment_change",
args=[pe.id],
)
link_text = "id={pe.id:05} ({pe.status})".format(pe=pe)
return format_html("<a href={}>{}</a>", link_url, link_text)
def _pce_pe_user(pce):
return pce.program_enrollment.user
def _pce_pe_external_user_key(pce):
return pce.program_enrollment.external_user_key
def _pce_pe_program_uuid(pce):
return pce.program_enrollment.program_uuid
def _pce_ce(pce):
"""
Generate text for course enrollment, including ID and is_active value.
"""
enrollment = pce.course_enrollment
if not enrollment:
return None
active_string = "Active" if enrollment.is_active else "Inactive"
return "id={enrollment.id:09} ({active_string})".format(
enrollment=enrollment, active_string=active_string
)
_pce_pe_id.short_description = "Program Enrollment"
_pce_pe_user.short_description = "Pgm Enrollment: User"
_pce_pe_external_user_key.short_description = "Pgm Enrollment: Ext User Key"
_pce_pe_program_uuid.short_description = "Pgm Enrollment: Pgm UUID"
_pce_ce.short_description = "Course Enrollment"
class ProgramCourseEnrollmentAdmin(admin.ModelAdmin):
"""
Admin tool for the ProgramCourseEnrollment model
"""
# Config for instance listing.
list_display = (
'id',
'status',
_pce_pe_id,
_pce_pe_user,
_pce_pe_external_user_key,
_pce_pe_program_uuid,
_pce_ce,
'course_key',
)
list_filter = ('status', 'course_key')
search_fields = (
'program_enrollment__user__username',
'program_enrollment__external_user_key',
'program_enrollment__program_uuid',
'course_key',
)
# Config for instance editor.
raw_id_fields = ('program_enrollment', 'course_enrollment')
def _pending_role_assignment_enrollment_id(pending_role_assignment):
"""
Generate a link to edit enrollment, with ID in link text.
"""
pce = pending_role_assignment.enrollment
if not pce:
return None
link_url = reverse(
"admin:program_enrollments_programcourseenrollment_change",
args=[pce.id],
)
link_text = f"id={pce.id:05}"
return format_html("<a href={}>{}</a>", link_url, link_text)
def _pending_role_assignment_external_user_key(pending_role_assignment):
"""
Generate the external user key for a pending role assignment
"""
pce = pending_role_assignment.enrollment
return _pce_pe_external_user_key(pce)
_pending_role_assignment_enrollment_id.short_description = "Program Course Enrollment"
_pending_role_assignment_external_user_key.short_description = "Pgm Enrollment: Ext User Key"
class CourseAccessRoleAssignmentAdmin(admin.ModelAdmin):
"""
Admin tool for the CourseAccessRoleAssignment model
"""
list_display = (
'id',
'role',
_pending_role_assignment_enrollment_id,
_pending_role_assignment_external_user_key
)
list_filter = ('role',)
raw_id_fields = ('enrollment',)
admin.site.register(ProgramEnrollment, ProgramEnrollmentAdmin)
admin.site.register(ProgramCourseEnrollment, ProgramCourseEnrollmentAdmin)
admin.site.register(CourseAccessRoleAssignment, CourseAccessRoleAssignmentAdmin)
|
agpl-3.0
|
autowitch/pypov
|
scenes/geomorphs/lib/environment.py
|
1
|
1895
|
from pypov.pov import Vector, Texture, Pigment, POV, File, Camera, Cylinder
from pypov.pov import LightSource, Sphere, Finish, Settings, Plane, Box, Cone
from pypov.pov import Checker, SkySphere, Union, GlobalSettings, Radiosity
from pypov.pov import Polygon_4, Difference, Object, parse_args
dark_glass = Texture(
Pigment(color=(0.025, 0.025, 0.025)),
Finish(reflection=0.05)
)
grass = Texture(Pigment(color=(0, 1, 0)))
def general(pov_file):
GlobalSettings(
Radiosity(
pretrace_start = 0.08,
pretrace_end = 0.005,
count = 400,
# nearest_count = 5,
error_bound = 0.1,
recursion_limit = 1,
# low_error_factor = .5,
# gray_threshold = 0.0,
# minimum_reuse = 0.015,
# brightness = 1,
# adc_bailout = 0.01/2,
media="on",
),
assumed_gamma = 1.0,
max_trace_level=100,
ambient_light = (0.1, 0.1, 0.1),
).write(pov_file)
Camera(
location=(-600, 150, -110),
look_at=(0, 25, 0)
# angle=30
).write(pov_file)
LightSource((100000, 100000, 100000), color=(0.6, 0.6, 0.5)).write(pov_file)
LightSource((150000, 150000, -100000), color=(0.25, 0.25, 0.3)).write(pov_file)
# LightSource((-150000, 150000, 100000), color=(0.25, 0.3, 0.25)).write(pov_file)
# LightSource((-150000, 150000, -100000), color=(0.3, 0.25, 0.25)).write(pov_file)
pov_file.declare('fn_Pigm', 'function { pigment { agate color_map { [0 color rgb 0] [1 color rgb 1] } } }')
def ground(pov_file, offset=0):
Plane(
(0, 1, 0), -0.0,
Texture(
Pigment(Checker((0.75, 0.75, 0.75), (1, 1, 1))),
scale=10
),
translate=(0, -1 * offset, 0)
).write(pov_file)
SkySphere(Pigment(color="MidnightBlue")).write(pov_file)
|
mit
|
cancro7/gem5
|
configs/dram/sweep.py
|
4
|
8090
|
# Copyright (c) 2014-2015 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
import optparse
import sys
import m5
from m5.objects import *
from m5.util import addToPath
from m5.internal.stats import periodicStatDump
addToPath('../')
from common import MemConfig
# this script is helpful to sweep the efficiency of a specific memory
# controller configuration, by varying the number of banks accessed,
# and the sequential stride size (how many bytes per activate), and
# observe what bus utilisation (bandwidth) is achieved
parser = optparse.OptionParser()
# Use a single-channel DDR3-1600 x64 by default
parser.add_option("--mem-type", type="choice", default="DDR3_1600_x64",
choices=MemConfig.mem_names(),
help = "type of memory to use")
parser.add_option("--mem-ranks", "-r", type="int", default=1,
help = "Number of ranks to iterate across")
parser.add_option("--rd_perc", type="int", default=100,
help = "Percentage of read commands")
parser.add_option("--mode", type="choice", default="DRAM",
choices=["DRAM", "DRAM_ROTATE"],
help = "DRAM: Random traffic; \
DRAM_ROTATE: Traffic rotating across banks and ranks")
parser.add_option("--addr_map", type="int", default=1,
help = "0: RoCoRaBaCh; 1: RoRaBaCoCh/RoRaBaChCo")
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
# at the moment we stay with the default open-adaptive page policy,
# and address mapping
# start with the system itself, using a multi-layer 2.0 GHz
# crossbar, delivering 64 bytes / 3 cycles (one header cycle)
# which amounts to 42.7 GByte/s per layer and thus per port
system = System(membus = IOXBar(width = 32))
system.clk_domain = SrcClockDomain(clock = '2.0GHz',
voltage_domain =
VoltageDomain(voltage = '1V'))
# we are fine with 256 MB memory for now
mem_range = AddrRange('256MB')
system.mem_ranges = [mem_range]
# do not worry about reserving space for the backing store
system.mmap_using_noreserve = True
# force a single channel to match the assumptions in the DRAM traffic
# generator
options.mem_channels = 1
options.external_memory_system = 0
options.tlm_memory = 0
options.elastic_trace_en = 0
MemConfig.config_mem(options, system)
# the following assumes that we are using the native DRAM
# controller, check to be sure
if not isinstance(system.mem_ctrls[0], m5.objects.DRAMCtrl):
fatal("This script assumes the memory is a DRAMCtrl subclass")
# there is no point slowing things down by saving any data
system.mem_ctrls[0].null = True
# Set the address mapping based on input argument
# Default to RoRaBaCoCh
if options.addr_map == 0:
system.mem_ctrls[0].addr_mapping = "RoCoRaBaCh"
elif options.addr_map == 1:
system.mem_ctrls[0].addr_mapping = "RoRaBaCoCh"
else:
fatal("Did not specify a valid address map argument")
# stay in each state for 0.25 ms, long enough to warm things up, and
# short enough to avoid hitting a refresh
period = 250000000
# this is where we go off piste, and print the traffic generator
# configuration that we will later use, crazy but it works
cfg_file_name = "configs/dram/sweep.cfg"
cfg_file = open(cfg_file_name, 'w')
# stay in each state as long as the dump/reset period, use the entire
# range, issue transactions of the right DRAM burst size, and match
# the DRAM maximum bandwidth to ensure that it is saturated
# get the number of banks
nbr_banks = system.mem_ctrls[0].banks_per_rank.value
# determine the burst length in bytes
burst_size = int((system.mem_ctrls[0].devices_per_rank.value *
system.mem_ctrls[0].device_bus_width.value *
system.mem_ctrls[0].burst_length.value) / 8)
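# as a hedged worked example (actual values depend on --mem-type): a
# DDR3-1600 x64 configuration with 8 devices per rank, an 8-bit device bus
# width and a burst length of 8 gives (8 * 8 * 8) / 8 = 64 bytes per burst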
# next, get the page size in bytes
page_size = system.mem_ctrls[0].devices_per_rank.value * \
system.mem_ctrls[0].device_rowbuffer_size.value
# match the maximum bandwidth of the memory, the parameter is in seconds
# and we need it in ticks (ps)
itt = system.mem_ctrls[0].tBURST.value * 1000000000000
# assume we start at 0
max_addr = mem_range.end
# use min of the page size and 512 bytes as that should be more than
# enough
max_stride = min(512, page_size)
# now we create the state by iterating over the stride size from burst
# size to the max stride, and from using only a single bank up to the
# number of banks available
nxt_state = 0
for bank in range(1, nbr_banks + 1):
for stride_size in range(burst_size, max_stride + 1, burst_size):
cfg_file.write("STATE %d %d %s %d 0 %d %d "
"%d %d %d %d %d %d %d %d %d\n" %
(nxt_state, period, options.mode, options.rd_perc,
max_addr, burst_size, itt, itt, 0, stride_size,
page_size, nbr_banks, bank, options.addr_map,
options.mem_ranks))
nxt_state = nxt_state + 1
cfg_file.write("INIT 0\n")
# go through the states one by one
for state in range(1, nxt_state):
cfg_file.write("TRANSITION %d %d 1\n" % (state - 1, state))
cfg_file.write("TRANSITION %d %d 1\n" % (nxt_state - 1, nxt_state - 1))
cfg_file.close()
# create a traffic generator, and point it to the file we just created
system.tgen = TrafficGen(config_file = cfg_file_name)
# add a communication monitor
system.monitor = CommMonitor()
# connect the traffic generator to the bus via a communication monitor
system.tgen.port = system.monitor.slave
system.monitor.master = system.membus.slave
# connect the system port even if it is not used in this example
system.system_port = system.membus.slave
# every period, dump and reset all stats
periodicStatDump(period)
# run Forrest, run!
root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'
m5.instantiate()
m5.simulate(nxt_state * period)
print "DRAM sweep with burst: %d, banks: %d, max stride: %d" % \
(burst_size, nbr_banks, max_stride)
|
bsd-3-clause
|
sshah-solarflare/Libvirt-PCI-passthrough-
|
examples/domain-events/events-python/event-test.py
|
3
|
17964
|
#!/usr/bin/python -u
#
#
#
#################################################################################
# Start off by implementing a general purpose event loop for anyones use
#################################################################################
import sys
import getopt
import os
import libvirt
import select
import errno
import time
import threading
#
# This general purpose event loop will support waiting for file handle
# I/O and errors events, as well as scheduling repeatable timers with
# a fixed interval.
#
# It is a pure python implementation based around the poll() API
#
class virEventLoopPure:
# This class contains the data we need to track for a
# single file handle
class virEventLoopPureHandle:
def __init__(self, handle, fd, events, cb, opaque):
self.handle = handle
self.fd = fd
self.events = events
self.cb = cb
self.opaque = opaque
def get_id(self):
return self.handle
def get_fd(self):
return self.fd
def get_events(self):
return self.events
def set_events(self, events):
self.events = events
def dispatch(self, events):
self.cb(self.handle,
self.fd,
events,
self.opaque[0],
self.opaque[1])
# This class contains the data we need to track for a
# single periodic timer
class virEventLoopPureTimer:
def __init__(self, timer, interval, cb, opaque):
self.timer = timer
self.interval = interval
self.cb = cb
self.opaque = opaque
self.lastfired = 0
def get_id(self):
return self.timer
def get_interval(self):
return self.interval
def set_interval(self, interval):
self.interval = interval
def get_last_fired(self):
return self.lastfired
def set_last_fired(self, now):
self.lastfired = now
def dispatch(self):
self.cb(self.timer,
self.opaque[0],
self.opaque[1])
def __init__(self, debug=False):
self.debugOn = debug
self.poll = select.poll()
self.pipetrick = os.pipe()
self.nextHandleID = 1
self.nextTimerID = 1
self.handles = []
self.timers = []
self.quit = False
# The event loop can be used from multiple threads at once.
# Specifically while the main thread is sleeping in poll()
# waiting for events to occur, another thread may come along
# and add/update/remove a file handle, or timer. When this
# happens we need to interrupt the poll() sleep in the other
# thread, so that it'll see the file handle / timer changes.
#
# Using OS level signals for this is very unreliable and
# hard to implement correctly. Thus we use the real classic
# "self pipe" trick. A anonymous pipe, with one end registered
# with the event loop for input events. When we need to force
# the main thread out of a poll() sleep, we simple write a
# single byte of data to the other end of the pipe.
self.debug("Self pipe watch %d write %d" %(self.pipetrick[0], self.pipetrick[1]))
self.poll.register(self.pipetrick[0], select.POLLIN)
def debug(self, msg):
if self.debugOn:
print msg
    # Calculate when the next timeout is due to occur, returning
# the absolute timestamp for the next timeout, or 0 if there is
# no timeout due
def next_timeout(self):
next = 0
for t in self.timers:
last = t.get_last_fired()
interval = t.get_interval()
if interval < 0:
continue
if next == 0 or (last + interval) < next:
next = last + interval
return next
# Lookup a virEventLoopPureHandle object based on file descriptor
def get_handle_by_fd(self, fd):
for h in self.handles:
if h.get_fd() == fd:
return h
return None
# Lookup a virEventLoopPureHandle object based on its event loop ID
def get_handle_by_id(self, handleID):
for h in self.handles:
if h.get_id() == handleID:
return h
return None
# This is the heart of the event loop, performing one single
# iteration. It asks when the next timeout is due, and then
    # calculates the maximum amount of time it is able to sleep
# for in poll() pending file handle events.
#
# It then goes into the poll() sleep.
#
    # When poll() returns, there will be zero or more file handle
# events which need to be dispatched to registered callbacks
# It may also be time to fire some periodic timers.
#
    # Due to the coarse granularity of scheduler timeslices, if
    # we ask for a sleep of 500ms in order to satisfy a timer, we
    # may return up to 1 scheduler timeslice early. So even though
# our sleep timeout was reached, the registered timer may not
# technically be at its expiry point. This leads to us going
# back around the loop with a crazy 5ms sleep. So when checking
# if timeouts are due, we allow a margin of 20ms, to avoid
# these pointless repeated tiny sleeps.
def run_once(self):
sleep = -1
next = self.next_timeout()
self.debug("Next timeout due at %d" % next)
if next > 0:
now = int(time.time() * 1000)
if now >= next:
sleep = 0
else:
sleep = (next - now) / 1000.0
self.debug("Poll with a sleep of %d" % sleep)
events = self.poll.poll(sleep)
# Dispatch any file handle events that occurred
for (fd, revents) in events:
            # See if the event was from the self-pipe
            # telling us to wake up. If so, then discard
            # the data and just continue
if fd == self.pipetrick[0]:
data = os.read(fd, 1)
continue
h = self.get_handle_by_fd(fd)
if h:
self.debug("Dispatch fd %d handle %d events %d" % (fd, h.get_id(), revents))
h.dispatch(self.events_from_poll(revents))
now = int(time.time() * 1000)
for t in self.timers:
interval = t.get_interval()
if interval < 0:
continue
want = t.get_last_fired() + interval
            # Deduct 20ms, since scheduler timeslice
# means we could be ever so slightly early
if now >= (want-20):
self.debug("Dispatch timer %d now %s want %s" % (t.get_id(), str(now), str(want)))
t.set_last_fired(now)
t.dispatch()
    # Actually run the event loop forever
def run_loop(self):
self.quit = False
while not self.quit:
self.run_once()
def interrupt(self):
os.write(self.pipetrick[1], 'c')
# Registers a new file handle 'fd', monitoring for 'events' (libvirt
# event constants), firing the callback cb() when an event occurs.
    # Returns a unique integer identifier for this handle, that should be
# used to later update/remove it
def add_handle(self, fd, events, cb, opaque):
handleID = self.nextHandleID + 1
self.nextHandleID = self.nextHandleID + 1
h = self.virEventLoopPureHandle(handleID, fd, events, cb, opaque)
self.handles.append(h)
self.poll.register(fd, self.events_to_poll(events))
self.interrupt()
self.debug("Add handle %d fd %d events %d" % (handleID, fd, events))
return handleID
# Registers a new timer with periodic expiry at 'interval' ms,
# firing cb() each time the timer expires. If 'interval' is -1,
# then the timer is registered, but not enabled
    # Returns a unique integer identifier for this handle, that should be
# used to later update/remove it
def add_timer(self, interval, cb, opaque):
timerID = self.nextTimerID + 1
self.nextTimerID = self.nextTimerID + 1
h = self.virEventLoopPureTimer(timerID, interval, cb, opaque)
self.timers.append(h)
self.interrupt()
self.debug("Add timer %d interval %d" % (timerID, interval))
return timerID
# Change the set of events to be monitored on the file handle
def update_handle(self, handleID, events):
h = self.get_handle_by_id(handleID)
if h:
h.set_events(events)
self.poll.unregister(h.get_fd())
self.poll.register(h.get_fd(), self.events_to_poll(events))
self.interrupt()
self.debug("Update handle %d fd %d events %d" % (handleID, h.get_fd(), events))
# Change the periodic frequency of the timer
def update_timer(self, timerID, interval):
for h in self.timers:
if h.get_id() == timerID:
h.set_interval(interval);
self.interrupt()
self.debug("Update timer %d interval %d" % (timerID, interval))
break
# Stop monitoring for events on the file handle
def remove_handle(self, handleID):
handles = []
for h in self.handles:
if h.get_id() == handleID:
self.poll.unregister(h.get_fd())
self.debug("Remove handle %d fd %d" % (handleID, h.get_fd()))
else:
handles.append(h)
self.handles = handles
self.interrupt()
# Stop firing the periodic timer
def remove_timer(self, timerID):
timers = []
for h in self.timers:
if h.get_id() != timerID:
timers.append(h)
self.debug("Remove timer %d" % timerID)
self.timers = timers
self.interrupt()
# Convert from libvirt event constants, to poll() events constants
def events_to_poll(self, events):
ret = 0
if events & libvirt.VIR_EVENT_HANDLE_READABLE:
ret |= select.POLLIN
if events & libvirt.VIR_EVENT_HANDLE_WRITABLE:
ret |= select.POLLOUT
if events & libvirt.VIR_EVENT_HANDLE_ERROR:
ret |= select.POLLERR;
if events & libvirt.VIR_EVENT_HANDLE_HANGUP:
ret |= select.POLLHUP;
return ret
# Convert from poll() event constants, to libvirt events constants
def events_from_poll(self, events):
ret = 0;
if events & select.POLLIN:
ret |= libvirt.VIR_EVENT_HANDLE_READABLE;
if events & select.POLLOUT:
ret |= libvirt.VIR_EVENT_HANDLE_WRITABLE;
if events & select.POLLNVAL:
ret |= libvirt.VIR_EVENT_HANDLE_ERROR;
if events & select.POLLERR:
ret |= libvirt.VIR_EVENT_HANDLE_ERROR;
if events & select.POLLHUP:
ret |= libvirt.VIR_EVENT_HANDLE_HANGUP;
return ret;
###########################################################################
# Now glue an instance of the general event loop into libvirt's event loop
###########################################################################
# This single global instance of the event loop will be used for
# monitoring libvirt events
eventLoop = virEventLoopPure(debug=False)
# This keeps track of what thread is running the event loop,
# (if it is run in a background thread)
eventLoopThread = None
# These next set of 6 methods are the glue between the official
# libvirt events API, and our particular impl of the event loop
#
# There is no reason why the 'virEventLoopPure' has to be used.
# An application could easily make these 6 glue methods hook into
# another event loop such as GLib's, or something like the python
# Twisted event framework.
def virEventAddHandleImpl(fd, events, cb, opaque):
global eventLoop
return eventLoop.add_handle(fd, events, cb, opaque)
def virEventUpdateHandleImpl(handleID, events):
global eventLoop
return eventLoop.update_handle(handleID, events)
def virEventRemoveHandleImpl(handleID):
global eventLoop
return eventLoop.remove_handle(handleID)
def virEventAddTimerImpl(interval, cb, opaque):
global eventLoop
return eventLoop.add_timer(interval, cb, opaque)
def virEventUpdateTimerImpl(timerID, interval):
global eventLoop
return eventLoop.update_timer(timerID, interval)
def virEventRemoveTimerImpl(timerID):
global eventLoop
return eventLoop.remove_timer(timerID)
# This tells libvirt what event loop implementation it
# should use
def virEventLoopPureRegister():
libvirt.virEventRegisterImpl(virEventAddHandleImpl,
virEventUpdateHandleImpl,
virEventRemoveHandleImpl,
virEventAddTimerImpl,
virEventUpdateTimerImpl,
virEventRemoveTimerImpl)
# Directly run the event loop in the current thread
def virEventLoopPureRun():
global eventLoop
eventLoop.run_loop()
# Spawn a background thread to run the event loop
def virEventLoopPureStart():
global eventLoopThread
virEventLoopPureRegister()
eventLoopThread = threading.Thread(target=virEventLoopPureRun, name="libvirtEventLoop")
eventLoopThread.setDaemon(True)
eventLoopThread.start()
##########################################################################
# Everything that now follows is a simple demo of domain lifecycle events
##########################################################################
def eventToString(event):
eventStrings = ( "Defined",
"Undefined",
"Started",
"Suspended",
"Resumed",
"Stopped" );
return eventStrings[event];
def detailToString(event, detail):
eventStrings = (
( "Added", "Updated" ),
( "Removed" ),
( "Booted", "Migrated", "Restored", "Snapshot" ),
( "Paused", "Migrated", "IOError", "Watchdog" ),
( "Unpaused", "Migrated"),
( "Shutdown", "Destroyed", "Crashed", "Migrated", "Saved", "Failed", "Snapshot")
)
return eventStrings[event][detail]
def myDomainEventCallback1 (conn, dom, event, detail, opaque):
print "myDomainEventCallback1 EVENT: Domain %s(%s) %s %s" % (dom.name(), dom.ID(),
eventToString(event),
detailToString(event, detail))
def myDomainEventCallback2 (conn, dom, event, detail, opaque):
print "myDomainEventCallback2 EVENT: Domain %s(%s) %s %s" % (dom.name(), dom.ID(),
eventToString(event),
detailToString(event, detail))
def myDomainEventRebootCallback(conn, dom, opaque):
print "myDomainEventRebootCallback: Domain %s(%s)" % (dom.name(), dom.ID())
def myDomainEventRTCChangeCallback(conn, dom, utcoffset, opaque):
print "myDomainEventRTCChangeCallback: Domain %s(%s) %d" % (dom.name(), dom.ID(), utcoffset)
def myDomainEventWatchdogCallback(conn, dom, action, opaque):
print "myDomainEventWatchdogCallback: Domain %s(%s) %d" % (dom.name(), dom.ID(), action)
def myDomainEventIOErrorCallback(conn, dom, srcpath, devalias, action, opaque):
print "myDomainEventIOErrorCallback: Domain %s(%s) %s %s %d" % (dom.name(), dom.ID(), srcpath, devalias, action)
def myDomainEventGraphicsCallback(conn, dom, phase, localAddr, remoteAddr, authScheme, subject, opaque):
print "myDomainEventGraphicsCallback: Domain %s(%s) %d %s" % (dom.name(), dom.ID(), phase, authScheme)
def usage():
print "usage: "+os.path.basename(sys.argv[0])+" [uri]"
print " uri will default to qemu:///system"
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["help"] )
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
if len(sys.argv) > 1:
uri = sys.argv[1]
else:
uri = "qemu:///system"
print "Using uri:" + uri
# Run a background thread with the event loop
virEventLoopPureStart()
vc = libvirt.open(uri)
# Close connection on exit (to test cleanup paths)
old_exitfunc = getattr(sys, 'exitfunc', None)
def exit():
print "Closing " + str(vc)
vc.close()
if (old_exitfunc): old_exitfunc()
sys.exitfunc = exit
#Add 2 callbacks to prove this works with more than just one
vc.domainEventRegister(myDomainEventCallback1,None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, myDomainEventCallback2, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_REBOOT, myDomainEventRebootCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_RTC_CHANGE, myDomainEventRTCChangeCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_IO_ERROR, myDomainEventIOErrorCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_WATCHDOG, myDomainEventWatchdogCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_GRAPHICS, myDomainEventGraphicsCallback, None)
# The rest of your app would go here normally, but for sake
# of demo we'll just go to sleep. The other option is to
# run the event loop in your main thread if your app is
# totally event based.
while 1:
time.sleep(1)
if __name__ == "__main__":
main()
|
lgpl-2.1
|
ATIX-AG/ansible
|
lib/ansible/modules/system/hostname.py
|
34
|
24853
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Hiroaki Nakamura <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: hostname
author:
- Adrian Likins (@alikins)
- Hideki Saito (@saito-hideki)
version_added: "1.4"
short_description: Manage hostname
requirements: [ hostname ]
description:
- Set system's hostname, supports most OSs/Distributions, including those using systemd.
- Note, this module does *NOT* modify C(/etc/hosts). You need to modify it yourself using other modules like template or replace.
- Windows, HP-UX and AIX are not currently supported.
options:
name:
description:
- Name of the host
required: true
'''
EXAMPLES = '''
- hostname:
name: web01
'''
import os
import socket
import traceback
from ansible.module_utils.basic import (
AnsibleModule,
get_distribution,
get_distribution_version,
get_platform,
load_platform_subclass,
)
from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
from ansible.module_utils._text import to_native
class UnimplementedStrategy(object):
def __init__(self, module):
self.module = module
def update_current_and_permanent_hostname(self):
self.unimplemented_error()
def update_current_hostname(self):
self.unimplemented_error()
def update_permanent_hostname(self):
self.unimplemented_error()
def get_current_hostname(self):
self.unimplemented_error()
def set_current_hostname(self, name):
self.unimplemented_error()
def get_permanent_hostname(self):
self.unimplemented_error()
def set_permanent_hostname(self, name):
self.unimplemented_error()
def unimplemented_error(self):
platform = get_platform()
distribution = get_distribution()
if distribution is not None:
msg_platform = '%s (%s)' % (platform, distribution)
else:
msg_platform = platform
self.module.fail_json(
msg='hostname module cannot be used on platform %s' % msg_platform)
class Hostname(object):
"""
This is a generic Hostname manipulation class that is subclassed
based on platform.
A subclass may wish to assign a different strategy instance to self.strategy.
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
strategy_class = UnimplementedStrategy
def __new__(cls, *args, **kwargs):
return load_platform_subclass(Hostname, args, kwargs)
def __init__(self, module):
self.module = module
self.name = module.params['name']
if self.platform == 'Linux' and ServiceMgrFactCollector.is_systemd_managed(module):
self.strategy = SystemdStrategy(module)
else:
self.strategy = self.strategy_class(module)
def update_current_and_permanent_hostname(self):
return self.strategy.update_current_and_permanent_hostname()
def get_current_hostname(self):
return self.strategy.get_current_hostname()
def set_current_hostname(self, name):
self.strategy.set_current_hostname(name)
def get_permanent_hostname(self):
return self.strategy.get_permanent_hostname()
def set_permanent_hostname(self, name):
self.strategy.set_permanent_hostname(name)
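# Illustrative sketch (not part of the module): per the Hostname docstring above,
# every platform subclass defines `platform` and `distribution` and points
# `strategy_class` at a strategy, mirroring the concrete classes further below.
# A hypothetical subclass would look like:
#
#   class ExampleLinuxHostname(Hostname):
#       platform = 'Linux'
#       distribution = 'Exampledistro'   # hypothetical distribution name
#       strategy_class = GenericStrategy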
class GenericStrategy(object):
"""
This is a generic Hostname manipulation strategy class.
A subclass may wish to override some or all of these methods.
- get_current_hostname()
- get_permanent_hostname()
- set_current_hostname(name)
- set_permanent_hostname(name)
"""
def __init__(self, module):
self.module = module
self.hostname_cmd = self.module.get_bin_path('hostname', True)
self.changed = False
def update_current_and_permanent_hostname(self):
self.update_current_hostname()
self.update_permanent_hostname()
return self.changed
def update_current_hostname(self):
name = self.module.params['name']
current_name = self.get_current_hostname()
if current_name != name:
if not self.module.check_mode:
self.set_current_hostname(name)
self.changed = True
def update_permanent_hostname(self):
name = self.module.params['name']
permanent_name = self.get_permanent_hostname()
if permanent_name != name:
if not self.module.check_mode:
self.set_permanent_hostname(name)
self.changed = True
def get_current_hostname(self):
cmd = [self.hostname_cmd]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
def get_permanent_hostname(self):
return None
def set_permanent_hostname(self, name):
pass
class DebianStrategy(GenericStrategy):
"""
This is a Debian family Hostname manipulation strategy class - it edits
the /etc/hostname file.
"""
HOSTNAME_FILE = '/etc/hostname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError as e:
self.module.fail_json(msg="failed to write file: %s" %
to_native(e), exception=traceback.format_exc())
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
class SLESStrategy(GenericStrategy):
"""
This is a SLES Hostname strategy class - it edits the
/etc/HOSTNAME file.
"""
HOSTNAME_FILE = '/etc/HOSTNAME'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError as e:
self.module.fail_json(msg="failed to write file: %s" %
to_native(e), exception=traceback.format_exc())
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
class RedHatStrategy(GenericStrategy):
"""
This is a Redhat Hostname strategy class - it edits the
/etc/sysconfig/network file.
"""
NETWORK_FILE = '/etc/sysconfig/network'
def get_permanent_hostname(self):
try:
f = open(self.NETWORK_FILE, 'rb')
try:
for line in f.readlines():
if line.startswith('HOSTNAME'):
k, v = line.split('=')
return v.strip()
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
lines = []
found = False
f = open(self.NETWORK_FILE, 'rb')
try:
for line in f.readlines():
if line.startswith('HOSTNAME'):
lines.append("HOSTNAME=%s\n" % name)
found = True
else:
lines.append(line)
finally:
f.close()
if not found:
lines.append("HOSTNAME=%s\n" % name)
f = open(self.NETWORK_FILE, 'w+')
try:
f.writelines(lines)
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
class AlpineStrategy(GenericStrategy):
"""
This is an Alpine Linux Hostname manipulation strategy class - it edits
the /etc/hostname file, then runs hostname -F /etc/hostname.
"""
HOSTNAME_FILE = '/etc/hostname'
def update_current_and_permanent_hostname(self):
self.update_permanent_hostname()
self.update_current_hostname()
return self.changed
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError as e:
self.module.fail_json(msg="failed to write file: %s" %
to_native(e), exception=traceback.format_exc())
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
def set_current_hostname(self, name):
cmd = [self.hostname_cmd, '-F', self.HOSTNAME_FILE]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
class SystemdStrategy(GenericStrategy):
"""
This is a Systemd hostname manipulation strategy class - it uses
the hostnamectl command.
"""
def get_current_hostname(self):
cmd = ['hostname']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
if len(name) > 64:
self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
cmd = ['hostnamectl', '--transient', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
def get_permanent_hostname(self):
cmd = ['hostnamectl', '--static', 'status']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_permanent_hostname(self, name):
if len(name) > 64:
self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
cmd = ['hostnamectl', '--pretty', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
cmd = ['hostnamectl', '--static', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
class OpenRCStrategy(GenericStrategy):
"""
This is a Gentoo (OpenRC) Hostname manipulation strategy class - it edits
the /etc/conf.d/hostname file.
"""
HOSTNAME_FILE = '/etc/conf.d/hostname'
def get_permanent_hostname(self):
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
for line in f:
line = line.strip()
if line.startswith('hostname='):
return line[10:].strip('"')
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
finally:
f.close()
return None
def set_permanent_hostname(self, name):
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
lines = [x.strip() for x in f]
for i, line in enumerate(lines):
if line.startswith('hostname='):
lines[i] = 'hostname="%s"' % name
break
f.close()
f = open(self.HOSTNAME_FILE, 'w')
f.write('\n'.join(lines) + '\n')
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
finally:
f.close()
class OpenBSDStrategy(GenericStrategy):
"""
This is an OpenBSD family Hostname manipulation strategy class - it edits
the /etc/myname file.
"""
HOSTNAME_FILE = '/etc/myname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError as e:
self.module.fail_json(msg="failed to write file: %s" %
to_native(e), exception=traceback.format_exc())
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
class SolarisStrategy(GenericStrategy):
"""
This is a Solaris 11 or later Hostname manipulation strategy class - it
executes the hostname command.
"""
def set_current_hostname(self, name):
cmd_option = '-t'
cmd = [self.hostname_cmd, cmd_option, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
def get_permanent_hostname(self):
fmri = 'svc:/system/identity:node'
pattern = 'config/nodename'
cmd = '/usr/sbin/svccfg -s %s listprop -o value %s' % (fmri, pattern)
rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_permanent_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
class FreeBSDStrategy(GenericStrategy):
"""
This is a FreeBSD hostname manipulation strategy class - it edits
the /etc/rc.conf.d/hostname file.
"""
HOSTNAME_FILE = '/etc/rc.conf.d/hostname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("hostname=temporarystub\n")
except IOError as e:
self.module.fail_json(msg="failed to write file: %s" %
to_native(e), exception=traceback.format_exc())
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
for line in f:
line = line.strip()
if line.startswith('hostname='):
return line[10:].strip('"')
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
finally:
f.close()
return None
def set_permanent_hostname(self, name):
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
lines = [x.strip() for x in f]
for i, line in enumerate(lines):
if line.startswith('hostname='):
lines[i] = 'hostname="%s"' % name
break
f.close()
f = open(self.HOSTNAME_FILE, 'w')
f.write('\n'.join(lines) + '\n')
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
finally:
f.close()
class FedoraHostname(Hostname):
platform = 'Linux'
distribution = 'Fedora'
strategy_class = SystemdStrategy
class SLESHostname(Hostname):
platform = 'Linux'
distribution = 'Suse linux enterprise server '
try:
distribution_version = get_distribution_version()
# casting to float may raise ValueError on non-SLES systems; we use float for a little more safety over int
if distribution_version and 10 <= float(distribution_version) <= 12:
strategy_class = SLESStrategy
else:
raise ValueError()
except ValueError:
strategy_class = UnimplementedStrategy
class OpenSUSEHostname(Hostname):
platform = 'Linux'
distribution = 'Opensuse '
strategy_class = SystemdStrategy
class ArchHostname(Hostname):
platform = 'Linux'
distribution = 'Arch'
strategy_class = SystemdStrategy
class RedHat5Hostname(Hostname):
platform = 'Linux'
distribution = 'Redhat'
strategy_class = RedHatStrategy
class RHELHostname(Hostname):
platform = 'Linux'
distribution = 'Red hat enterprise linux'
strategy_class = RedHatStrategy
class RedHatServerHostname(Hostname):
platform = 'Linux'
distribution = 'Red hat enterprise linux server'
strategy_class = RedHatStrategy
class RedHatWorkstationHostname(Hostname):
platform = 'Linux'
distribution = 'Red hat enterprise linux workstation'
strategy_class = RedHatStrategy
class RedHatAtomicHostname(Hostname):
platform = 'Linux'
distribution = 'Red hat enterprise linux atomic host'
strategy_class = RedHatStrategy
class CentOSHostname(Hostname):
platform = 'Linux'
distribution = 'Centos'
strategy_class = RedHatStrategy
class CentOSLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Centos linux'
strategy_class = RedHatStrategy
class CloudlinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Cloudlinux'
strategy_class = RedHatStrategy
class CloudlinuxServerHostname(Hostname):
platform = 'Linux'
distribution = 'Cloudlinux server'
strategy_class = RedHatStrategy
class ScientificHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific'
strategy_class = RedHatStrategy
class ScientificLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific linux'
strategy_class = RedHatStrategy
class ScientificLinuxCERNHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific linux cern slc'
strategy_class = RedHatStrategy
class OracleLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Oracle linux server'
strategy_class = RedHatStrategy
class VirtuozzoLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Virtuozzo linux'
strategy_class = RedHatStrategy
class AmazonLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Amazon'
strategy_class = RedHatStrategy
class DebianHostname(Hostname):
platform = 'Linux'
distribution = 'Debian'
strategy_class = DebianStrategy
class KaliHostname(Hostname):
platform = 'Linux'
distribution = 'Kali'
strategy_class = DebianStrategy
class UbuntuHostname(Hostname):
platform = 'Linux'
distribution = 'Ubuntu'
strategy_class = DebianStrategy
class LinuxmintHostname(Hostname):
platform = 'Linux'
distribution = 'Linuxmint'
strategy_class = DebianStrategy
class LinaroHostname(Hostname):
platform = 'Linux'
distribution = 'Linaro'
strategy_class = DebianStrategy
class DevuanHostname(Hostname):
platform = 'Linux'
distribution = 'Devuan'
strategy_class = DebianStrategy
class GentooHostname(Hostname):
platform = 'Linux'
distribution = 'Gentoo base system'
strategy_class = OpenRCStrategy
class ALTLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Altlinux'
strategy_class = RedHatStrategy
class AlpineLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Alpine'
strategy_class = AlpineStrategy
class OpenBSDHostname(Hostname):
platform = 'OpenBSD'
distribution = None
strategy_class = OpenBSDStrategy
class SolarisHostname(Hostname):
platform = 'SunOS'
distribution = None
strategy_class = SolarisStrategy
class FreeBSDHostname(Hostname):
platform = 'FreeBSD'
distribution = None
strategy_class = FreeBSDStrategy
class NetBSDHostname(Hostname):
platform = 'NetBSD'
distribution = None
strategy_class = FreeBSDStrategy
class NeonHostname(Hostname):
platform = 'Linux'
distribution = 'Neon'
strategy_class = DebianStrategy
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True)
),
supports_check_mode=True,
)
hostname = Hostname(module)
name = module.params['name']
current_hostname = hostname.get_current_hostname()
permanent_hostname = hostname.get_permanent_hostname()
changed = hostname.update_current_and_permanent_hostname()
if name != current_hostname:
name_before = current_hostname
elif name != permanent_hostname:
name_before = permanent_hostname
kw = dict(changed=changed, name=name,
ansible_facts=dict(ansible_hostname=name.split('.')[0],
ansible_nodename=name,
ansible_fqdn=socket.getfqdn(),
ansible_domain='.'.join(socket.getfqdn().split('.')[1:])))
if changed:
kw['diff'] = {'after': 'hostname = ' + name + '\n',
'before': 'hostname = ' + name_before + '\n'}
module.exit_json(**kw)
if __name__ == '__main__':
main()
|
gpl-3.0
|
cowenberg/snap
|
snap_website/users/views.py
|
1
|
1850
|
from django.shortcuts import render, redirect
from django.http.response import HttpResponse
from users.decorators import user_is_anonymous, user_is_authenticated
from users.models import UserRegistrationForm, UserAuthenticationForm
from django.contrib.auth import logout
from django.contrib import messages
REQUEST_METHOD = 'POST'
TEMPLATE_FORM = 'form.tpl'
ROUTE_REDIRECT = 'home'
CONNECTION_TITLE = 'Connection'
CONNECTION_SUBTITLE = 'Connect yourself and manage your projects'
CONNECTION_MESSAGE = 'You are now connected.'
DISCONNECTION_MESSAGE = 'You are now disconnected.'
REGISTER_TITLE = 'Subscribe'
REGISTER_SUBTITLE = 'Join us and make your apps the best apps ever!'
REGISTER_MESSAGE = 'Thank you for registering to our services'
@user_is_anonymous
def user_register(request):
title = REGISTER_TITLE
subtitle = REGISTER_SUBTITLE
if request.method == REQUEST_METHOD:
form = UserRegistrationForm(data=request.POST)
if form.is_valid():
form.save()
messages.success(request, REGISTER_MESSAGE)
return redirect(ROUTE_REDIRECT)
else:
form = UserRegistrationForm()
return render(request, TEMPLATE_FORM, locals())
@user_is_anonymous
def user_login(request):
title = CONNECTION_TITLE
subtitle = CONNECTION_SUBTITLE
if request.method == REQUEST_METHOD:
form = UserAuthenticationForm(data=request.POST)
if form.is_valid():
form.authenticate_user(request)
messages.success(request, CONNECTION_MESSAGE)
return redirect(ROUTE_REDIRECT)
else:
form = UserAuthenticationForm()
return render(request, TEMPLATE_FORM, locals())
@user_is_authenticated
def user_logout(request):
logout(request)
messages.success(request, DISCONNECTION_MESSAGE)
return redirect(ROUTE_REDIRECT)
|
gpl-2.0
|
rnicoll/bitcoin
|
test/functional/p2p_unrequested_blocks.py
|
35
|
13067
|
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Setup: two nodes, node0 + node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one P2PInterface connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c. Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(ie to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
import time
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
from test_framework.messages import CBlockHeader, CInv, MSG_BLOCK, msg_block, msg_headers, msg_inv
from test_framework.p2p import p2p_lock, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class AcceptBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ["-minimumchainwork=0x10"]]
def setup_network(self):
self.setup_nodes()
def run_test(self):
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
# 1. Have nodes mine a block (leave IBD)
[n.generatetoaddress(1, n.get_deterministic_priv_key().address) for n in self.nodes]
tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]
# 2. Send one block that builds on each tip.
# This should be accepted by node0
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_and_ping(msg_block(blocks_h2[0]))
min_work_node.send_and_ping(msg_block(blocks_h2[1]))
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 1)
self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
# 3. Send another block that builds on genesis.
block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
block_time += 1
block_h1f.solve()
test_node.send_and_ping(msg_block(block_h1f))
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h1f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
# 4. Send another two blocks that build on the fork.
block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
block_time += 1
block_h2f.solve()
test_node.send_and_ping(msg_block(block_h2f))
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h2f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
# But this block should be accepted by node since it has equal work.
self.nodes[0].getblock(block_h2f.hash)
self.log.info("Second height 2 block accepted, but not reorg'ed to")
# 4b. Now send another block that builds on the forking chain.
block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
block_h3.solve()
test_node.send_and_ping(msg_block(block_h3))
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h3.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
self.nodes[0].getblock(block_h3.hash)
# But this block should be accepted by node since it has more work.
self.nodes[0].getblock(block_h3.hash)
self.log.info("Unrequested more-work block accepted")
# 4c. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node (as long as it is not missing any headers)
tip = block_h3
all_blocks = []
for i in range(288):
next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
next_block.solve()
all_blocks.append(next_block)
tip = next_block
# Now send the block at height 5 and check that it wasn't accepted (missing header)
test_node.send_and_ping(msg_block(all_blocks[1]))
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
# The block at height 5 should be accepted if we provide the missing header, though
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(all_blocks[0]))
test_node.send_message(headers_message)
test_node.send_and_ping(msg_block(all_blocks[1]))
self.nodes[0].getblock(all_blocks[1].hash)
# Now send the blocks in all_blocks
for i in range(288):
test_node.send_message(msg_block(all_blocks[i]))
test_node.sync_with_ping()
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
# The node should have requested the blocks at some point, so
# disconnect/reconnect first
self.nodes[0].disconnect_p2ps()
self.nodes[1].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
test_node.send_and_ping(msg_block(block_h1f))
assert_equal(self.nodes[0].getblockcount(), 2)
self.log.info("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with p2p_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
test_node.send_message(msg_inv([CInv(MSG_BLOCK, block_h3.sha256)]))
test_node.sync_with_ping()
with p2p_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, block_h1f.sha256)
self.log.info("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_and_ping(msg_block(block_h1f))
assert_equal(self.nodes[0].getblockcount(), 290)
self.nodes[0].getblock(all_blocks[286].hash)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
self.log.info("Successfully reorged to longer chain")
# 8. Create a chain which is invalid at a height longer than the
# current chain, but which has more blocks on top of that
block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
block_289f.solve()
block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
block_290f.solve()
block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
# block_291 spends a coinbase below maturity!
block_291.vtx.append(create_tx_with_script(block_290f.vtx[0], 0, script_sig=b"42", amount=1))
block_291.hashMerkleRoot = block_291.calc_merkle_root()
block_291.solve()
block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
block_292.solve()
# Now send all the headers on the chain and enough blocks to trigger reorg
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_289f))
headers_message.headers.append(CBlockHeader(block_290f))
headers_message.headers.append(CBlockHeader(block_291))
headers_message.headers.append(CBlockHeader(block_292))
test_node.send_and_ping(headers_message)
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_292.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
test_node.send_message(msg_block(block_289f))
test_node.send_and_ping(msg_block(block_290f))
self.nodes[0].getblock(block_289f.hash)
self.nodes[0].getblock(block_290f.hash)
test_node.send_message(msg_block(block_291))
# At this point we've sent an obviously-bogus block, wait for full processing
# without assuming whether we will be disconnected or not
try:
# Only wait a short while so the test doesn't take forever if we do get
# disconnected
test_node.sync_with_ping(timeout=1)
except AssertionError:
test_node.wait_for_disconnect()
self.nodes[0].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
# We should have failed reorg and switched back to 290 (but have block 291)
assert_equal(self.nodes[0].getblockcount(), 290)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
# Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
block_293.solve()
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_293))
test_node.send_message(headers_message)
test_node.wait_for_disconnect()
# 9. Connect node1 to node0 and ensure it is able to sync
self.connect_nodes(0, 1)
self.sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Successfully synced nodes 1 and 0")
if __name__ == '__main__':
AcceptBlockTest().main()
|
mit
|
40223141/0505
|
static/Brython3.1.1-20150328-091302/Lib/_thread.py
|
740
|
4879
|
"""Drop-in replacement for the thread module.
Meant to be used as a brain-dead substitute so that threaded code does
not need to be rewritten for when the thread module is not present.
Suggested usage is::
try:
import _thread
except ImportError:
import _dummy_thread as _thread
"""
# Exports only things specified by thread documentation;
# skipping obsolete synonyms allocate(), start_new(), exit_thread().
__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
'interrupt_main', 'LockType']
# A dummy value
TIMEOUT_MAX = 2**31
# NOTE: this module can be imported early in the extension building process,
# and so top level imports of other modules should be avoided. Instead, all
# imports are done when needed on a function-by-function basis. Since threads
# are disabled, the import lock should not be an issue anyway (??).
error = RuntimeError
def start_new_thread(function, args, kwargs={}):
"""Dummy implementation of _thread.start_new_thread().
Compatibility is maintained by making sure that ``args`` is a
tuple and ``kwargs`` is a dictionary. If an exception is raised
and it is SystemExit (which can be done by _thread.exit()) it is
caught and nothing is done; all other exceptions are printed out
by using traceback.print_exc().
If the executed function calls interrupt_main the KeyboardInterrupt will be
raised when the function returns.
"""
if type(args) != type(tuple()):
raise TypeError("2nd arg must be a tuple")
if type(kwargs) != type(dict()):
raise TypeError("3rd arg must be a dict")
global _main
_main = False
try:
function(*args, **kwargs)
except SystemExit:
pass
except:
import traceback
traceback.print_exc()
_main = True
global _interrupt
if _interrupt:
_interrupt = False
raise KeyboardInterrupt
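# Illustrative sketch (assumes this dummy module, where the "thread" runs
# synchronously in the caller): start_new_thread() just invokes the callable
# before returning, so afterwards its side effects are already visible, e.g.
#
#   results = []
#   start_new_thread(results.append, (42,))
#   assert results == [42]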
def exit():
"""Dummy implementation of _thread.exit()."""
raise SystemExit
def get_ident():
"""Dummy implementation of _thread.get_ident().
Since this module should only be used when the _thread module is not
available, it is safe to assume that the current process is the
only thread. Thus a constant can be safely returned.
"""
return -1
def allocate_lock():
"""Dummy implementation of _thread.allocate_lock()."""
return LockType()
def stack_size(size=None):
"""Dummy implementation of _thread.stack_size()."""
if size is not None:
raise error("setting thread stack size not supported")
return 0
class LockType(object):
"""Class implementing dummy implementation of _thread.LockType.
Compatibility is maintained by maintaining self.locked_status
which is a boolean that stores the state of the lock. Pickling of
the lock, though, should not be done since if the _thread module is
then used with an unpickled ``lock()`` from here problems could
occur from this class not having atomic methods.
"""
def __init__(self):
self.locked_status = False
def acquire(self, waitflag=None, timeout=-1):
"""Dummy implementation of acquire().
For blocking calls, self.locked_status is automatically set to
True and returned appropriately based on value of
``waitflag``. If it is non-blocking, then the value is
actually checked and not set if it is already acquired. This
is all done so that threading.Condition's assert statements
aren't triggered and throw a little fit.
"""
if waitflag is None or waitflag:
self.locked_status = True
return True
else:
if not self.locked_status:
self.locked_status = True
return True
else:
if timeout > 0:
import time
time.sleep(timeout)
return False
__enter__ = acquire
def __exit__(self, typ, val, tb):
self.release()
def release(self):
"""Release the dummy lock."""
# XXX Perhaps shouldn't actually bother to test? Could lead
# to problems for complex, threaded code.
if not self.locked_status:
raise error
self.locked_status = False
return True
def locked(self):
return self.locked_status
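# Illustrative usage sketch: __enter__/__exit__ above make the dummy lock a
# context manager, just like a real lock object:
#
#   lock = allocate_lock()
#   with lock:                  # acquire() sets locked_status
#       assert lock.locked()
#   assert not lock.locked()    # release() clears it on exit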
# Used to signal that interrupt_main was called in a "thread"
_interrupt = False
# True when not executing in a "thread"
_main = True
def interrupt_main():
"""Set _interrupt flag to True to have start_new_thread raise
KeyboardInterrupt upon exiting."""
if _main:
raise KeyboardInterrupt
else:
global _interrupt
_interrupt = True
# Brython-specific to avoid circular references between threading and _threading_local
class _local:
pass
|
agpl-3.0
|
hsaputra/tensorflow
|
tensorflow/contrib/metrics/python/ops/confusion_matrix_ops.py
|
164
|
1319
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Confusion matrix related metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import confusion_matrix as cm
def confusion_matrix(labels, predictions, num_classes=None, dtype=dtypes.int32,
name=None, weights=None):
"""Deprecated. Use tf.confusion_matrix instead."""
return cm.confusion_matrix(labels=labels, predictions=predictions,
num_classes=num_classes, dtype=dtype, name=name,
weights=weights)
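# Illustrative usage sketch (hypothetical values; assumes the TF 1.x
# graph/session style this contrib module targets):
#
#   cm_op = confusion_matrix(labels=[1, 2, 4], predictions=[2, 2, 4],
#                            num_classes=5)
#   # Evaluating cm_op yields a 5x5 matrix with single counts at
#   # (1, 2), (2, 2) and (4, 4).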
|
apache-2.0
|
manojpandey/hackenvision16
|
tinybank/tinybank/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py
|
1005
|
92627
|
#-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustaebel <[email protected]>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function
"""Read from and write to tar format archives.
"""
__version__ = "$Revision$"
version = "0.9.0"
__author__ = "Lars Gust\u00e4bel ([email protected])"
__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $"
__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $"
__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import stat
import errno
import time
import struct
import copy
import re
try:
import grp, pwd
except ImportError:
grp = pwd = None
# os.symlink on Windows prior to 6.0 raises NotImplementedError
symlink_exception = (AttributeError, NotImplementedError)
try:
# WindowsError (1314) will be raised if the caller does not hold the
# SeCreateSymbolicLinkPrivilege privilege
symlink_exception += (WindowsError,)
except NameError:
pass
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
_open = builtins.open # Since 'open' is TarFile.open
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = b"\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = b"ustar \0" # magic gnu tar string
POSIX_MAGIC = b"ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = b"0" # regular file
AREGTYPE = b"\0" # regular file
LNKTYPE = b"1" # link (inside tarfile)
SYMTYPE = b"2" # symbolic link
CHRTYPE = b"3" # character special device
BLKTYPE = b"4" # block special device
DIRTYPE = b"5" # directory
FIFOTYPE = b"6" # fifo special device
CONTTYPE = b"7" # contiguous file
GNUTYPE_LONGNAME = b"L" # GNU tar longname
GNUTYPE_LONGLINK = b"K" # GNU tar longlink
GNUTYPE_SPARSE = b"S" # GNU tar sparse file
XHDTYPE = b"x" # POSIX.1-2001 extended header
XGLTYPE = b"g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = b"X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields from a pax header that are affected by hdrcharset.
PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname"))
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# Bits used in the mode field, values in octal.
#---------------------------------------------------------
S_IFLNK = 0o120000 # symbolic link
S_IFREG = 0o100000 # regular file
S_IFBLK = 0o060000 # block device
S_IFDIR = 0o040000 # directory
S_IFCHR = 0o020000 # character device
S_IFIFO = 0o010000 # fifo
TSUID = 0o4000 # set UID on execution
TSGID = 0o2000 # set GID on execution
TSVTX = 0o1000 # reserved
TUREAD = 0o400 # read by owner
TUWRITE = 0o200 # write by owner
TUEXEC = 0o100 # execute/search by owner
TGREAD = 0o040 # read by group
TGWRITE = 0o020 # write by group
TGEXEC = 0o010 # execute/search by group
TOREAD = 0o004 # read by other
TOWRITE = 0o002 # write by other
TOEXEC = 0o001 # execute/search by other
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
if os.name in ("nt", "ce"):
ENCODING = "utf-8"
else:
ENCODING = sys.getfilesystemencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length, encoding, errors):
"""Convert a string to a null-terminated bytes object.
"""
s = s.encode(encoding, errors)
return s[:length] + (length - len(s)) * NUL
def nts(s, encoding, errors):
"""Convert a null-terminated bytes object to a string.
"""
p = s.find(b"\0")
if p != -1:
s = s[:p]
return s.decode(encoding, errors)
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] != chr(0o200):
try:
n = int(nts(s, "ascii", "strict") or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
else:
n = 0
for i in range(len(s) - 1):
n <<= 8
n += ord(s[i + 1])
return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
# octal digits followed by a null-byte, this allows values up to
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
# that if necessary. A leading 0o200 byte indicates this particular
# encoding, the following digits-1 bytes are a big-endian
# representation. This allows values up to (256**(digits-1))-1.
if 0 <= n < 8 ** (digits - 1):
s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL
else:
if format != GNU_FORMAT or n >= 256 ** (digits - 1):
raise ValueError("overflow in number field")
if n < 0:
# XXX We mimic GNU tar's behaviour with negative numbers,
# this could raise OverflowError.
n = struct.unpack("L", struct.pack("l", n))[0]
s = bytearray()
for i in range(digits - 1):
s.insert(0, n & 0o377)
n >>= 8
s.insert(0, 0o200)
return s
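# Worked examples for the two encodings described above:
#   itn(0o755)          -> b"0000755\x00"   (octal digits + NUL)
#   nti(b"0000755\x00") -> 493  (== 0o755)
#   itn(2**40)          -> 0o200 marker byte followed by a big-endian
#                          binary value (GNU extension for large numbers)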
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
return unsigned_chksum, signed_chksum
def copyfileobj(src, dst, length=None):
"""Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
"""
if length == 0:
return
if length is None:
while True:
buf = src.read(16*1024)
if not buf:
break
dst.write(buf)
return
BUFSIZE = 16 * 1024
blocks, remainder = divmod(length, BUFSIZE)
for b in range(blocks):
buf = src.read(BUFSIZE)
if len(buf) < BUFSIZE:
raise IOError("end of file reached")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise IOError("end of file reached")
dst.write(buf)
return
filemode_table = (
((S_IFLNK, "l"),
(S_IFREG, "-"),
(S_IFBLK, "b"),
(S_IFDIR, "d"),
(S_IFCHR, "c"),
(S_IFIFO, "p")),
((TUREAD, "r"),),
((TUWRITE, "w"),),
((TUEXEC|TSUID, "s"),
(TSUID, "S"),
(TUEXEC, "x")),
((TGREAD, "r"),),
((TGWRITE, "w"),),
((TGEXEC|TSGID, "s"),
(TSGID, "S"),
(TGEXEC, "x")),
((TOREAD, "r"),),
((TOWRITE, "w"),),
((TOEXEC|TSVTX, "t"),
(TSVTX, "T"),
(TOEXEC, "x"))
)
def filemode(mode):
"""Convert a file's mode to a string of the form
-rwxrwxrwx.
Used by TarFile.list()
"""
perm = []
for table in filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
class TarError(Exception):
"""Base exception."""
pass
class ExtractError(TarError):
"""General exception for extract errors."""
pass
class ReadError(TarError):
"""Exception for unreadble tar archives."""
pass
class CompressionError(TarError):
"""Exception for unavailable compression methods."""
pass
class StreamError(TarError):
"""Exception for unsupported operations on stream-like TarFiles."""
pass
class HeaderError(TarError):
"""Base exception for header errors."""
pass
class EmptyHeaderError(HeaderError):
"""Exception for empty headers."""
pass
class TruncatedHeaderError(HeaderError):
"""Exception for truncated headers."""
pass
class EOFHeaderError(HeaderError):
"""Exception for end of file headers."""
pass
class InvalidHeaderError(HeaderError):
"""Exception for invalid headers."""
pass
class SubsequentHeaderError(HeaderError):
"""Exception for missing and invalid extended headers."""
pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile(object):
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0o666)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream(object):
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
blockwise. Use of gzip or bzip2 compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = b""
self.pos = 0
self.closed = False
try:
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32(b"")
if mode == "r":
self._init_read_gz()
else:
self._init_write_gz()
if comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = bz2.BZ2Decompressor()
else:
self.cmp = bz2.BZ2Compressor()
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", int(time.time()))
self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
# RFC1952 says we must use ISO-8859-1 for the FNAME field.
self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc)
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = b""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = b""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != b"\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != b"\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
"""Return the stream's file pointer position.
"""
return self.pos
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in range(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except IOError:
raise ReadError("invalid compressed data")
self.dbuf += buf
c += len(buf)
buf = self.dbuf[:size]
self.dbuf = self.dbuf[size:]
return buf
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
self.buf += buf
c += len(buf)
buf = self.buf[:size]
self.buf = self.buf[size:]
return buf
# class _Stream
class _StreamProxy(object):
"""Small proxy class that enables transparent compression
detection for the Stream interface (mode 'r|*').
"""
def __init__(self, fileobj):
self.fileobj = fileobj
self.buf = self.fileobj.read(BLOCKSIZE)
def read(self, size):
self.read = self.fileobj.read
return self.buf
def getcomptype(self):
if self.buf.startswith(b"\037\213\010"):
return "gz"
if self.buf.startswith(b"BZh91"):
return "bz2"
return "tar"
def close(self):
self.fileobj.close()
# class StreamProxy
class _BZ2Proxy(object):
"""Small proxy class that enables external file object
support for "r:bz2" and "w:bz2" modes. This is actually
a workaround for a limitation in bz2 module's BZ2File
class which (unlike gzip.GzipFile) has no support for
a file object argument.
"""
blocksize = 16 * 1024
def __init__(self, fileobj, mode):
self.fileobj = fileobj
self.mode = mode
self.name = getattr(self.fileobj, "name", None)
self.init()
def init(self):
import bz2
self.pos = 0
if self.mode == "r":
self.bz2obj = bz2.BZ2Decompressor()
self.fileobj.seek(0)
self.buf = b""
else:
self.bz2obj = bz2.BZ2Compressor()
def read(self, size):
x = len(self.buf)
while x < size:
raw = self.fileobj.read(self.blocksize)
if not raw:
break
data = self.bz2obj.decompress(raw)
self.buf += data
x += len(data)
buf = self.buf[:size]
self.buf = self.buf[size:]
self.pos += len(buf)
return buf
def seek(self, pos):
if pos < self.pos:
self.init()
self.read(pos - self.pos)
def tell(self):
return self.pos
def write(self, data):
self.pos += len(data)
raw = self.bz2obj.compress(data)
self.fileobj.write(raw)
def close(self):
if self.mode == "w":
raw = self.bz2obj.flush()
self.fileobj.write(raw)
# class _BZ2Proxy
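# Illustrative sketch: how _BZ2Proxy is meant to be used -- wrap an arbitrary
# binary file object that holds bzip2 data and read decompressed bytes back,
# something bz2.BZ2File cannot do on the Python versions this class targets.
# Purely a demonstration; nothing in this module calls it.
def _sketch_bz2proxy_roundtrip(payload=b"hello world"):
    import bz2
    import io
    raw = io.BytesIO(bz2.compress(payload))
    proxy = _BZ2Proxy(raw, "r")
    return proxy.read(len(payload)) == payload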
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
"""A thin wrapper around an existing file object that
provides a part of its data as an individual file
object.
"""
def __init__(self, fileobj, offset, size, blockinfo=None):
self.fileobj = fileobj
self.offset = offset
self.size = size
self.position = 0
if blockinfo is None:
blockinfo = [(0, size)]
# Construct a map with data and zero blocks.
self.map_index = 0
self.map = []
lastpos = 0
realpos = self.offset
for offset, size in blockinfo:
if offset > lastpos:
self.map.append((False, lastpos, offset, None))
self.map.append((True, offset, offset + size, realpos))
realpos += size
lastpos = offset + size
if lastpos < self.size:
self.map.append((False, lastpos, self.size, None))
def seekable(self):
if not hasattr(self.fileobj, "seekable"):
# XXX gzip.GzipFile and bz2.BZ2File
return True
return self.fileobj.seekable()
def tell(self):
"""Return the current file position.
"""
return self.position
def seek(self, position):
"""Seek to a position in the file.
"""
self.position = position
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
buf = b""
while size > 0:
while True:
data, start, stop, offset = self.map[self.map_index]
if start <= self.position < stop:
break
else:
self.map_index += 1
if self.map_index == len(self.map):
self.map_index = 0
length = min(size, stop - self.position)
if data:
self.fileobj.seek(offset + (self.position - start))
buf += self.fileobj.read(length)
else:
buf += NUL * length
size -= length
self.position += length
return buf
#class _FileInFile
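# Illustrative sketch: _FileInFile exposes a sub-range of a larger file object
# as if it were a file of its own.  With blockinfo=None the whole range is
# real data; sparse members instead pass (offset, size) pairs and the gaps in
# between are filled with NUL bytes on read.  Demonstration only.
def _sketch_file_in_file():
    import io
    outer = io.BytesIO(b"....payload....")
    inner = _FileInFile(outer, 4, 7)      # exposes b"payload"
    return inner.read() == b"payload"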
class ExFileObject(object):
"""File-like object for reading an archive member.
Is returned by TarFile.extractfile().
"""
blocksize = 1024
def __init__(self, tarfile, tarinfo):
self.fileobj = _FileInFile(tarfile.fileobj,
tarinfo.offset_data,
tarinfo.size,
tarinfo.sparse)
self.name = tarinfo.name
self.mode = "r"
self.closed = False
self.size = tarinfo.size
self.position = 0
self.buffer = b""
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return self.fileobj.seekable()
def read(self, size=None):
"""Read at most size bytes from the file. If size is not
present or None, read all data until EOF is reached.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
buf = b""
if self.buffer:
if size is None:
buf = self.buffer
self.buffer = b""
else:
buf = self.buffer[:size]
self.buffer = self.buffer[size:]
if size is None:
buf += self.fileobj.read()
else:
buf += self.fileobj.read(size - len(buf))
self.position += len(buf)
return buf
# XXX TextIOWrapper uses the read1() method.
read1 = read
def readline(self, size=-1):
"""Read one entire line from the file. If size is present
and non-negative, return a string with at most that
size, which may be an incomplete line.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
pos = self.buffer.find(b"\n") + 1
if pos == 0:
# no newline found.
while True:
buf = self.fileobj.read(self.blocksize)
self.buffer += buf
if not buf or b"\n" in buf:
pos = self.buffer.find(b"\n") + 1
if pos == 0:
# no newline found.
pos = len(self.buffer)
break
if size != -1:
pos = min(size, pos)
buf = self.buffer[:pos]
self.buffer = self.buffer[pos:]
self.position += len(buf)
return buf
def readlines(self):
"""Return a list with all remaining lines.
"""
result = []
while True:
line = self.readline()
if not line: break
result.append(line)
return result
def tell(self):
"""Return the current file position.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
return self.position
def seek(self, pos, whence=os.SEEK_SET):
"""Seek to a position in the file.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if whence == os.SEEK_SET:
self.position = min(max(pos, 0), self.size)
elif whence == os.SEEK_CUR:
if pos < 0:
self.position = max(self.position + pos, 0)
else:
self.position = min(self.position + pos, self.size)
elif whence == os.SEEK_END:
self.position = max(min(self.size + pos, self.size), 0)
else:
raise ValueError("Invalid argument")
self.buffer = b""
self.fileobj.seek(self.position)
def close(self):
"""Close the file object.
"""
self.closed = True
def __iter__(self):
"""Get an iterator over the file's lines.
"""
while True:
line = self.readline()
if not line:
break
yield line
#class ExFileObject
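# Illustrative sketch (archive and member names are placeholders): ExFileObject
# instances are normally obtained through TarFile.extractfile() and support
# read(), readline(), seek(), tell() and line iteration.  Demonstration only.
def _sketch_read_member_lines(archive_path="archive.tar", member="notes.txt"):
    tf = TarFile.open(archive_path, "r")
    try:
        f = tf.extractfile(member)
        return list(f) if f is not None else []
    finally:
        tf.close()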
#------------------
# Exported Classes
#------------------
class TarInfo(object):
"""Informational class which holds the details about an
archive member given by a tar header block.
TarInfo objects are returned by TarFile.getmember(),
TarFile.getmembers() and TarFile.gettarinfo() and are
usually created internally.
"""
__slots__ = ("name", "mode", "uid", "gid", "size", "mtime",
"chksum", "type", "linkname", "uname", "gname",
"devmajor", "devminor",
"offset", "offset_data", "pax_headers", "sparse",
"tarfile", "_sparse_structs", "_link_target")
def __init__(self, name=""):
"""Construct a TarInfo object. name is the optional name
of the member.
"""
self.name = name # member name
self.mode = 0o644 # file permissions
self.uid = 0 # user id
self.gid = 0 # group id
self.size = 0 # file size
self.mtime = 0 # modification time
self.chksum = 0 # header checksum
self.type = REGTYPE # member type
self.linkname = "" # link name
self.uname = "" # user name
self.gname = "" # group name
self.devmajor = 0 # device major number
self.devminor = 0 # device minor number
self.offset = 0 # the tar header starts here
self.offset_data = 0 # the file's data starts here
self.sparse = None # sparse member information
self.pax_headers = {} # pax header information
# In pax headers the "name" and "linkname" field are called
# "path" and "linkpath".
def _getpath(self):
return self.name
def _setpath(self, name):
self.name = name
path = property(_getpath, _setpath)
def _getlinkpath(self):
return self.linkname
def _setlinkpath(self, linkname):
self.linkname = linkname
linkpath = property(_getlinkpath, _setlinkpath)
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
def get_info(self):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 0o7777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
return info
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info()
if format == USTAR_FORMAT:
return self.create_ustar_header(info, encoding, errors)
elif format == GNU_FORMAT:
return self.create_gnu_header(info, encoding, errors)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding)
else:
raise ValueError("invalid format")
def create_ustar_header(self, info, encoding, errors):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT, encoding, errors)
def create_gnu_header(self, info, encoding, errors):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = b""
if len(info["linkname"]) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
if len(info["name"]) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
def create_pax_header(self, info, encoding):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplement information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
# Try to encode the string as ASCII.
try:
info[name].encode("ascii", "strict")
except UnicodeEncodeError:
pax_headers[hname] = info[name]
continue
if len(info[name]) > length:
pax_headers[hname] = info[name]
# Test number fields for values that exceed the field limit or values
# that are stored as floats.
for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = str(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
else:
buf = b""
return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
@classmethod
def create_pax_global_header(cls, pax_headers):
"""Return the object as a pax global header block sequence.
"""
return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8")
def _posix_split_name(self, name):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
@staticmethod
def _create_header(info, format, encoding, errors):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100, encoding, errors),
itn(info.get("mode", 0) & 0o7777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
b" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100, encoding, errors),
info.get("magic", POSIX_MAGIC),
stn(info.get("uname", ""), 32, encoding, errors),
stn(info.get("gname", ""), 32, encoding, errors),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155, encoding, errors)
]
buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:]
return buf
@staticmethod
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
@classmethod
def _create_gnu_long_header(cls, name, type, encoding, errors):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name = name.encode(encoding, errors) + NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
cls._create_payload(name)
@classmethod
def _create_pax_generic_header(cls, pax_headers, type, encoding):
"""Return a POSIX.1-2008 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be strings.
"""
# Check if one of the fields contains surrogate characters and thereby
# forces hdrcharset=BINARY, see _proc_pax() for more information.
binary = False
for keyword, value in pax_headers.items():
try:
value.encode("utf8", "strict")
except UnicodeEncodeError:
binary = True
break
records = b""
if binary:
# Put the hdrcharset field at the beginning of the header.
records += b"21 hdrcharset=BINARY\n"
for keyword, value in pax_headers.items():
keyword = keyword.encode("utf8")
if binary:
# Try to restore the original byte representation of `value'.
# Needless to say, the encoding must match the string.
value = value.encode(encoding, "surrogateescape")
else:
value = value.encode("utf8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
n = p = 0
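# The decimal length prefix counts its own digits, so iterate until the
# total record size stops changing: e.g. if keyword, value and the three
# separator bytes add up to 22, p settles on 24 because len("24") == 2.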
while True:
n = l + len(str(p))
if n == p:
break
p = n
records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
cls._create_payload(records)
@classmethod
def frombuf(cls, buf, encoding, errors):
"""Construct a TarInfo object from a 512 byte bytes object.
"""
if len(buf) == 0:
raise EmptyHeaderError("empty header")
if len(buf) != BLOCKSIZE:
raise TruncatedHeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise EOFHeaderError("end of file header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise InvalidHeaderError("bad checksum")
obj = cls()
obj.name = nts(buf[0:100], encoding, errors)
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257], encoding, errors)
obj.uname = nts(buf[265:297], encoding, errors)
obj.gname = nts(buf[297:329], encoding, errors)
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500], encoding, errors)
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# The old GNU sparse format occupies some of the unused
# space in the buffer for up to 4 sparse structures.
# Save them for later processing in _proc_sparse().
if obj.type == GNUTYPE_SPARSE:
pos = 386
structs = []
for i in range(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[482])
origsize = nti(buf[483:495])
obj._sparse_structs = (structs, isextended, origsize)
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
@classmethod
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is _proc_member() which can be overridden in a
# subclass to add custom _proc_*() methods. A _proc_*() method MUST
# implement the following
# operations:
# 1. Set self.offset_data to the position where the data blocks begin,
# if there is data that follows.
# 2. Set tarfile.offset to the position where the next member's header will
# begin.
# 3. Return self or another valid TarInfo object.
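# For example, a subclass could route one extra (hypothetical) type flag
# to its own handler and defer everything else to the default logic:
#
#     class MyTarInfo(TarInfo):
#         def _proc_member(self, tarfile):
#             if self.type == b"X":      # made-up vendor-specific type
#                 return self._proc_builtin(tarfile)
#             return super(MyTarInfo, self)._proc_member(tarfile)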
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
def _proc_builtin(self, tarfile):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
tarfile.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf, tarfile.encoding, tarfile.errors)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
return next
def _proc_sparse(self, tarfile):
"""Process a GNU sparse header plus extra headers.
"""
# We already collected some sparse structures in frombuf().
structs, isextended, origsize = self._sparse_structs
del self._sparse_structs
# Collect sparse structures from extended header blocks.
while isextended:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in range(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset and numbytes:
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[504])
self.sparse = structs
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
def _proc_pax(self, tarfile):
"""Process an extended or global header as described in
POSIX.1-2008.
"""
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
# Check if the pax header contains a hdrcharset field. This tells us
# the encoding of the path, linkpath, uname and gname fields. Normally,
# these fields are UTF-8 encoded, but POSIX.1-2008 allows tar
# implementations to store them as raw binary strings if the
# translation to UTF-8 fails.
match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
if match is not None:
pax_headers["hdrcharset"] = match.group(1).decode("utf8")
# For the time being, we don't care about anything other than "BINARY".
# The only other value that is currently allowed by the standard is
# "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
hdrcharset = pax_headers.get("hdrcharset")
if hdrcharset == "BINARY":
encoding = tarfile.encoding
else:
encoding = "utf8"
# Parse pax header information. A record looks like this:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
regex = re.compile(br"(\d+) ([^=]+)=")
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
# Normally, we could just use "utf8" as the encoding and "strict"
# as the error handler, but we better not take the risk. For
# example, GNU tar <= 1.23 is known to store filenames it cannot
# translate to UTF-8 as raw strings (unfortunately without a
# hdrcharset=BINARY header).
# We first try the strict standard encoding, and if that fails we
# fall back on the user's encoding and error handler.
keyword = self._decode_pax_field(keyword, "utf8", "utf8",
tarfile.errors)
if keyword in PAX_NAME_FIELDS:
value = self._decode_pax_field(value, encoding, tarfile.encoding,
tarfile.errors)
else:
value = self._decode_pax_field(value, "utf8", "utf8",
tarfile.errors)
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Process GNU sparse information.
if "GNU.sparse.map" in pax_headers:
# GNU extended sparse format version 0.1.
self._proc_gnusparse_01(next, pax_headers)
elif "GNU.sparse.size" in pax_headers:
# GNU extended sparse format version 0.0.
self._proc_gnusparse_00(next, pax_headers, buf)
elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
# GNU extended sparse format version 1.0.
self._proc_gnusparse_10(next, pax_headers, tarfile)
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
tarfile.offset = offset
return next
def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes))
def _proc_gnusparse_01(self, next, pax_headers):
"""Process a GNU tar extended sparse header, version 0.1.
"""
sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _proc_gnusparse_10(self, next, pax_headers, tarfile):
"""Process a GNU tar extended sparse header, version 1.0.
"""
fields = None
sparse = []
buf = tarfile.fileobj.read(BLOCKSIZE)
fields, buf = buf.split(b"\n", 1)
fields = int(fields)
while len(sparse) < fields * 2:
if b"\n" not in buf:
buf += tarfile.fileobj.read(BLOCKSIZE)
number, buf = buf.split(b"\n", 1)
sparse.append(int(number))
next.offset_data = tarfile.fileobj.tell()
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.items():
if keyword == "GNU.sparse.name":
setattr(self, "path", value)
elif keyword == "GNU.sparse.size":
setattr(self, "size", int(value))
elif keyword == "GNU.sparse.realsize":
setattr(self, "size", int(value))
elif keyword in PAX_FIELDS:
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
if keyword == "path":
value = value.rstrip("/")
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
"""Decode a single field from a pax record.
"""
try:
return value.decode(encoding, "strict")
except UnicodeDecodeError:
return value.decode(fallback_encoding, fallback_errors)
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
def isreg(self):
return self.type in REGULAR_TYPES
def isfile(self):
return self.isreg()
def isdir(self):
return self.type == DIRTYPE
def issym(self):
return self.type == SYMTYPE
def islnk(self):
return self.type == LNKTYPE
def ischr(self):
return self.type == CHRTYPE
def isblk(self):
return self.type == BLKTYPE
def isfifo(self):
return self.type == FIFOTYPE
def issparse(self):
return self.sparse is not None
def isdev(self):
return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
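# Illustrative sketch: TarInfo carries the metadata of a single member and
# tobuf() serialises it into 512-byte header blocks, which is exactly what
# TarFile.addfile() does before writing the member's data.  The name and
# size used here are made up; nothing in this module calls this helper.
def _sketch_build_header():
    ti = TarInfo("example.txt")
    ti.size = 11
    ti.mtime = 0
    buf = ti.tobuf()
    return len(buf) % BLOCKSIZE == 0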
class TarFile(object):
"""The TarFile Class provides an interface to tar archives.
"""
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
dereference = False # If true, add content of linked file to the
# tar file, else the link.
ignore_zeros = False # If true, skips empty or invalid blocks and
# continues processing.
errorlevel = 1 # If 0, fatal errors only appear in debug
# messages (if debug >= 0). If > 0, errors
# are passed to the caller as exceptions.
format = DEFAULT_FORMAT # The format to use when creating an archive.
encoding = ENCODING # Encoding for 8-bit character strings.
errors = None # Error handler for unicode conversion.
tarinfo = TarInfo # The default TarInfo class to use.
fileobject = ExFileObject # The default ExFileObject class to use.
def __init__(self, name=None, mode="r", fileobj=None, format=None,
tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None):
"""Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
read from an existing archive, 'a' to append data to an existing
file or 'w' to create a new file overwriting an existing one. `mode'
defaults to 'r'.
If `fileobj' is given, it is used for reading or writing data. If it
can be determined, `mode' is overridden by `fileobj's mode.
`fileobj' is not closed when TarFile is closed.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
self.mode = mode
self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
if not fileobj:
if self.mode == "a" and not os.path.exists(name):
# Create nonexistent files in append mode.
self.mode = "w"
self._mode = "wb"
fileobj = bltn_open(name, self._mode)
self._extfileobj = False
else:
if name is None and hasattr(fileobj, "name"):
name = fileobj.name
if hasattr(fileobj, "mode"):
self._mode = fileobj.mode
self._extfileobj = True
self.name = os.path.abspath(name) if name else None
self.fileobj = fileobj
# Init attributes.
if format is not None:
self.format = format
if tarinfo is not None:
self.tarinfo = tarinfo
if dereference is not None:
self.dereference = dereference
if ignore_zeros is not None:
self.ignore_zeros = ignore_zeros
if encoding is not None:
self.encoding = encoding
self.errors = errors
if pax_headers is not None and self.format == PAX_FORMAT:
self.pax_headers = pax_headers
else:
self.pax_headers = {}
if debug is not None:
self.debug = debug
if errorlevel is not None:
self.errorlevel = errorlevel
# Init datastructures.
self.closed = False
self.members = [] # list of members as TarInfo objects
self._loaded = False # flag if all members have been read
self.offset = self.fileobj.tell()
# current position in the archive file
self.inodes = {} # dictionary caching the inodes of
# archive members already added
try:
if self.mode == "r":
self.firstmember = None
self.firstmember = self.next()
if self.mode == "a":
# Move to the end of the archive,
# before the first empty block.
while True:
self.fileobj.seek(self.offset)
try:
tarinfo = self.tarinfo.fromtarfile(self)
self.members.append(tarinfo)
except EOFHeaderError:
self.fileobj.seek(self.offset)
break
except HeaderError as e:
raise ReadError(str(e))
if self.mode in "aw":
self._loaded = True
if self.pax_headers:
buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
self.fileobj.write(buf)
self.offset += len(buf)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
"""Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
"""
if not name and not fileobj:
raise ValueError("nothing to open")
if mode in ("r", "r:*"):
# Find out which *open() is appropriate for opening the file.
for comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
if fileobj is not None:
saved_pos = fileobj.tell()
try:
return func(name, "r", fileobj, **kwargs)
except (ReadError, CompressionError) as e:
if fileobj is not None:
fileobj.seek(saved_pos)
continue
raise ReadError("file could not be opened successfully")
elif ":" in mode:
filemode, comptype = mode.split(":", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
# Select the *open() function according to
# given compression.
if comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError("unknown compression type %r" % comptype)
return func(name, filemode, fileobj, **kwargs)
elif "|" in mode:
filemode, comptype = mode.split("|", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
if filemode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
stream = _Stream(name, filemode, comptype, fileobj, bufsize)
try:
t = cls(name, filemode, stream, **kwargs)
except:
stream.close()
raise
t._extfileobj = False
return t
elif mode in "aw":
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
extfileobj = fileobj is not None
try:
fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
t = cls.taropen(name, mode, fileobj, **kwargs)
except IOError:
if not extfileobj and fileobj is not None:
fileobj.close()
if fileobj is None:
raise
raise ReadError("not a gzip file")
except:
if not extfileobj and fileobj is not None:
fileobj.close()
raise
t._extfileobj = extfileobj
return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'.")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if fileobj is not None:
fileobj = _BZ2Proxy(fileobj, mode)
else:
fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (IOError, EOFError):
fileobj.close()
raise ReadError("not a bzip2 file")
t._extfileobj = False
return t
# All *open() methods are registered here.
OPEN_METH = {
"tar": "taropen", # uncompressed tar
"gz": "gzopen", # gzip compressed tar
"bz2": "bz2open" # bzip2 compressed tar
}
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
def getnames(self):
"""Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().
"""
return [tarinfo.name for tarinfo in self.getmembers()]
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
# Backward slashes are converted to forward slashes and
# absolute paths are turned into relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific for the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
# The inode is added only if it is valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
def list(self, verbose=True):
"""Print a table of contents to sys.stdout. If `verbose' is False, only
the names of the members are printed. If it is True, an `ls -l'-like
output is produced.
"""
self._check()
for tarinfo in self:
if verbose:
print(filemode(tarinfo.mode), end=' ')
print("%s/%s" % (tarinfo.uname or tarinfo.uid,
tarinfo.gname or tarinfo.gid), end=' ')
if tarinfo.ischr() or tarinfo.isblk():
print("%10s" % ("%d,%d" \
% (tarinfo.devmajor, tarinfo.devminor)), end=' ')
else:
print("%10d" % tarinfo.size, end=' ')
print("%d-%02d-%02d %02d:%02d:%02d" \
% time.localtime(tarinfo.mtime)[:6], end=' ')
print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ')
if verbose:
if tarinfo.issym():
print("->", tarinfo.linkname, end=' ')
if tarinfo.islnk():
print("link to", tarinfo.linkname, end=' ')
print()
def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
"""Add the file `name' to the archive. `name' may be any type of file
(directory, fifo, symbolic link, etc.). If given, `arcname'
specifies an alternative name for the file in the archive.
Directories are added recursively by default. This can be avoided by
setting `recursive' to False. `exclude' is a function that should
return True for each filename to be excluded. `filter' is a function
that expects a TarInfo object argument and returns the changed
TarInfo object; if it returns None, the TarInfo object will be
excluded from the archive.
"""
self._check("aw")
if arcname is None:
arcname = name
# Exclude pathnames.
if exclude is not None:
import warnings
warnings.warn("use the filter argument instead",
DeprecationWarning, 2)
if exclude(name):
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Skip if somebody tries to archive the archive...
if self.name is not None and os.path.abspath(name) == self.name:
self._dbg(2, "tarfile: Skipped %r" % name)
return
self._dbg(1, name)
# Create a TarInfo object from the file.
tarinfo = self.gettarinfo(name, arcname)
if tarinfo is None:
self._dbg(1, "tarfile: Unsupported type %r" % name)
return
# Change or exclude the TarInfo object.
if filter is not None:
tarinfo = filter(tarinfo)
if tarinfo is None:
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Append the tar header and data to the archive.
if tarinfo.isreg():
f = bltn_open(name, "rb")
self.addfile(tarinfo, f)
f.close()
elif tarinfo.isdir():
self.addfile(tarinfo)
if recursive:
for f in os.listdir(name):
self.add(os.path.join(name, f), os.path.join(arcname, f),
recursive, exclude, filter=filter)
else:
self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
"""Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
given, tarinfo.size bytes are read from it and added to the archive.
You can create TarInfo objects using gettarinfo().
On Windows platforms, `fileobj' should always be opened with mode
'rb' to avoid irritation about the file size.
"""
self._check("aw")
tarinfo = copy.copy(tarinfo)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.fileobj.write(buf)
self.offset += len(buf)
# If there's data to follow, append it.
if fileobj is not None:
copyfileobj(fileobj, self.fileobj, tarinfo.size)
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
if remainder > 0:
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
blocks += 1
self.offset += blocks * BLOCKSIZE
self.members.append(tarinfo)
def extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0o700
# Do not set_attrs directories, as we will do that further down
self.extract(tarinfo, path, set_attrs=not tarinfo.isdir())
# Reverse sort directories.
directories.sort(key=lambda a: a.name)
directories.reverse()
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extract(self, member, path="", set_attrs=True):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'. File attributes (owner,
mtime, mode) are set unless `set_attrs' is False.
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
set_attrs=set_attrs)
except EnvironmentError as e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
"""Extract a member from the archive as a file object. `member' may be
a filename or a TarInfo object. If `member' is a regular file, a
file-like object is returned. If `member' is a link, a file-like
object is constructed from the link's target. If `member' is none of
the above, None is returned.
The file-like object is read-only and provides the following
methods: read(), readline(), readlines(), seek() and tell()
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.isreg():
return self.fileobject(self, tarinfo)
elif tarinfo.type not in SUPPORTED_TYPES:
# If a member's type is unknown, it is treated as a
# regular file.
return self.fileobject(self, tarinfo)
elif tarinfo.islnk() or tarinfo.issym():
if isinstance(self.fileobj, _Stream):
# A small but ugly workaround for the case that someone tries
# to extract a (sym)link as a file-object from a non-seekable
# stream of tar blocks.
raise StreamError("cannot extract (sym)link as file object")
else:
# A (sym)link's file object is its target's file object.
return self.extractfile(self._find_link_target(tarinfo))
else:
# If there's no data associated with the member (directory, chrdev,
# blkdev, etc.), return None instead of a file object.
return None
def _extract_member(self, tarinfo, targetpath, set_attrs=True):
"""Extract the TarInfo object tarinfo to a physical
file called targetpath.
"""
# Fetch the TarInfo object for the given name
# and build the destination pathname, replacing
# forward slashes with platform-specific separators.
targetpath = targetpath.rstrip("/")
targetpath = targetpath.replace("/", os.sep)
# Create all upper directories.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
# Create directories that are not part of the archive with
# default permissions.
os.makedirs(upperdirs)
if tarinfo.islnk() or tarinfo.issym():
self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
else:
self._dbg(1, tarinfo.name)
if tarinfo.isreg():
self.makefile(tarinfo, targetpath)
elif tarinfo.isdir():
self.makedir(tarinfo, targetpath)
elif tarinfo.isfifo():
self.makefifo(tarinfo, targetpath)
elif tarinfo.ischr() or tarinfo.isblk():
self.makedev(tarinfo, targetpath)
elif tarinfo.islnk() or tarinfo.issym():
self.makelink(tarinfo, targetpath)
elif tarinfo.type not in SUPPORTED_TYPES:
self.makeunknown(tarinfo, targetpath)
else:
self.makefile(tarinfo, targetpath)
if set_attrs:
self.chown(tarinfo, targetpath)
if not tarinfo.issym():
self.chmod(tarinfo, targetpath)
self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
"""Make a directory called targetpath.
"""
try:
# Use a safe mode for the directory, the real mode is set
# later in _extract_member().
os.mkdir(targetpath, 0o700)
except EnvironmentError as e:
if e.errno != errno.EEXIST:
raise
def makefile(self, tarinfo, targetpath):
"""Make a file called targetpath.
"""
source = self.fileobj
source.seek(tarinfo.offset_data)
target = bltn_open(targetpath, "wb")
if tarinfo.sparse is not None:
for offset, size in tarinfo.sparse:
target.seek(offset)
copyfileobj(source, target, size)
else:
copyfileobj(source, target, tarinfo.size)
target.seek(tarinfo.size)
target.truncate()
target.close()
def makeunknown(self, tarinfo, targetpath):
"""Make a file from a TarInfo object with an unknown type
at targetpath.
"""
self.makefile(tarinfo, targetpath)
self._dbg(1, "tarfile: Unknown file type %r, " \
"extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
"""Make a fifo called targetpath.
"""
if hasattr(os, "mkfifo"):
os.mkfifo(targetpath)
else:
raise ExtractError("fifo not supported by system")
def makedev(self, tarinfo, targetpath):
"""Make a character or block device called targetpath.
"""
if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
raise ExtractError("special devices not supported by system")
mode = tarinfo.mode
if tarinfo.isblk():
mode |= stat.S_IFBLK
else:
mode |= stat.S_IFCHR
os.mknod(targetpath, mode,
os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
"""Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.
"""
try:
# For systems that support symbolic and hard links.
if tarinfo.issym():
os.symlink(tarinfo.linkname, targetpath)
else:
# See extract().
if os.path.exists(tarinfo._link_target):
os.link(tarinfo._link_target, targetpath)
else:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
except symlink_exception:
if tarinfo.issym():
linkpath = os.path.join(os.path.dirname(tarinfo.name),
tarinfo.linkname)
else:
linkpath = tarinfo.linkname
else:
try:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
except KeyError:
raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath):
"""Set owner of targetpath according to tarinfo.
"""
if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
try:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
g = tarinfo.gid
try:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
u = tarinfo.uid
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
if sys.platform != "os2emx":
os.chown(targetpath, u, g)
except EnvironmentError as e:
raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except EnvironmentError as e:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
"""Set modification time of targetpath according to tarinfo.
"""
if not hasattr(os, 'utime'):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except EnvironmentError as e:
raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
"""Return the next member of the archive as a TarInfo object, when
TarFile is opened for reading. Return None if there is no more
available.
"""
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Read the next block.
self.fileobj.seek(self.offset)
tarinfo = None
while True:
try:
tarinfo = self.tarinfo.fromtarfile(self)
except EOFHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
except InvalidHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
elif self.offset == 0:
raise ReadError(str(e))
except EmptyHeaderError:
if self.offset == 0:
raise ReadError("empty file")
except TruncatedHeaderError as e:
if self.offset == 0:
raise ReadError(str(e))
except SubsequentHeaderError as e:
raise ReadError(str(e))
break
if tarinfo is not None:
self.members.append(tarinfo)
else:
self._loaded = True
return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
"""Find an archive member by name from bottom to top.
If tarinfo is given, it is used as the starting point.
"""
# Ensure that all members have been loaded.
members = self.getmembers()
# Limit the member search list up to tarinfo.
if tarinfo is not None:
members = members[:members.index(tarinfo)]
if normalize:
name = os.path.normpath(name)
for member in reversed(members):
if normalize:
member_name = os.path.normpath(member.name)
else:
member_name = member.name
if name == member_name:
return member
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise IOError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise IOError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
"""Find the target member of a symlink or hardlink member in the
archive.
"""
if tarinfo.issym():
# Always search the entire archive.
linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
limit = None
else:
# Search the archive before the link, because a hard link is
# just a reference to an already archived file.
linkname = tarinfo.linkname
limit = tarinfo
member = self._getmember(linkname, tarinfo=limit, normalize=True)
if member is None:
raise KeyError("linkname %r not found" % linkname)
return member
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
return iter(self.members)
else:
return TarIter(self)
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print(msg, file=sys.stderr)
def __enter__(self):
self._check()
return self
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
# An exception occurred. We must not call close() because
# it would try to write end-of-archive blocks and padding.
if not self._extfileobj:
self.fileobj.close()
self.closed = True
# class TarFile
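# Illustrative sketch (all file names are placeholders): a typical round trip
# through the TarFile class defined above.  TarFile.open() selects the right
# sub-constructor ("taropen", "gzopen" or "bz2open") from the mode string,
# e.g. "w:gz" for writing a gzip-compressed archive and "r:*" for reading
# with transparent compression detection.  Demonstration only.
def _sketch_tarfile_roundtrip(src="example.txt", archive="example.tar.gz",
                              dest="."):
    tf = TarFile.open(archive, "w:gz")
    try:
        tf.add(src)
    finally:
        tf.close()
    tf = TarFile.open(archive, "r:*")
    try:
        names = tf.getnames()
        tf.extractall(path=dest)
    finally:
        tf.close()
    return names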
class TarIter(object):
"""Iterator Class.
for tarinfo in TarFile(...):
suite...
"""
def __init__(self, tarfile):
"""Construct a TarIter object.
"""
self.tarfile = tarfile
self.index = 0
def __iter__(self):
"""Return iterator object.
"""
return self
def __next__(self):
"""Return the next item using TarFile's next() method.
When all members have been read, set TarFile as _loaded.
"""
# Fix for SF #1100429: Under rare circumstances it can
# happen that getmembers() is called during iteration,
# which will cause TarIter to stop prematurely.
if not self.tarfile._loaded:
tarinfo = self.tarfile.next()
if not tarinfo:
self.tarfile._loaded = True
raise StopIteration
else:
try:
tarinfo = self.tarfile.members[self.index]
except IndexError:
raise StopIteration
self.index += 1
return tarinfo
next = __next__ # for Python 2.x
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
"""
try:
t = open(name)
t.close()
return True
except TarError:
return False
bltn_open = open
open = TarFile.open
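# Illustrative sketch: the module-level conveniences defined just above.
# is_tarfile() probes whether a path is an archive this module can read, and
# `open` is now an alias for TarFile.open (the builtin is kept as bltn_open).
# The path below is a placeholder; nothing in this module calls this helper.
def _sketch_probe_archive(path="maybe-an-archive.tar"):
    if not is_tarfile(path):
        return None
    tf = open(path, "r:*")       # resolves to TarFile.open
    try:
        return tf.getnames()
    finally:
        tf.close()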
|
mit
|
google/dnae
|
services/service-example/service_example_test.py
|
1
|
3295
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNA - Service example - Module to launch a local test."""
import os
import sys
# This is a workaround to import the relevant libraries for local testing
# purposes.
# The import will be handled through proper "vendor.add" in appengine_config.py
# for the AppEngine deployed version.
_BASEPATH = os.path.abspath(__file__).split(os.path.sep)[:-3]
for p in ('lib/connectors', 'lib/core', 'lib/utils'):
sys.path.append(os.path.sep.join(_BASEPATH + [p]))
from dcm_connector import DCMConnector
from dna_general_settings import DCM_API_VER
from dna_project_settings import DCM_PROFILE_ID
from dna_project_settings import PROJECT_ID
from gcp_connector import GCPConnector
import service_example_run
from service_example_settings import DATA_SCHEMA_STANDARD
from service_example_settings import DCM_REPORT_DATE_RANGE
from service_example_settings import DCM_REPORT_NAME
from service_example_settings import DCM_REPORT_TEMPLATE
from service_example_settings import GBQ_DATASET
from service_example_settings import GBQ_TABLE
from service_example_settings import GCE_RUN_SCRIPT
from service_example_settings import GCS_BUCKET
from service_example_settings import SERVICE_NAME
from utils import TextUtils
CREDENTIAL_FILE = '../../ddmcredentials.dat'
def main():
try:
gcp = GCPConnector(PROJECT_ID)
dcm = DCMConnector(
credential_file=CREDENTIAL_FILE,
user_email=None,
profile_id=DCM_PROFILE_ID,
api_version=DCM_API_VER)
# In this example, we're mocking the config parameters (DCM partner and
# advertiser IDs respectively):
config_data = [['1234', '1111111'],
['5678', '2222222']]
for row in config_data:
# Add params to be passed via task payload
task_params = dict()
task_params['service'] = SERVICE_NAME # Mandatory field
task_params['run_script'] = GCE_RUN_SCRIPT # Mandatory field
task_params['account_id'] = row[0]
task_params['advertiser_id'] = row[1]
task_params['bucket'] = GCS_BUCKET
task_params['dataset'] = GBQ_DATASET
# And add service-specific params
task_params['report_template'] = DCM_REPORT_TEMPLATE
task_params['report_name'] = DCM_REPORT_NAME
task_params['date_range'] = DCM_REPORT_DATE_RANGE
task_params['schema'] = DATA_SCHEMA_STANDARD
task_params['filename'] = TextUtils.timestamp() + '_' + str(
task_params['account_id']) + '.csv'
task_params['table'] = GBQ_TABLE
task_params['append'] = True
service_example_run.service_task(dcm, gcp, task_params)
# pylint: disable=broad-except
except Exception as e:
print e.message
# pylint: enable=broad-except
if __name__ == '__main__':
main()
|
apache-2.0
|
molobrakos/home-assistant
|
tests/helpers/test_config_validation.py
|
5
|
27755
|
"""Test config validators."""
from datetime import date, datetime, timedelta
import enum
import os
from socket import _GLOBAL_DEFAULT_TIMEOUT
from unittest.mock import Mock, patch
import uuid
import pytest
import voluptuous as vol
import homeassistant
import homeassistant.helpers.config_validation as cv
def test_boolean():
"""Test boolean validation."""
schema = vol.Schema(cv.boolean)
for value in ('T', 'negative', 'lock'):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in ('true', 'On', '1', 'YES', 'enable', 1, True):
assert schema(value)
for value in ('false', 'Off', '0', 'NO', 'disable', 0, False):
assert not schema(value)
def test_latitude():
"""Test latitude validation."""
schema = vol.Schema(cv.latitude)
for value in ('invalid', None, -91, 91, '-91', '91', '123.01A'):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in ('-89', 89, '12.34'):
schema(value)
def test_longitude():
"""Test longitude validation."""
schema = vol.Schema(cv.longitude)
for value in ('invalid', None, -181, 181, '-181', '181', '123.01A'):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in ('-179', 179, '12.34'):
schema(value)
def test_port():
"""Test TCP/UDP network port."""
schema = vol.Schema(cv.port)
for value in ('invalid', None, -1, 0, 80000, '81000'):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in ('1000', 21, 24574):
schema(value)
def test_isfile():
"""Validate that the value is an existing file."""
schema = vol.Schema(cv.isfile)
fake_file = 'this-file-does-not.exist'
assert not os.path.isfile(fake_file)
for value in ('invalid', None, -1, 0, 80000, fake_file):
with pytest.raises(vol.Invalid):
schema(value)
# patching methods that allow us to fake a file existing
# with write access
with patch('os.path.isfile', Mock(return_value=True)), \
patch('os.access', Mock(return_value=True)):
schema('test.txt')
def test_url():
"""Test URL."""
schema = vol.Schema(cv.url)
for value in ('invalid', None, 100, 'htp://ha.io', 'http//ha.io',
'http://??,**', 'https://??,**'):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in ('http://localhost', 'https://localhost/test/index.html',
'http://home-assistant.io', 'http://home-assistant.io/test/',
'https://community.home-assistant.io/'):
assert schema(value)
def test_platform_config():
"""Test platform config validation."""
options = (
{},
{'hello': 'world'},
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
cv.PLATFORM_SCHEMA(value)
options = (
{'platform': 'mqtt'},
{'platform': 'mqtt', 'beer': 'yes'},
)
for value in options:
cv.PLATFORM_SCHEMA_BASE(value)
def test_ensure_list():
"""Test ensure_list."""
schema = vol.Schema(cv.ensure_list)
assert [] == schema(None)
assert [1] == schema(1)
assert [1] == schema([1])
assert ['1'] == schema('1')
assert ['1'] == schema(['1'])
assert [{'1': '2'}] == schema({'1': '2'})
def test_entity_id():
"""Test entity ID validation."""
schema = vol.Schema(cv.entity_id)
with pytest.raises(vol.MultipleInvalid):
schema('invalid_entity')
assert schema('sensor.LIGHT') == 'sensor.light'
def test_entity_ids():
"""Test entity ID validation."""
schema = vol.Schema(cv.entity_ids)
options = (
'invalid_entity',
'sensor.light,sensor_invalid',
['invalid_entity'],
['sensor.light', 'sensor_invalid'],
['sensor.light,sensor_invalid'],
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
schema(value)
options = (
[],
['sensor.light'],
'sensor.light'
)
for value in options:
schema(value)
assert schema('sensor.LIGHT, light.kitchen ') == [
'sensor.light', 'light.kitchen'
]
def test_entity_domain():
"""Test entity domain validation."""
schema = vol.Schema(cv.entity_domain('sensor'))
options = (
'invalid_entity',
'cover.demo',
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
print(value)
schema(value)
assert schema('sensor.LIGHT') == 'sensor.light'
def test_entities_domain():
"""Test entities domain validation."""
schema = vol.Schema(cv.entities_domain('sensor'))
options = (
None,
'',
'invalid_entity',
['sensor.light', 'cover.demo'],
['sensor.light', 'sensor_invalid'],
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
schema(value)
options = (
'sensor.light',
['SENSOR.light'],
['sensor.light', 'sensor.demo']
)
for value in options:
schema(value)
assert schema('sensor.LIGHT, sensor.demo ') == [
'sensor.light', 'sensor.demo'
]
assert schema(['sensor.light', 'SENSOR.demo']) == [
'sensor.light', 'sensor.demo'
]
def test_ensure_list_csv():
"""Test ensure_list_csv."""
schema = vol.Schema(cv.ensure_list_csv)
options = (
None,
12,
[],
['string'],
'string1,string2'
)
for value in options:
schema(value)
assert schema('string1, string2 ') == [
'string1', 'string2'
]
def test_event_schema():
"""Test event_schema validation."""
options = (
{}, None,
{
'event_data': {},
},
{
'event': 'state_changed',
'event_data': 1,
},
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
cv.EVENT_SCHEMA(value)
options = (
{'event': 'state_changed'},
{'event': 'state_changed', 'event_data': {'hello': 'world'}},
)
for value in options:
cv.EVENT_SCHEMA(value)
def test_icon():
"""Test icon validation."""
schema = vol.Schema(cv.icon)
for value in (False, 'work'):
with pytest.raises(vol.MultipleInvalid):
schema(value)
schema('mdi:work')
schema('custom:prefix')
def test_time_period():
"""Test time_period validation."""
schema = vol.Schema(cv.time_period)
options = (
None, '', 'hello:world', '12:', '12:34:56:78',
{}, {'wrong_key': -10}
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
schema(value)
options = (
'8:20', '23:59', '-8:20', '-23:59:59', '-48:00', {'minutes': 5}, 1, '5'
)
for value in options:
schema(value)
assert timedelta(seconds=180) == schema('180')
assert timedelta(hours=23, minutes=59) == schema('23:59')
assert -1 * timedelta(hours=1, minutes=15) == schema('-1:15')
def test_remove_falsy():
"""Test remove falsy."""
assert cv.remove_falsy([0, None, 1, "1", {}, [], ""]) == [1, "1"]
def test_service():
"""Test service validation."""
schema = vol.Schema(cv.service)
with pytest.raises(vol.MultipleInvalid):
schema('invalid_turn_on')
schema('homeassistant.turn_on')
def test_service_schema():
"""Test service_schema validation."""
options = (
{}, None,
{
'service': 'homeassistant.turn_on',
'service_template': 'homeassistant.turn_on'
},
{
'data': {'entity_id': 'light.kitchen'},
},
{
'service': 'homeassistant.turn_on',
'data': None
},
{
'service': 'homeassistant.turn_on',
'data_template': {
'brightness': '{{ no_end'
}
},
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
cv.SERVICE_SCHEMA(value)
options = (
{'service': 'homeassistant.turn_on'},
{
'service': 'homeassistant.turn_on',
'entity_id': 'light.kitchen',
},
{
'service': 'light.turn_on',
'entity_id': 'all',
},
{
'service': 'homeassistant.turn_on',
'entity_id': ['light.kitchen', 'light.ceiling'],
},
)
for value in options:
cv.SERVICE_SCHEMA(value)
def test_slug():
"""Test slug validation."""
schema = vol.Schema(cv.slug)
for value in (None, 'hello world'):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in (12345, 'hello'):
schema(value)
def test_string():
"""Test string validation."""
schema = vol.Schema(cv.string)
with pytest.raises(vol.Invalid):
schema(None)
with pytest.raises(vol.Invalid):
schema([])
with pytest.raises(vol.Invalid):
schema({})
for value in (True, 1, 'hello'):
schema(value)
def test_temperature_unit():
"""Test temperature unit validation."""
schema = vol.Schema(cv.temperature_unit)
with pytest.raises(vol.MultipleInvalid):
schema('K')
schema('C')
schema('F')
def test_x10_address():
"""Test x10 addr validator."""
schema = vol.Schema(cv.x10_address)
with pytest.raises(vol.Invalid):
schema('Q1')
schema('q55')
schema('garbage_addr')
schema('a1')
schema('C11')
def test_template():
"""Test template validator."""
schema = vol.Schema(cv.template)
for value in (None, '{{ partial_print }', '{% if True %}Hello', ['test']):
with pytest.raises(vol.Invalid):
schema(value)
options = (
1, 'Hello',
'{{ beer }}',
'{% if 1 == 1 %}Hello{% else %}World{% endif %}',
)
for value in options:
schema(value)
def test_template_complex():
"""Test template_complex validator."""
schema = vol.Schema(cv.template_complex)
for value in (None, '{{ partial_print }', '{% if True %}Hello'):
with pytest.raises(vol.MultipleInvalid):
schema(value)
options = (
1, 'Hello',
'{{ beer }}',
'{% if 1 == 1 %}Hello{% else %}World{% endif %}',
{'test': 1, 'test2': '{{ beer }}'},
['{{ beer }}', 1]
)
for value in options:
schema(value)
# ensure the validator didn't mutate the input
assert options == (
1, 'Hello',
'{{ beer }}',
'{% if 1 == 1 %}Hello{% else %}World{% endif %}',
{'test': 1, 'test2': '{{ beer }}'},
['{{ beer }}', 1]
)
def test_time_zone():
"""Test time zone validation."""
schema = vol.Schema(cv.time_zone)
with pytest.raises(vol.MultipleInvalid):
schema('America/Do_Not_Exist')
schema('America/Los_Angeles')
schema('UTC')
def test_date():
"""Test date validation."""
schema = vol.Schema(cv.date)
for value in ['Not a date', '23:42', '2016-11-23T18:59:08']:
with pytest.raises(vol.Invalid):
schema(value)
schema(datetime.now().date())
schema('2016-11-23')
def test_time():
"""Test date validation."""
schema = vol.Schema(cv.time)
for value in ['Not a time', '2016-11-23', '2016-11-23T18:59:08']:
with pytest.raises(vol.Invalid):
schema(value)
schema(datetime.now().time())
schema('23:42:00')
schema('23:42')
def test_datetime():
"""Test date time validation."""
schema = vol.Schema(cv.datetime)
for value in [date.today(), 'Wrong DateTime', '2016-11-23']:
with pytest.raises(vol.MultipleInvalid):
schema(value)
schema(datetime.now())
schema('2016-11-23T18:59:08')
@pytest.fixture
def schema():
"""Create a schema used for testing deprecation."""
return vol.Schema({
'venus': cv.boolean,
'mars': cv.boolean,
'jupiter': cv.boolean
})
@pytest.fixture
def version(monkeypatch):
"""Patch the version used for testing to 0.5.0."""
monkeypatch.setattr(homeassistant.const, '__version__', '0.5.0')
def test_deprecated_with_no_optionals(caplog, schema):
"""
Test deprecation behaves correctly when optional params are None.
Expected behavior:
- Outputs the appropriate deprecation warning if key is detected
- Processes schema without changing any values
- No warning or difference in output if key is not provided
"""
deprecated_schema = vol.All(
cv.deprecated('mars'),
schema
)
test_data = {'mars': True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 1
assert caplog.records[0].name == __name__
assert ("The 'mars' option (with value 'True') is deprecated, "
"please remove it from your configuration") in caplog.text
assert test_data == output
caplog.clear()
assert len(caplog.records) == 0
test_data = {'venus': True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert test_data == output
def test_deprecated_with_replacement_key(caplog, schema):
"""
Test deprecation behaves correctly when only a replacement key is provided.
Expected behavior:
- Outputs the appropriate deprecation warning if key is detected
- Processes schema moving the value from key to replacement_key
- Processes schema changing nothing if only replacement_key provided
- No warning if only replacement_key provided
- No warning or difference in output if neither key nor
replacement_key are provided
"""
deprecated_schema = vol.All(
cv.deprecated('mars', replacement_key='jupiter'),
schema
)
test_data = {'mars': True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 1
assert ("The 'mars' option (with value 'True') is deprecated, "
"please replace it with 'jupiter'") in caplog.text
assert {'jupiter': True} == output
caplog.clear()
assert len(caplog.records) == 0
test_data = {'jupiter': True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert test_data == output
test_data = {'venus': True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert test_data == output
def test_deprecated_with_invalidation_version(caplog, schema, version):
"""
Test deprecation behaves correctly with only an invalidation_version.
Expected behavior:
- Outputs the appropriate deprecation warning if key is detected
- Processes schema without changing any values
- No warning or difference in output if key is not provided
- Once the invalidation_version is crossed, raises vol.Invalid if key
is detected
"""
deprecated_schema = vol.All(
cv.deprecated('mars', invalidation_version='1.0.0'),
schema
)
message = ("The 'mars' option (with value 'True') is deprecated, "
"please remove it from your configuration. "
"This option will become invalid in version 1.0.0")
test_data = {'mars': True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 1
assert message in caplog.text
assert test_data == output
caplog.clear()
assert len(caplog.records) == 0
test_data = {'venus': False}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert test_data == output
invalidated_schema = vol.All(
cv.deprecated('mars', invalidation_version='0.1.0'),
schema
)
test_data = {'mars': True}
with pytest.raises(vol.MultipleInvalid) as exc_info:
invalidated_schema(test_data)
assert ("The 'mars' option (with value 'True') is deprecated, "
"please remove it from your configuration. This option will "
"become invalid in version 0.1.0") == str(exc_info.value)
def test_deprecated_with_replacement_key_and_invalidation_version(
caplog, schema, version
):
"""
Test deprecation behaves with a replacement key & invalidation_version.
Expected behavior:
- Outputs the appropriate deprecation warning if key is detected
- Processes schema moving the value from key to replacement_key
- Processes schema changing nothing if only replacement_key provided
- No warning if only replacement_key provided
- No warning or difference in output if neither key nor
replacement_key are provided
- Once the invalidation_version is crossed, raises vol.Invalid if key
is detected
"""
deprecated_schema = vol.All(
cv.deprecated(
'mars', replacement_key='jupiter', invalidation_version='1.0.0'
),
schema
)
warning = ("The 'mars' option (with value 'True') is deprecated, "
"please replace it with 'jupiter'. This option will become "
"invalid in version 1.0.0")
test_data = {'mars': True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 1
assert warning in caplog.text
assert {'jupiter': True} == output
caplog.clear()
assert len(caplog.records) == 0
test_data = {'jupiter': True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert test_data == output
test_data = {'venus': True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert test_data == output
invalidated_schema = vol.All(
cv.deprecated(
'mars', replacement_key='jupiter', invalidation_version='0.1.0'
),
schema
)
test_data = {'mars': True}
with pytest.raises(vol.MultipleInvalid) as exc_info:
invalidated_schema(test_data)
assert ("The 'mars' option (with value 'True') is deprecated, "
"please replace it with 'jupiter'. This option will become "
"invalid in version 0.1.0") == str(exc_info.value)
def test_deprecated_with_default(caplog, schema):
"""
Test deprecation behaves correctly with a default value.
This is likely a scenario that would never occur.
Expected behavior:
- Behaves identically as when the default value was not present
"""
deprecated_schema = vol.All(
cv.deprecated('mars', default=False),
schema
)
test_data = {'mars': True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 1
assert caplog.records[0].name == __name__
assert ("The 'mars' option (with value 'True') is deprecated, "
"please remove it from your configuration") in caplog.text
assert test_data == output
caplog.clear()
assert len(caplog.records) == 0
test_data = {'venus': True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert test_data == output
def test_deprecated_with_replacement_key_and_default(caplog, schema):
"""
Test deprecation with a replacement key and default.
Expected behavior:
- Outputs the appropriate deprecation warning if key is detected
- Processes schema moving the value from key to replacement_key
- Processes schema changing nothing if only replacement_key provided
- No warning if only replacement_key provided
- No warning if neither key nor replacement_key are provided
- Adds replacement_key with default value in this case
"""
deprecated_schema = vol.All(
cv.deprecated('mars', replacement_key='jupiter', default=False),
schema
)
test_data = {'mars': True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 1
assert ("The 'mars' option (with value 'True') is deprecated, "
"please replace it with 'jupiter'") in caplog.text
assert {'jupiter': True} == output
caplog.clear()
assert len(caplog.records) == 0
test_data = {'jupiter': True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert test_data == output
test_data = {'venus': True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert {'venus': True, 'jupiter': False} == output
deprecated_schema_with_default = vol.All(
vol.Schema({
'venus': cv.boolean,
vol.Optional('mars', default=False): cv.boolean,
vol.Optional('jupiter', default=False): cv.boolean
}),
cv.deprecated('mars', replacement_key='jupiter', default=False)
)
test_data = {'mars': True}
output = deprecated_schema_with_default(test_data.copy())
assert len(caplog.records) == 1
assert ("The 'mars' option (with value 'True') is deprecated, "
"please replace it with 'jupiter'") in caplog.text
assert {'jupiter': True} == output
def test_deprecated_with_replacement_key_invalidation_version_default(
caplog, schema, version
):
"""
Test deprecation with a replacement key, invalidation_version & default.
Expected behavior:
- Outputs the appropriate deprecation warning if key is detected
- Processes schema moving the value from key to replacement_key
- Processes schema changing nothing if only replacement_key provided
- No warning if only replacement_key provided
- No warning if neither key nor replacement_key are provided
- Adds replacement_key with default value in this case
- Once the invalidation_version is crossed, raises vol.Invalid if key
is detected
"""
deprecated_schema = vol.All(
cv.deprecated(
'mars', replacement_key='jupiter', invalidation_version='1.0.0',
default=False
),
schema
)
test_data = {'mars': True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 1
assert ("The 'mars' option (with value 'True') is deprecated, "
"please replace it with 'jupiter'. This option will become "
"invalid in version 1.0.0") in caplog.text
assert {'jupiter': True} == output
caplog.clear()
assert len(caplog.records) == 0
test_data = {'jupiter': True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert test_data == output
test_data = {'venus': True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert {'venus': True, 'jupiter': False} == output
invalidated_schema = vol.All(
cv.deprecated(
'mars', replacement_key='jupiter', invalidation_version='0.1.0'
),
schema
)
test_data = {'mars': True}
with pytest.raises(vol.MultipleInvalid) as exc_info:
invalidated_schema(test_data)
assert ("The 'mars' option (with value 'True') is deprecated, "
"please replace it with 'jupiter'. This option will become "
"invalid in version 0.1.0") == str(exc_info.value)
def test_key_dependency():
"""Test key_dependency validator."""
schema = vol.Schema(cv.key_dependency('beer', 'soda'))
options = (
{'beer': None}
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
schema(value)
options = (
{'beer': None, 'soda': None},
{'soda': None}, {}
)
for value in options:
schema(value)
def test_has_at_most_one_key():
"""Test has_at_most_one_key validator."""
schema = vol.Schema(cv.has_at_most_one_key('beer', 'soda'))
for value in (None, [], {'beer': None, 'soda': None}):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in ({}, {'beer': None}, {'soda': None}):
schema(value)
def test_has_at_least_one_key():
"""Test has_at_least_one_key validator."""
schema = vol.Schema(cv.has_at_least_one_key('beer', 'soda'))
for value in (None, [], {}, {'wine': None}):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in ({'beer': None}, {'soda': None}):
schema(value)
def test_enum():
"""Test enum validator."""
class TestEnum(enum.Enum):
"""Test enum."""
value1 = "Value 1"
value2 = "Value 2"
schema = vol.Schema(cv.enum(TestEnum))
with pytest.raises(vol.Invalid):
schema('value3')
def test_socket_timeout(): # pylint: disable=invalid-name
"""Test socket timeout validator."""
schema = vol.Schema(cv.socket_timeout)
with pytest.raises(vol.Invalid):
schema(0.0)
with pytest.raises(vol.Invalid):
schema(-1)
assert _GLOBAL_DEFAULT_TIMEOUT == schema(None)
assert schema(1) == 1.0
def test_matches_regex():
"""Test matches_regex validator."""
schema = vol.Schema(cv.matches_regex('.*uiae.*'))
with pytest.raises(vol.Invalid):
schema(1.0)
with pytest.raises(vol.Invalid):
schema(" nrtd ")
test_str = "This is a test including uiae."
assert schema(test_str) == test_str
def test_is_regex():
"""Test the is_regex validator."""
schema = vol.Schema(cv.is_regex)
with pytest.raises(vol.Invalid):
schema("(")
with pytest.raises(vol.Invalid):
schema({"a dict": "is not a regex"})
valid_re = ".*"
schema(valid_re)
def test_comp_entity_ids():
"""Test config validation for component entity IDs."""
schema = vol.Schema(cv.comp_entity_ids)
for valid in ('ALL', 'all', 'AlL', 'light.kitchen', ['light.kitchen'],
['light.kitchen', 'light.ceiling'], []):
schema(valid)
for invalid in (['light.kitchen', 'not-entity-id'], '*', ''):
with pytest.raises(vol.Invalid):
schema(invalid)
def test_schema_with_slug_keys_allows_old_slugs(caplog):
"""Test schema with slug keys allowing old slugs."""
schema = cv.schema_with_slug_keys(str)
with patch.dict(cv.INVALID_SLUGS_FOUND, clear=True):
for value in ('_world', 'wow__yeah'):
caplog.clear()
# Will raise if not allowing old slugs
schema({value: 'yo'})
assert "Found invalid slug {}".format(value) in caplog.text
assert len(cv.INVALID_SLUGS_FOUND) == 2
def test_entity_id_allow_old_validation(caplog):
"""Test schema allowing old entity_ids."""
schema = vol.Schema(cv.entity_id)
with patch.dict(cv.INVALID_ENTITY_IDS_FOUND, clear=True):
for value in ('hello.__world', 'great.wow__yeah'):
caplog.clear()
# Will raise if not allowing old entity ID
schema(value)
assert "Found invalid entity_id {}".format(value) in caplog.text
assert len(cv.INVALID_ENTITY_IDS_FOUND) == 2
def test_uuid4_hex(caplog):
"""Test uuid validation."""
schema = vol.Schema(cv.uuid4_hex)
for value in ['Not a hex string', '0', 0]:
with pytest.raises(vol.Invalid):
schema(value)
with pytest.raises(vol.Invalid):
# the 13th char should be 4
schema('a03d31b22eee1acc9b90eec40be6ed23')
with pytest.raises(vol.Invalid):
# the 17th char should be 8-a
schema('a03d31b22eee4acc7b90eec40be6ed23')
_hex = uuid.uuid4().hex
assert schema(_hex) == _hex
assert schema(_hex.upper()) == _hex
|
apache-2.0
|
jamiefolsom/edx-platform
|
common/djangoapps/student/tests/test_login.py
|
23
|
25132
|
'''
Tests for student activation and login
'''
import json
import unittest
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import HttpResponseBadRequest, HttpResponse
import httpretty
from mock import patch
from social.apps.django_app.default.models import UserSocialAuth
from external_auth.models import ExternalAuthMap
from student.tests.factories import UserFactory, RegistrationFactory, UserProfileFactory
from student.views import login_oauth_token
from third_party_auth.tests.utils import (
ThirdPartyOAuthTestMixin,
ThirdPartyOAuthTestMixinFacebook,
ThirdPartyOAuthTestMixinGoogle
)
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class LoginTest(TestCase):
'''
Test student.views.login_user() view
'''
def setUp(self):
super(LoginTest, self).setUp()
# Create one user and save it to the database
self.user = UserFactory.build(username='test', email='[email protected]')
self.user.set_password('test_password')
self.user.save()
# Create a registration for the user
RegistrationFactory(user=self.user)
# Create a profile for the user
UserProfileFactory(user=self.user)
# Create the test client
self.client = Client()
cache.clear()
# Store the login url
try:
self.url = reverse('login_post')
except NoReverseMatch:
self.url = reverse('login')
def test_login_success(self):
response, mock_audit_log = self._login_response('[email protected]', 'test_password', patched_audit_log='student.models.AUDIT_LOG')
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success', u'[email protected]'])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_success_no_pii(self):
response, mock_audit_log = self._login_response('[email protected]', 'test_password', patched_audit_log='student.models.AUDIT_LOG')
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success'])
self._assert_not_in_audit_log(mock_audit_log, 'info', [u'[email protected]'])
def test_login_success_unicode_email(self):
unicode_email = u'test' + unichr(40960) + u'@edx.org'
self.user.email = unicode_email
self.user.save()
response, mock_audit_log = self._login_response(unicode_email, 'test_password', patched_audit_log='student.models.AUDIT_LOG')
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success', unicode_email])
def test_login_fail_no_user_exists(self):
nonexistent_email = u'[email protected]'
response, mock_audit_log = self._login_response(nonexistent_email, 'test_password')
self._assert_response(response, success=False,
value='Email or password is incorrect')
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email', nonexistent_email])
@patch.dict("django.conf.settings.FEATURES", {'ADVANCED_SECURITY': True})
def test_login_fail_incorrect_email_with_advanced_security(self):
nonexistent_email = u'[email protected]'
response, mock_audit_log = self._login_response(nonexistent_email, 'test_password')
self._assert_response(response, success=False,
value='Email or password is incorrect')
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email', nonexistent_email])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_fail_no_user_exists_no_pii(self):
nonexistent_email = u'[email protected]'
response, mock_audit_log = self._login_response(nonexistent_email, 'test_password')
self._assert_response(response, success=False,
value='Email or password is incorrect')
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [nonexistent_email])
def test_login_fail_wrong_password(self):
response, mock_audit_log = self._login_response('[email protected]', 'wrong_password')
self._assert_response(response, success=False,
value='Email or password is incorrect')
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'password for', u'[email protected]', u'invalid'])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_fail_wrong_password_no_pii(self):
response, mock_audit_log = self._login_response('[email protected]', 'wrong_password')
self._assert_response(response, success=False,
value='Email or password is incorrect')
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'password for', u'invalid'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [u'[email protected]'])
def test_login_not_activated(self):
# De-activate the user
self.user.is_active = False
self.user.save()
# Should now be unable to login
response, mock_audit_log = self._login_response('[email protected]', 'test_password')
self._assert_response(response, success=False,
value="This account has not been activated")
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Account not active for user'])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_not_activated_no_pii(self):
# De-activate the user
self.user.is_active = False
self.user.save()
# Should now be unable to login
response, mock_audit_log = self._login_response('[email protected]', 'test_password')
self._assert_response(response, success=False,
value="This account has not been activated")
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Account not active for user'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [u'test'])
def test_login_unicode_email(self):
unicode_email = u'[email protected]' + unichr(40960)
response, mock_audit_log = self._login_response(unicode_email, 'test_password')
self._assert_response(response, success=False)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', unicode_email])
def test_login_unicode_password(self):
unicode_password = u'test_password' + unichr(1972)
response, mock_audit_log = self._login_response('[email protected]', unicode_password)
self._assert_response(response, success=False)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'password for', u'[email protected]', u'invalid'])
def test_logout_logging(self):
response, _ = self._login_response('[email protected]', 'test_password')
self._assert_response(response, success=True)
logout_url = reverse('logout')
with patch('student.models.AUDIT_LOG') as mock_audit_log:
response = self.client.post(logout_url)
self.assertEqual(response.status_code, 302)
self._assert_audit_log(mock_audit_log, 'info', [u'Logout', u'test'])
def test_login_user_info_cookie(self):
response, _ = self._login_response('[email protected]', 'test_password')
self._assert_response(response, success=True)
# Verify the format of the "user info" cookie set on login
cookie = self.client.cookies[settings.EDXMKTG_USER_INFO_COOKIE_NAME]
user_info = json.loads(cookie.value)
# Check that the version is set
self.assertEqual(user_info["version"], settings.EDXMKTG_USER_INFO_COOKIE_VERSION)
# Check that the username and email are set
self.assertEqual(user_info["username"], self.user.username)
self.assertEqual(user_info["email"], self.user.email)
# Check that the URLs are absolute
for url in user_info["header_urls"].values():
self.assertIn("http://testserver/", url)
def test_logout_deletes_mktg_cookies(self):
response, _ = self._login_response('[email protected]', 'test_password')
self._assert_response(response, success=True)
# Check that the marketing site cookies have been set
self.assertIn(settings.EDXMKTG_LOGGED_IN_COOKIE_NAME, self.client.cookies)
self.assertIn(settings.EDXMKTG_USER_INFO_COOKIE_NAME, self.client.cookies)
# Log out
logout_url = reverse('logout')
response = self.client.post(logout_url)
# Check that the marketing site cookies have been deleted
# (cookies are deleted by setting an expiration date in 1970)
for cookie_name in [settings.EDXMKTG_LOGGED_IN_COOKIE_NAME, settings.EDXMKTG_USER_INFO_COOKIE_NAME]:
cookie = self.client.cookies[cookie_name]
self.assertIn("01-Jan-1970", cookie.get('expires'))
@override_settings(
EDXMKTG_LOGGED_IN_COOKIE_NAME=u"unicode-logged-in",
EDXMKTG_USER_INFO_COOKIE_NAME=u"unicode-user-info",
)
def test_unicode_mktg_cookie_names(self):
# When logged in cookie names are loaded from JSON files, they may
# have type `unicode` instead of `str`, which can cause errors
# when calling Django cookie manipulation functions.
response, _ = self._login_response('[email protected]', 'test_password')
self._assert_response(response, success=True)
response = self.client.post(reverse('logout'))
self.assertRedirects(response, "/")
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_logout_logging_no_pii(self):
response, _ = self._login_response('[email protected]', 'test_password')
self._assert_response(response, success=True)
logout_url = reverse('logout')
with patch('student.models.AUDIT_LOG') as mock_audit_log:
response = self.client.post(logout_url)
self.assertEqual(response.status_code, 302)
self._assert_audit_log(mock_audit_log, 'info', [u'Logout'])
self._assert_not_in_audit_log(mock_audit_log, 'info', [u'test'])
def test_login_ratelimited_success(self):
# Try (and fail) logging in with fewer attempts than the limit of 30
# and verify that you can still successfully log in afterwards.
for i in xrange(20):
password = u'test_password{0}'.format(i)
response, _audit_log = self._login_response('[email protected]', password)
self._assert_response(response, success=False)
# now try logging in with a valid password
response, _audit_log = self._login_response('[email protected]', 'test_password')
self._assert_response(response, success=True)
def test_login_ratelimited(self):
        # try logging in 30 times, the default limit on the number of failed
        # login attempts allowed in one 5-minute period before rate limiting kicks in
for i in xrange(30):
password = u'test_password{0}'.format(i)
self._login_response('[email protected]', password)
# check to see if this response indicates that this was ratelimited
response, _audit_log = self._login_response('[email protected]', 'wrong_password')
self._assert_response(response, success=False, value='Too many failed login attempts')
@patch.dict("django.conf.settings.FEATURES", {'PREVENT_CONCURRENT_LOGINS': True})
def test_single_session(self):
creds = {'email': '[email protected]', 'password': 'test_password'}
client1 = Client()
client2 = Client()
response = client1.post(self.url, creds)
self._assert_response(response, success=True)
# Reload the user from the database
self.user = User.objects.get(pk=self.user.pk)
self.assertEqual(self.user.profile.get_meta()['session_id'], client1.session.session_key)
# second login should log out the first
response = client2.post(self.url, creds)
self._assert_response(response, success=True)
try:
            # this test can be run with either lms or studio settings;
            # since studio does not have a dashboard url, we look for
            # another url that is login_required in that case
url = reverse('dashboard')
except NoReverseMatch:
url = reverse('upload_transcripts')
response = client1.get(url)
# client1 will be logged out
self.assertEqual(response.status_code, 302)
@patch.dict("django.conf.settings.FEATURES", {'PREVENT_CONCURRENT_LOGINS': True})
def test_single_session_with_url_not_having_login_required_decorator(self):
        # access the logout url: since it does not have the login_required decorator,
        # it avoids the redirect and goes through enforce_single_login
creds = {'email': '[email protected]', 'password': 'test_password'}
client1 = Client()
client2 = Client()
response = client1.post(self.url, creds)
self._assert_response(response, success=True)
self.assertEqual(self.user.profile.get_meta()['session_id'], client1.session.session_key)
# second login should log out the first
response = client2.post(self.url, creds)
self._assert_response(response, success=True)
url = reverse('logout')
response = client1.get(url)
self.assertEqual(response.status_code, 302)
def test_change_enrollment_400(self):
"""
Tests that a 400 in change_enrollment doesn't lead to a 404
and in fact just logs in the user without incident
"""
# add this post param to trigger a call to change_enrollment
extra_post_params = {"enrollment_action": "enroll"}
with patch('student.views.change_enrollment') as mock_change_enrollment:
mock_change_enrollment.return_value = HttpResponseBadRequest("I am a 400")
response, _ = self._login_response(
'[email protected]',
'test_password',
extra_post_params=extra_post_params,
)
response_content = json.loads(response.content)
self.assertIsNone(response_content["redirect_url"])
self._assert_response(response, success=True)
def test_change_enrollment_200_no_redirect(self):
"""
Tests "redirect_url" is None if change_enrollment returns a HttpResponse
with no content
"""
# add this post param to trigger a call to change_enrollment
extra_post_params = {"enrollment_action": "enroll"}
with patch('student.views.change_enrollment') as mock_change_enrollment:
mock_change_enrollment.return_value = HttpResponse()
response, _ = self._login_response(
'[email protected]',
'test_password',
extra_post_params=extra_post_params,
)
response_content = json.loads(response.content)
self.assertIsNone(response_content["redirect_url"])
self._assert_response(response, success=True)
def _login_response(self, email, password, patched_audit_log='student.views.AUDIT_LOG', extra_post_params=None):
''' Post the login info '''
post_params = {'email': email, 'password': password}
if extra_post_params is not None:
post_params.update(extra_post_params)
with patch(patched_audit_log) as mock_audit_log:
result = self.client.post(self.url, post_params)
return result, mock_audit_log
def _assert_response(self, response, success=None, value=None):
'''
Assert that the response had status 200 and returned a valid
JSON-parseable dict.
If success is provided, assert that the response had that
value for 'success' in the JSON dict.
If value is provided, assert that the response contained that
value for 'value' in the JSON dict.
'''
self.assertEqual(response.status_code, 200)
try:
response_dict = json.loads(response.content)
except ValueError:
self.fail("Could not parse response content as JSON: %s"
% str(response.content))
if success is not None:
self.assertEqual(response_dict['success'], success)
if value is not None:
msg = ("'%s' did not contain '%s'" %
(str(response_dict['value']), str(value)))
self.assertTrue(value in response_dict['value'], msg)
def _assert_audit_log(self, mock_audit_log, level, log_strings):
"""
Check that the audit log has received the expected call as its last call.
"""
method_calls = mock_audit_log.method_calls
name, args, _kwargs = method_calls[-1]
self.assertEquals(name, level)
self.assertEquals(len(args), 1)
format_string = args[0]
for log_string in log_strings:
self.assertIn(log_string, format_string)
def _assert_not_in_audit_log(self, mock_audit_log, level, log_strings):
"""
        Check that the audit log's last call does not contain the given strings.
"""
method_calls = mock_audit_log.method_calls
name, args, _kwargs = method_calls[-1]
self.assertEquals(name, level)
self.assertEquals(len(args), 1)
format_string = args[0]
for log_string in log_strings:
self.assertNotIn(log_string, format_string)
class ExternalAuthShibTest(ModuleStoreTestCase):
"""
Tests how login_user() interacts with ExternalAuth, in particular Shib
"""
def setUp(self):
super(ExternalAuthShibTest, self).setUp()
self.course = CourseFactory.create(
org='Stanford',
number='456',
display_name='NO SHIB',
user_id=self.user.id,
)
self.shib_course = CourseFactory.create(
org='Stanford',
number='123',
display_name='Shib Only',
enrollment_domain='shib:https://idp.stanford.edu/',
user_id=self.user.id,
)
self.user_w_map = UserFactory.create(email='[email protected]')
self.extauth = ExternalAuthMap(external_id='[email protected]',
external_email='[email protected]',
external_domain='shib:https://idp.stanford.edu/',
external_credentials="",
user=self.user_w_map)
self.user_w_map.save()
self.extauth.save()
self.user_wo_map = UserFactory.create(email='[email protected]')
self.user_wo_map.save()
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_login_page_redirect(self):
"""
Tests that when a shib user types their email address into the login page, they get redirected
to the shib login.
"""
response = self.client.post(reverse('login'), {'email': self.user_w_map.email, 'password': ''})
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertEqual(obj, {
'success': False,
'redirect': reverse('shib-login'),
})
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_login_required_dashboard(self):
"""
        Tests the redirect when @login_required protects the dashboard, which should
        always go to the normal login, since there is no course context
"""
response = self.client.get(reverse('dashboard'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], 'http://testserver/login?next=/dashboard')
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_externalauth_login_required_course_context(self):
"""
        Tests the redirects when visiting a course-specific URL protected by @login_required.
        They should vary by course, depending on its enrollment_domain
"""
TARGET_URL = reverse('courseware', args=[self.course.id.to_deprecated_string()]) # pylint: disable=invalid-name
noshib_response = self.client.get(TARGET_URL, follow=True)
self.assertEqual(noshib_response.redirect_chain[-1],
('http://testserver/login?next={url}'.format(url=TARGET_URL), 302))
self.assertContains(noshib_response, ("Sign in or Register | {platform_name}"
.format(platform_name=settings.PLATFORM_NAME)))
self.assertEqual(noshib_response.status_code, 200)
TARGET_URL_SHIB = reverse('courseware', args=[self.shib_course.id.to_deprecated_string()]) # pylint: disable=invalid-name
shib_response = self.client.get(**{'path': TARGET_URL_SHIB,
'follow': True,
'REMOTE_USER': self.extauth.external_id,
'Shib-Identity-Provider': 'https://idp.stanford.edu/'})
# Test that the shib-login redirect page with ?next= and the desired page are part of the redirect chain
# The 'courseware' page actually causes a redirect itself, so it's not the end of the chain and we
# won't test its contents
self.assertEqual(shib_response.redirect_chain[-3],
('http://testserver/shib-login/?next={url}'.format(url=TARGET_URL_SHIB), 302))
self.assertEqual(shib_response.redirect_chain[-2],
('http://testserver{url}'.format(url=TARGET_URL_SHIB), 302))
self.assertEqual(shib_response.status_code, 200)
@httpretty.activate
class LoginOAuthTokenMixin(ThirdPartyOAuthTestMixin):
"""
Mixin with tests for the login_oauth_token view. A TestCase that includes
this must define the following:
BACKEND: The name of the backend from python-social-auth
USER_URL: The URL of the endpoint that the backend retrieves user data from
UID_FIELD: The field in the user data that the backend uses as the user id
"""
def setUp(self):
super(LoginOAuthTokenMixin, self).setUp()
self.url = reverse(login_oauth_token, kwargs={"backend": self.BACKEND})
def _assert_error(self, response, status_code, error):
"""Assert that the given response was a 400 with the given error code"""
self.assertEqual(response.status_code, status_code)
self.assertEqual(json.loads(response.content), {"error": error})
self.assertNotIn("partial_pipeline", self.client.session)
def test_success(self):
self._setup_provider_response(success=True)
response = self.client.post(self.url, {"access_token": "dummy"})
self.assertEqual(response.status_code, 204)
self.assertEqual(self.client.session['_auth_user_id'], self.user.id) # pylint: disable=no-member
def test_invalid_token(self):
self._setup_provider_response(success=False)
response = self.client.post(self.url, {"access_token": "dummy"})
self._assert_error(response, 401, "invalid_token")
def test_missing_token(self):
response = self.client.post(self.url)
self._assert_error(response, 400, "invalid_request")
def test_unlinked_user(self):
UserSocialAuth.objects.all().delete()
self._setup_provider_response(success=True)
response = self.client.post(self.url, {"access_token": "dummy"})
self._assert_error(response, 401, "invalid_token")
def test_get_method(self):
response = self.client.get(self.url, {"access_token": "dummy"})
self.assertEqual(response.status_code, 405)
# This is necessary because cms does not implement third party auth
@unittest.skipUnless(settings.FEATURES.get("ENABLE_THIRD_PARTY_AUTH"), "third party auth not enabled")
class LoginOAuthTokenTestFacebook(LoginOAuthTokenMixin, ThirdPartyOAuthTestMixinFacebook, TestCase):
"""Tests login_oauth_token with the Facebook backend"""
pass
# This is necessary because cms does not implement third party auth
@unittest.skipUnless(settings.FEATURES.get("ENABLE_THIRD_PARTY_AUTH"), "third party auth not enabled")
class LoginOAuthTokenTestGoogle(LoginOAuthTokenMixin, ThirdPartyOAuthTestMixinGoogle, TestCase):
"""Tests login_oauth_token with the Google backend"""
pass
|
agpl-3.0
|
dudymas/python-openstacksdk
|
openstack/compute/v2/server_interface.py
|
3
|
1503
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.compute import compute_service
from openstack import resource
class ServerInterface(resource.Resource):
id_attribute = 'mac_addr'
resource_key = 'interfaceAttachment'
resources_key = 'interfaceAttachments'
base_path = '/servers/%(server_id)s/os-interface'
service = compute_service.ComputeService()
# capabilities
allow_create = True
allow_retrieve = True
allow_update = False
allow_delete = True
allow_list = True
# Properties
#: Fixed IP addresses with subnet IDs.
fixed_ips = resource.prop('fixed_ips')
#: The MAC address.
mac_addr = resource.prop('mac_addr')
#: The network ID.
net_id = resource.prop('net_id')
#: The ID of the port for which you want to create an interface.
port_id = resource.prop('port_id')
#: The port state.
port_state = resource.prop('port_state')
#: The UUID for the server.
server_id = resource.prop('server_id')
|
apache-2.0
|
alqfahad/odoo
|
addons/event/__openerp__.py
|
261
|
2296
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Events Organisation',
'version': '0.1',
'website' : 'https://www.odoo.com/page/events',
'category': 'Tools',
'summary': 'Trainings, Conferences, Meetings, Exhibitions, Registrations',
'description': """
Organization and management of Events.
======================================
The event module allows you to efficiently organise events and all related tasks: planning, registration tracking,
attendance, etc.
Key Features
------------
* Manage your Events and Registrations
* Use emails to automatically confirm and send acknowledgements for any event registration
""",
'author': 'OpenERP SA',
'depends': ['base_setup', 'board', 'email_template', 'marketing'],
'data': [
'security/event_security.xml',
'security/ir.model.access.csv',
'wizard/event_confirm_view.xml',
'event_view.xml',
'event_data.xml',
'report/report_event_registration_view.xml',
'res_partner_view.xml',
'email_template.xml',
'views/event.xml',
],
'demo': [
'event_demo.xml',
],
'test': [
'test/ui/event_users.yml',
'test/process/event_draft2done.yml'
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
USGSDenverPychron/pychron
|
pychron/loading/tasks/actions.py
|
1
|
1926
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from pyface.tasks.action.task_action import TaskAction
from pychron.envisage.resources import icon
# ============= standard library imports ========================
# ============= local library imports ==========================
class SaveLoadingDBAction(TaskAction):
name = 'Save DB'
method = 'save_loading_db'
image = icon('database_save')
class SaveLoadingPDFAction(TaskAction):
name = 'Save PDF'
method = 'save_loading_pdf'
image = icon('file_pdf')
class SaveTrayPDFAction(TaskAction):
name = 'Save Tray PDF'
method = 'save_tray_pdf'
image = icon('file_pdf')
class ConfigurePDFAction(TaskAction):
name = 'Configure PDF'
method = 'configure_pdf'
image = icon('cog')
class EntryAction(TaskAction):
name = 'Entry'
method = 'set_entry'
class InfoAction(TaskAction):
name = 'Info'
method = 'set_info'
class EditAction(TaskAction):
name = 'Edit'
method = 'set_edit'
class GenerateResultsAction(TaskAction):
name = 'Generate Results'
method = 'generate_results'
# ============= EOF =============================================
|
apache-2.0
|
eiginn/passpie
|
passpie/process.py
|
1
|
1326
|
import logging
import os
from subprocess import Popen, PIPE
from ._compat import *
DEVNULL = open(os.devnull, 'w')
class Proc(Popen):
def communicate(self, **kwargs):
if kwargs.get('input') and isinstance(kwargs['input'], basestring):
kwargs['input'] = kwargs['input'].encode('utf-8')
return super(Proc, self).communicate(**kwargs)
def __exit__(self, *args, **kwargs):
if hasattr(super(Proc, self), '__exit__'):
super(Proc, self).__exit__(*args, **kwargs)
def __enter__(self, *args, **kwargs):
if hasattr(super(Proc, self), '__enter__'):
return super(Proc, self).__enter__(*args, **kwargs)
return self
def call(*args, **kwargs):
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
stderr = PIPE
else:
stderr = DEVNULL
kwargs.setdefault('stderr', stderr)
kwargs.setdefault('stdout', PIPE)
kwargs.setdefault('stdin', PIPE)
kwargs.setdefault('shell', False)
kwargs_input = kwargs.pop('input', None)
with Proc(*args, **kwargs) as proc:
output, error = proc.communicate(input=kwargs_input)
try:
output = output.decode('utf-8')
error = error.decode('utf-8')
except AttributeError:
pass
return output, error
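# A minimal usage sketch of call(); the command below is only an illustrative
# example, not something used elsewhere in passpie:
#
#   output, error = call(['echo', 'hello'])
#   print(output)   # -> 'hello\n'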
|
mit
|
lucienfostier/gaffer
|
python/GafferSceneTest/ParentConstraintTest.py
|
2
|
6192
|
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import IECore
import Gaffer
import GafferTest
import GafferScene
import GafferSceneTest
class ParentConstraintTest( GafferSceneTest.SceneTestCase ) :
def test( self ) :
plane1 = GafferScene.Plane()
plane1["transform"]["translate"].setValue( imath.V3f( 1, 2, 3 ) )
plane1["transform"]["scale"].setValue( imath.V3f( 1, 2, 3 ) )
plane1["transform"]["rotate"].setValue( imath.V3f( 1000, 20, 39 ) )
plane1["name"].setValue( "target" )
plane2 = GafferScene.Plane()
plane2["name"].setValue( "constrained" )
group = GafferScene.Group()
group["in"][0].setInput( plane1["out"] )
group["in"][1].setInput( plane2["out"] )
self.assertSceneValid( group["out"] )
constraint = GafferScene.ParentConstraint()
constraint["target"].setValue( "/group/target" )
constraint["in"].setInput( group["out"] )
filter = GafferScene.PathFilter()
filter["paths"].setValue( IECore.StringVectorData( [ "/group/constrained" ] ) )
constraint["filter"].setInput( filter["out"] )
self.assertSceneValid( constraint["out"] )
self.assertEqual( constraint["out"].fullTransform( "/group/constrained" ), group["out"].fullTransform( "/group/target" ) )
def testRelativeTransform( self ) :
plane1 = GafferScene.Plane()
plane1["transform"]["translate"].setValue( imath.V3f( 1, 2, 3 ) )
plane1["transform"]["rotate"].setValue( imath.V3f( 0, 90, 0 ) )
plane1["name"].setValue( "target" )
plane2 = GafferScene.Plane()
plane2["name"].setValue( "constrained" )
group = GafferScene.Group()
group["in"][0].setInput( plane1["out"] )
group["in"][1].setInput( plane2["out"] )
self.assertSceneValid( group["out"] )
constraint = GafferScene.ParentConstraint()
constraint["target"].setValue( "/group/target" )
constraint["in"].setInput( group["out"] )
constraint["relativeTransform"]["translate"].setValue( imath.V3f( 1, 0, 0 ) )
filter = GafferScene.PathFilter()
filter["paths"].setValue( IECore.StringVectorData( [ "/group/constrained" ] ) )
constraint["filter"].setInput( filter["out"] )
self.assertSceneValid( constraint["out"] )
self.assertEqual( constraint["out"].fullTransform( "/group/constrained" ), imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) * group["out"].fullTransform( "/group/target" ) )
def testDirtyPropagation( self ) :
plane1 = GafferScene.Plane()
plane2 = GafferScene.Plane()
group = GafferScene.Group()
group["in"][0].setInput( plane1["out"] )
group["in"][1].setInput( plane2["out"] )
constraint = GafferScene.ParentConstraint()
constraint["target"].setValue( "/group/target" )
constraint["in"].setInput( group["out"] )
filter = GafferScene.PathFilter()
filter["paths"].setValue( IECore.StringVectorData( [ "/group/constrained" ] ) )
constraint["filter"].setInput( filter["out"] )
cs = GafferTest.CapturingSlot( constraint.plugDirtiedSignal() )
constraint["relativeTransform"]["translate"]["x"].setValue( 10 )
plugs = { x[0] for x in cs if not x[0].getName().startswith( "__" ) }
self.assertEqual(
plugs,
{
constraint["relativeTransform"]["translate"]["x"],
constraint["relativeTransform"]["translate"],
constraint["relativeTransform"],
constraint["out"]["bound"],
constraint["out"]["childBounds"],
constraint["out"]["transform"],
constraint["out"]
}
)
def testParentNodeEquivalence( self ) :
plane1 = GafferScene.Plane()
plane1["name"].setValue( "target" )
plane2 = GafferScene.Plane()
plane2["name"].setValue( "constrained" )
plane1["transform"]["rotate"]["y"].setValue( 45 )
plane2["transform"]["translate"]["x"].setValue( 1 )
parent = GafferScene.Parent()
parent["in"].setInput( plane1["out"] )
parent["parent"].setValue( "/target" )
parent["children"][0].setInput( plane2["out"] )
group = GafferScene.Group()
group["in"][0].setInput( plane1["out"] )
group["in"][1].setInput( plane2["out"] )
constraint = GafferScene.ParentConstraint()
constraint["in"].setInput( group["out"] )
constraint["target"].setValue( "/group/target" )
filter = GafferScene.PathFilter()
filter["paths"].setValue( IECore.StringVectorData( [ "/group/constrained" ] ) )
constraint["filter"].setInput( filter["out"] )
self.assertEqual( parent["out"].fullTransform( "/target/constrained" ), constraint["out"].fullTransform( "/group/constrained" ) )
if __name__ == "__main__":
unittest.main()
|
bsd-3-clause
|
StackOps/fabuloso-catalog-360
|
automation/automation.py
|
1
|
4992
|
# Copyright 2012-2013 STACKOPS TECHNOLOGIES S.L.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fabric.api import settings, sudo
from cuisine import package_ensure, package_clean
def stop():
with settings(warn_only=True):
sudo("nohup service apirestd stop")
sudo("nohup service discovery-agent stop")
sudo("nohup service events-agent stop")
sudo("nohup service health-system stop")
sudo("nohup service celeryd stop")
def start():
sudo("python /var/lib/stackops-head/bin/head-init initialize all "
"2>/dev/null")
sudo("nohup service apirestd restart")
sudo("nohup service discovery-agent restart")
sudo("nohup service events-agent restart")
sudo("nohup service health-system restart")
sudo("nohup service celeryd restart")
def configure_ubuntu_packages():
"""Configure portal packages"""
package_ensure('stackops-head')
def uninstall_ubuntu_packages():
"""Uninstall portal packages"""
package_clean('stackops-head')
def install(dhcp_start, dhcp_end, dhcp_listen_interface, gateway,
netmask, domain, dns, license_manager_url,
license_token='vs0QiaN9TA6lIIe3uPSfiG3fs',
            download_iso="False", enable_dhcp="True"):
"""Generate automation configuration."""
sudo('echo stackops-head stackops-head/accepted-stackops-license '
'boolean true | debconf-set-selections')
sudo('echo stackops-head stackops-head/dhcp-start string %s | '
'debconf-set-selections' % dhcp_start)
sudo('echo stackops-head stackops-head/dhcp-end string %s | '
'debconf-set-selections' % dhcp_end)
sudo('echo stackops-head stackops-head/dhcp_listen_interface string %s | '
'debconf-set-selections' % dhcp_listen_interface)
sudo('echo stackops-head stackops-head/domain string %s | '
'debconf-set-selections' % domain)
sudo('echo stackops-head stackops-head/gateway string %s | '
'debconf-set-selections' % gateway)
sudo('echo stackops-head stackops-head/netmask string %s | '
'debconf-set-selections' % netmask)
sudo('echo stackops-head stackops-head/dns string %s | '
'debconf-set-selections' % dns)
sudo('echo stackops-head stackops-head/download-stackops boolean %s '
'| debconf-set-selections' % str(download_iso).lower())
sudo('echo stackops-head stackops-head/enable_dhcp boolean %s '
'| debconf-set-selections' % str(enable_dhcp).lower())
sudo('echo stackops-head stackops-head/license-manager-url string %s | '
'debconf-set-selections' % license_manager_url)
sudo('echo stackops-head stackops-head/license-manager-token string %s | '
'debconf-set-selections' % license_token)
configure_ubuntu_packages()
def configure(endpoint,
token_service,
mysql_username,
mysql_password,
automation_user,
automation_password,
mysql_schema="stackopshead",
mysql_host="127.0.0.1",
mysql_port="3306"):
"""Configure mysql in automation"""
sql_connection = ("mysql://" + mysql_username + ":" + mysql_password +
"@" + mysql_host + ":" + mysql_port + "/" + mysql_schema)
sudo('sed -e "s,^--sql_connection\s*=\s*.\+$,--sql_connection=%s," '
'-i /var/lib/stackops-head/etc/*.conf ' % sql_connection)
"""Configure keystone related in automation"""
sudo('sed -e "s,^--automation_user\s*=\s*.\+$,--automation_user=%s," '
'-i /var/lib/stackops-head/etc/stackops-head-apirest-daemon.conf'
% automation_user)
sudo('sed -e "s,^--automation_password\s*=\s*.\+$,'
'--automation_password=%s," -i '
'/var/lib/stackops-head/etc/stackops-head-apirest-daemon.conf'
% automation_password)
uri_keystone_validation = endpoint + '/tokens/'
sudo('sed -e "s,^--use_authorization\s*=\s*.\+$,--use_authorization=%s," '
'-i /var/lib/stackops-head/etc/stackops-head-apirest-daemon.conf'
% "True")
sudo('sed -e "s,^--uri_keystone_validation\s*=\s*.\+$,'
'--uri_keystone_validation=%s," '
'-i /var/lib/stackops-head/etc/stackops-head-apirest-daemon.conf '
% uri_keystone_validation)
sudo('sed -e "s,^--token_service\s*=\s*.\+$,'
'--token_service=%s," '
'-i /var/lib/stackops-head/etc/stackops-head-apirest-daemon.conf '
% token_service)
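# --- Illustrative addendum, not part of the original component ---
# A minimal sketch of how configure() above assembles the --sql_connection value
# that it then substitutes into /var/lib/stackops-head/etc/*.conf via sed.
# The credentials in the example comment are made up.
def _example_sql_connection(mysql_username, mysql_password,
                            mysql_host="127.0.0.1", mysql_port="3306",
                            mysql_schema="stackopshead"):
    # Same shape as the string built inside configure():
    # mysql://<user>:<password>@<host>:<port>/<schema>
    return ("mysql://" + mysql_username + ":" + mysql_password +
            "@" + mysql_host + ":" + mysql_port + "/" + mysql_schema)
# _example_sql_connection("automation", "secret")
# -> "mysql://automation:[email protected]:3306/stackopshead"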
|
apache-2.0
|
TansyArron/pants
|
src/python/pants/backend/python/tasks/python_eval.py
|
4
|
6324
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import pkgutil
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.tasks.python_task import PythonTask
from pants.base.exceptions import TaskError
from pants.base.generator import Generator, TemplateData
from pants.base.workunit import WorkUnit, WorkUnitLabel
class PythonEval(PythonTask):
class Error(TaskError):
"""A richer failure exception type useful for tests."""
def __init__(self, *args, **kwargs):
compiled = kwargs.pop('compiled')
failed = kwargs.pop('failed')
super(PythonEval.Error, self).__init__(*args, **kwargs)
self.compiled = compiled
self.failed = failed
_EVAL_TEMPLATE_PATH = os.path.join('templates', 'python_eval', 'eval.py.mustache')
@staticmethod
def _is_evalable(target):
return isinstance(target, (PythonLibrary, PythonBinary))
@classmethod
def register_options(cls, register):
super(PythonEval, cls).register_options(register)
register('--fail-slow', action='store_true', default=False,
help='Compile all targets and present the full list of errors.')
register('--closure', action='store_true', default=False,
help='Eval all targets in the closure individually instead of just the targets '
'specified on the command line.')
def execute(self):
targets = self.context.targets() if self.get_options().closure else self.context.target_roots
with self.invalidated(filter(self._is_evalable, targets),
topological_order=True) as invalidation_check:
compiled = self._compile_targets(invalidation_check.invalid_vts)
return compiled # Collected and returned for tests
def _compile_targets(self, invalid_vts):
with self.context.new_workunit(name='eval-targets', labels=[WorkUnitLabel.MULTITOOL]):
compiled = []
failed = []
for vt in invalid_vts:
target = vt.target
return_code = self._compile_target(target)
if return_code == 0:
vt.update() # Ensure partial progress is marked valid
compiled.append(target)
else:
if self.get_options().fail_slow:
failed.append(target)
else:
raise self.Error('Failed to eval {}'.format(target.address.spec),
compiled=compiled,
failed=[target])
if failed:
msg = 'Failed to evaluate {} targets:\n {}'.format(
len(failed),
'\n '.join(t.address.spec for t in failed))
raise self.Error(msg, compiled=compiled, failed=failed)
return compiled
def _compile_target(self, target):
# "Compiles" a target by forming an isolated chroot of its sources and transitive deps and then
# attempting to import each of the target's sources in the case of a python library or else the
# entry point in the case of a python binary.
#
# For a library with sources lib/core.py and lib/util.py a "compiler" main file would look like:
#
# if __name__ == '__main__':
# import lib.core
# import lib.util
#
# For a binary with entry point lib.bin:main the "compiler" main file would look like:
#
# if __name__ == '__main__':
# from lib.bin import main
#
# In either case the main file is executed within the target chroot to reveal missing BUILD
# dependencies.
with self.context.new_workunit(name=target.address.spec):
modules = []
if isinstance(target, PythonBinary):
source = 'entry_point {}'.format(target.entry_point)
components = target.entry_point.rsplit(':', 1)
module = components[0]
if len(components) == 2:
function = components[1]
data = TemplateData(source=source,
import_statement='from {} import {}'.format(module, function))
else:
data = TemplateData(source=source, import_statement='import {}'.format(module))
modules.append(data)
else:
for path in target.sources_relative_to_source_root():
if path.endswith('.py'):
if os.path.basename(path) == '__init__.py':
module_path = os.path.dirname(path)
else:
module_path, _ = os.path.splitext(path)
source = 'file {}'.format(os.path.join(target.target_base, path))
module = module_path.replace(os.path.sep, '.')
data = TemplateData(source=source, import_statement='import {}'.format(module))
modules.append(data)
if not modules:
# Nothing to eval, so a trivial compile success.
return 0
interpreter = self.select_interpreter_for_targets([target])
if isinstance(target, PythonBinary):
pexinfo, platforms = target.pexinfo, target.platforms
else:
pexinfo, platforms = None, None
generator = Generator(pkgutil.get_data(__name__, self._EVAL_TEMPLATE_PATH),
chroot_parent=self.chroot_cache_dir, modules=modules)
executable_file_content = generator.render()
with self.cached_chroot(interpreter=interpreter, pex_info=pexinfo,
targets=[target], platforms=platforms,
executable_file_content=executable_file_content) as chroot:
pex = chroot.pex()
with self.context.new_workunit(name='eval',
labels=[WorkUnitLabel.COMPILER, WorkUnitLabel.RUN, WorkUnitLabel.TOOL],
cmd=' '.join(pex.cmdline())) as workunit:
returncode = pex.run(stdout=workunit.output('stdout'), stderr=workunit.output('stderr'))
workunit.set_outcome(WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE)
if returncode != 0:
self.context.log.error('Failed to eval {}'.format(target.address.spec))
return returncode
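# --- Illustrative addendum, not part of the original task ---
# A standalone sketch of the module-name derivation that _compile_target() applies
# to python_library sources above; the source paths used here are made up and the
# function relies on the os import at the top of this file.
def _example_module_names():
  results = []
  for path in ('lib/__init__.py', 'lib/util.py', 'lib/sub/core.py'):
    if os.path.basename(path) == '__init__.py':
      module_path = os.path.dirname(path)
    else:
      module_path, _ = os.path.splitext(path)
    # e.g. 'lib/sub/core.py' -> 'import lib.sub.core'
    results.append('import {}'.format(module_path.replace(os.path.sep, '.')))
  return results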
|
apache-2.0
|
gpotter2/scapy
|
scapy/contrib/automotive/obd/pid/pids_80_9F.py
|
5
|
7374
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Andreas Korb <[email protected]>
# Copyright (C) Nils Weiss <[email protected]>
# This program is published under a GPLv2 license
# scapy.contrib.status = skip
from scapy.fields import StrFixedLenField, FlagsField, ScalingField, BitField
from scapy.contrib.automotive.obd.packet import OBD_Packet
# See https://en.wikipedia.org/wiki/OBD-II_PIDs for further information
# PID = Parameter IDentification
class OBD_PID80(OBD_Packet):
name = "PID_80_PIDsSupported"
fields_desc = [
FlagsField('supported_pids', 0, 32, [
'PIDA0',
'PID9F',
'PID9E',
'PID9D',
'PID9C',
'PID9B',
'PID9A',
'PID99',
'PID98',
'PID97',
'PID96',
'PID95',
'PID94',
'PID93',
'PID92',
'PID91',
'PID90',
'PID8F',
'PID8E',
'PID8D',
'PID8C',
'PID8B',
'PID8A',
'PID89',
'PID88',
'PID87',
'PID86',
'PID85',
'PID84',
'PID83',
'PID82',
'PID81'
])
]
class OBD_PID81(OBD_Packet):
name = "PID_81_EngineRunTimeForAuxiliaryEmissionsControlDevice"
fields_desc = [
BitField('reserved', 0, 3),
BitField('total_run_time_with_ei_aecd5_supported', 0, 1),
BitField('total_run_time_with_ei_aecd4_supported', 0, 1),
BitField('total_run_time_with_ei_aecd3_supported', 0, 1),
BitField('total_run_time_with_ei_aecd2_supported', 0, 1),
BitField('total_run_time_with_ei_aecd1_supported', 0, 1),
ScalingField('total_run_time_with_ei_aecd1', 0, unit='sec',
fmt='Q'),
ScalingField('total_run_time_with_ei_aecd2', 0, unit='sec',
fmt='Q'),
ScalingField('total_run_time_with_ei_aecd3', 0, unit='sec',
fmt='Q'),
ScalingField('total_run_time_with_ei_aecd4', 0, unit='sec',
fmt='Q'),
ScalingField('total_run_time_with_ei_aecd5', 0, unit='sec',
fmt='Q'),
]
class OBD_PID82(OBD_Packet):
name = "PID_82_EngineRunTimeForAuxiliaryEmissionsControlDevice"
fields_desc = [
BitField('reserved', 0, 3),
BitField('total_run_time_with_ei_aecd10_supported', 0, 1),
BitField('total_run_time_with_ei_aecd9_supported', 0, 1),
BitField('total_run_time_with_ei_aecd8_supported', 0, 1),
BitField('total_run_time_with_ei_aecd7_supported', 0, 1),
BitField('total_run_time_with_ei_aecd6_supported', 0, 1),
ScalingField('total_run_time_with_ei_aecd6', 0, unit='sec',
fmt='Q'),
ScalingField('total_run_time_with_ei_aecd7', 0, unit='sec',
fmt='Q'),
ScalingField('total_run_time_with_ei_aecd8', 0, unit='sec',
fmt='Q'),
ScalingField('total_run_time_with_ei_aecd9', 0, unit='sec',
fmt='Q'),
ScalingField('total_run_time_with_ei_aecd10', 0, unit='sec',
fmt='Q'),
]
class OBD_PID83(OBD_Packet):
name = "PID_83_NOxSensor"
fields_desc = [
BitField('reserved', 0, 6),
BitField('nox_sensor_concentration_bank2_sensor1_supported', 0, 1),
BitField('nox_sensor_concentration_bank1_sensor1_supported', 0, 1),
ScalingField('nox_sensor_concentration_bank1_sensor1', 0, unit='ppm',
fmt='H'),
ScalingField('nox_sensor_concentration_bank2_sensor1', 0, unit='ppm',
fmt='H'),
]
class OBD_PID84(OBD_Packet):
name = "PID_84_ManifoldSurfaceTemperature"
fields_desc = [
StrFixedLenField('data', b'', 1)
]
class OBD_PID85(OBD_Packet):
name = "PID_85_NoxReagentSystem"
fields_desc = [
StrFixedLenField('data', b'', 10)
]
class OBD_PID86(OBD_Packet):
name = "PID_86_ParticulateMatterSensor"
fields_desc = [
StrFixedLenField('data', b'', 5)
]
class OBD_PID87(OBD_Packet):
name = "PID_87_IntakeManifoldAbsolutePressure"
fields_desc = [
StrFixedLenField('data', b'', 5)
]
class OBD_PID88(OBD_Packet):
name = "PID_88_ScrInduceSystem"
fields_desc = [
StrFixedLenField('data', b'', 13)
]
class OBD_PID89(OBD_Packet):
# 11 - 15
name = "PID_89_RunTimeForAecd"
fields_desc = [
StrFixedLenField('data', b'', 41)
]
class OBD_PID8A(OBD_Packet):
# 16 - 20
name = "PID_8A_RunTimeForAecd"
fields_desc = [
StrFixedLenField('data', b'', 41)
]
class OBD_PID8B(OBD_Packet):
name = "PID_8B_DieselAftertreatment"
fields_desc = [
StrFixedLenField('data', b'', 7)
]
class OBD_PID8C(OBD_Packet):
name = "PID_8C_O2Sensor"
fields_desc = [
StrFixedLenField('data', b'', 16)
]
class OBD_PID8D(OBD_Packet):
name = "PID_8D_ThrottlePositionG"
fields_desc = [
StrFixedLenField('data', b'', 1)
]
class OBD_PID8E(OBD_Packet):
name = "PID_8E_EngineFrictionPercentTorque"
fields_desc = [
StrFixedLenField('data', b'', 1)
]
class OBD_PID8F(OBD_Packet):
name = "PID_8F_PmSensorBank1And2"
fields_desc = [
StrFixedLenField('data', b'', 5)
]
class OBD_PID90(OBD_Packet):
name = "PID_90_WwhObdVehicleObdSystemInformation"
fields_desc = [
StrFixedLenField('data', b'', 3)
]
class OBD_PID91(OBD_Packet):
name = "PID_91_WwhObdVehicleObdSystemInformation"
fields_desc = [
StrFixedLenField('data', b'', 5)
]
class OBD_PID92(OBD_Packet):
name = "PID_92_FuelSystemControl"
fields_desc = [
StrFixedLenField('data', b'', 2)
]
class OBD_PID93(OBD_Packet):
name = "PID_93_WwhObdVehicleObdCountersSupport"
fields_desc = [
StrFixedLenField('data', b'', 3)
]
class OBD_PID94(OBD_Packet):
name = "PID_94_NoxWarningAndInducementSystem"
fields_desc = [
StrFixedLenField('data', b'', 12)
]
class OBD_PID98(OBD_Packet):
name = "PID_98_ExhaustGasTemperatureSensor"
fields_desc = [
StrFixedLenField('data', b'', 9)
]
class OBD_PID99(OBD_Packet):
name = "PID_99_ExhaustGasTemperatureSensor"
fields_desc = [
StrFixedLenField('data', b'', 9)
]
class OBD_PID9A(OBD_Packet):
name = "PID_9A_HybridEvVehicleSystemDataBatteryVoltage"
fields_desc = [
StrFixedLenField('data', b'', 6)
]
class OBD_PID9B(OBD_Packet):
name = "PID_9B_DieselExhaustFluidSensorData"
fields_desc = [
StrFixedLenField('data', b'', 4)
]
class OBD_PID9C(OBD_Packet):
name = "PID_9C_O2SensorData"
fields_desc = [
StrFixedLenField('data', b'', 17)
]
class OBD_PID9D(OBD_Packet):
name = "PID_9D_EngineFuelRate"
fields_desc = [
StrFixedLenField('data', b'', 4)
]
class OBD_PID9E(OBD_Packet):
name = "PID_9E_EngineExhaustFlowRate"
fields_desc = [
StrFixedLenField('data', b'', 2)
]
class OBD_PID9F(OBD_Packet):
name = "PID_9F_FuelSystemPercentageUse"
fields_desc = [
StrFixedLenField('data', b'', 9)
]
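# --- Illustrative addendum, not part of the original contrib module ---
# A quick sketch of using one of the structured PIDs above with ordinary scapy
# packet semantics. The field values are made up and assume ScalingField's
# default scaling of 1 for these raw ppm readings.
def _example_pid83_roundtrip():
    p = OBD_PID83(nox_sensor_concentration_bank1_sensor1_supported=1,
                  nox_sensor_concentration_bank1_sensor1=100)
    raw = bytes(p)          # one flag byte followed by two 16-bit ('H') values
    return OBD_PID83(raw)   # dissecting the bytes recovers the same field values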
|
gpl-2.0
|
ravenland/ycmWinRepo
|
third_party/ycmd/third_party/jedi/test/completion/basic.py
|
8
|
4900
|
# -----------------
# cursor position
# -----------------
#? 0 int
int()
#? 3 int
int()
#? 4 str
int(str)
# -----------------
# should not complete
# -----------------
#? []
.
#? []
str..
#? []
a(0):.
# -----------------
# if/else/elif
# -----------------
if 1:
1
elif(3):
a = 3
else:
a = ''
#? int() str()
a
def func():
if 1:
1
elif(3):
a = 3
else:
a = ''
#? int() str()
return a
#? int() str()
func()
# -----------------
# keywords
# -----------------
#? list()
assert []
def focus_return():
#? list
return []
# -----------------
# for loops
# -----------------
for a in [1,2]:
#? int()
a
for a1 in 1,"":
#? int() str()
a1
for a3, b3 in (1,""), (1,""), (1,""):
#? int()
a3
#? str()
b3
for a4, (b4, c4) in (1,("", list)), (1,("", list)):
#? int()
a4
#? str()
b4
#? list
c4
a = []
for i in [1,'']:
#? int() str()
i
a += [i]
#? int() str()
a[0]
for i in list([1,'']):
#? int() str()
i
#? int() str()
for x in [1,'']: x
a = []
b = [1.0,'']
for i in b:
a += [i]
#? float() str()
a[0]
# -----------------
# range()
# -----------------
for i in range(10):
#? int()
i
# -----------------
# list comprehensions
# -----------------
# basics:
a = ['' for a in [1]]
#? str()
a[0]
a = [a for a in [1]]
#? int()
a[0]
a = [a for a in 1,2]
#? int()
a[0]
a = [a for a,b in [(1,'')]]
#? int()
a[0]
arr = [1,'']
a = [a for a in arr]
#? int() str()
a[0]
a = [a if 1.0 else '' for a in [1] if [1.0]]
#? int() str()
a[0]
# name resolve should be correct
left, right = 'a', 'b'
left, right = [x for x in (left, right)]
#? str()
left
# with a dict literal
#? str()
[a for a in {1:'x'}][0]
##? str()
{a-1:b for a,b in {1:'a', 3:1.0}.items()}[0]
# list comprehensions should also work in combination with functions
def listen(arg):
for x in arg:
#? str()
x
listen(['' for x in [1]])
#? str()
([str for x in []])[0]
# -----------------
# nested list comprehensions
# -----------------
b = [a for arr in [[1]] for a in arr]
#? int()
b[0]
b = [a for arr in [[1]] if '' for a in arr if '']
#? int()
b[0]
b = [b for arr in [[[1.0]]] for a in arr for b in a]
#? float()
b[0]
# jedi issue #26
#? list()
a = [[int(v) for v in line.strip().split() if v] for line in ["123", "123", "123"] if line]
#? list()
a[0]
#? int()
a[0][0]
# -----------------
# generator comprehensions
# -----------------
left, right = (i for i in (1, ''))
#? int()
left
gen = (i for i in (1,))
#? int()
next(gen)
#?
gen[0]
gen = (a for arr in [[1.0]] for a in arr)
#? float()
next(gen)
#? int()
(i for i in (1,)).send()
# issues with different formats
left, right = (i for i in
('1', '2'))
#? str()
left
# -----------------
# ternary operator
# -----------------
a = 3
b = '' if a else set()
#? str() set()
b
def ret(a):
return ['' if a else set()]
#? str() set()
ret(1)[0]
#? str() set()
ret()[0]
# -----------------
# with statements
# -----------------
with open('') as f:
#? ['closed']
f.closed
with open('') as f1, open('') as f2:
#? ['closed']
f1.closed
#? ['closed']
f2.closed
# -----------------
# global vars
# -----------------
def global_define():
global global_var_in_func
global_var_in_func = 3
#? int()
global_var_in_func
# -----------------
# within docstrs
# -----------------
def a():
"""
#? ['global_define']
global_define
"""
pass
#?
# str literals in comment """ upper
# -----------------
# magic methods
# -----------------
class A(object): pass
class B(): pass
#? ['__init__']
A.__init__
#? ['__init__']
B.__init__
#? ['__init__']
int().__init__
# -----------------
# comments
# -----------------
class A():
def __init__(self):
self.hello = {} # comment shouldn't be a string
#? dict()
A().hello
# -----------------
# unicode
# -----------------
a = 'smörbröd'
#? str()
a
xyz = 'smörbröd.py'
if 1:
#? str()
xyz
# -----------------
# exceptions
# -----------------
try:
import math
except ImportError as i_a:
#? ['i_a']
i_a
#? ImportError()
i_a
try:
import math
except ImportError, i_b:
#? ['i_b']
i_b
#? ImportError()
i_b
class MyException(Exception):
def __init__(self, my_attr):
self.my_attr = my_attr
try:
raise MyException(1)
except MyException as e:
#? ['my_attr']
e.my_attr
#? 22 ['my_attr']
for x in e.my_attr:
pass
# -----------------
# continuations
# -----------------
foo = \
1
#? int()
foo
# -----------------
# if `is not` checks
# -----------------
foo = ['a']
if foo is not None:
foo = ''.join(foo)
#? str()
foo
# -----------------
# module attributes
# -----------------
# Don't move this to imports.py, because there's a star import.
#? str()
__file__
#? ['__file__']
__file__
|
gpl-3.0
|
janebeckman/gpdb
|
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/access_methods/duplicate_entries/test_duplicate_entries.py
|
7
|
2374
|
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
from mpp.lib.PSQL import PSQL
from tinctest.lib import local_path
from tinctest.models.scenario import ScenarioTestCase
class DuplicateEntriesScenarioTestCase(ScenarioTestCase):
"""
    Automate the verification of MPP-19038, where duplicate entries were reported even though only one row existed in the table
@gucs gp_create_table_random_default_distribution=off
"""
def __init__(self, methodName):
super(DuplicateEntriesScenarioTestCase,self).__init__(methodName)
@classmethod
def setUpClass(cls):
tinctest.logger.info('Creating the tables & Initial setup')
PSQL.run_sql_file(sql_file=local_path('create_tables.sql'),
out_file=local_path('create_tables.out'))
@classmethod
def tearDownClass(cls):
pass
def test_mpp19038(self):
test_case_list0 = []
test_case_list0.append("mpp.gpdb.tests.storage.access_methods.duplicate_entries.DuplicateEntriesTestCase.killProcess_postmaster")
self.test_case_scenario.append(test_case_list0)
test_case_list1 = []
test_case_list1.append("mpp.gpdb.tests.storage.access_methods.duplicate_entries.DuplicateEntriesTestCase.reindex_vacuum")
self.test_case_scenario.append(test_case_list1)
test_case_list2 = []
test_case_list2.append("mpp.gpdb.tests.storage.access_methods.duplicate_entries.DuplicateEntriesTestCase.run_recover_rebalance")
self.test_case_scenario.append(test_case_list2)
test_case_list3 = []
test_case_list3.append("mpp.gpdb.tests.storage.access_methods.duplicate_entries.DuplicateEntriesTestCase.check_duplicate_entry")
self.test_case_scenario.append(test_case_list3)
|
apache-2.0
|
mavenlin/tensorflow
|
tensorflow/examples/speech_commands/label_wav_test.py
|
53
|
2298
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for WAVE file labeling tool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
from tensorflow.examples.speech_commands import label_wav
from tensorflow.python.platform import test
class LabelWavTest(test.TestCase):
def _getWavData(self):
with self.test_session() as sess:
sample_data = tf.zeros([1000, 2])
wav_encoder = contrib_audio.encode_wav(sample_data, 16000)
wav_data = sess.run(wav_encoder)
return wav_data
def _saveTestWavFile(self, filename, wav_data):
with open(filename, "wb") as f:
f.write(wav_data)
def testLabelWav(self):
tmp_dir = self.get_temp_dir()
wav_data = self._getWavData()
wav_filename = os.path.join(tmp_dir, "wav_file.wav")
self._saveTestWavFile(wav_filename, wav_data)
input_name = "test_input"
output_name = "test_output"
graph_filename = os.path.join(tmp_dir, "test_graph.pb")
with tf.Session() as sess:
tf.placeholder(tf.string, name=input_name)
tf.zeros([1, 3], name=output_name)
with open(graph_filename, "wb") as f:
f.write(sess.graph.as_graph_def().SerializeToString())
labels_filename = os.path.join(tmp_dir, "test_labels.txt")
with open(labels_filename, "w") as f:
f.write("a\nb\nc\n")
label_wav.label_wav(wav_filename, labels_filename, graph_filename,
input_name + ":0", output_name + ":0", 3)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
kkouer/PcGcs
|
Lib/site-packages/numpy/lib/benchmarks/bench_arraysetops.py
|
65
|
1615
|
import numpy as np
import time
from numpy.lib.arraysetops import *
def bench_unique1d( plot_results = False ):
exponents = np.linspace( 2, 7, 9 )
ratios = []
nItems = []
dt1s = []
dt2s = []
for ii in exponents:
nItem = 10 ** ii
print 'using %d items:' % nItem
a = np.fix( nItem / 10 * np.random.random( nItem ) )
print 'unique:'
tt = time.clock()
b = np.unique( a )
dt1 = time.clock() - tt
print dt1
print 'unique1d:'
tt = time.clock()
c = unique1d( a )
dt2 = time.clock() - tt
print dt2
if dt1 < 1e-8:
ratio = 'ND'
else:
ratio = dt2 / dt1
print 'ratio:', ratio
print 'nUnique: %d == %d\n' % (len( b ), len( c ))
nItems.append( nItem )
ratios.append( ratio )
dt1s.append( dt1 )
dt2s.append( dt2 )
assert np.alltrue( b == c )
print nItems
print dt1s
print dt2s
print ratios
if plot_results:
import pylab
def plotMe( fig, fun, nItems, dt1s, dt2s ):
pylab.figure( fig )
fun( nItems, dt1s, 'g-o', linewidth = 2, markersize = 8 )
fun( nItems, dt2s, 'b-x', linewidth = 2, markersize = 8 )
pylab.legend( ('unique', 'unique1d' ) )
pylab.xlabel( 'nItem' )
pylab.ylabel( 'time [s]' )
plotMe( 1, pylab.loglog, nItems, dt1s, dt2s )
plotMe( 2, pylab.plot, nItems, dt1s, dt2s )
pylab.show()
if __name__ == '__main__':
bench_unique1d( plot_results = True )
|
gpl-3.0
|
staer/mosbius
|
mosbius/fabfile.py
|
1
|
1286
|
from __future__ import with_statement
from fabric.api import *
from fabric.contrib.console import confirm
def deploy(revision=''):
if revision!='':
print "Deploying to server using revision: %s" % revision
updateSource(revision)
migrateDatabase()
restartApache()
def backupDatabase():
""" Make a backup copy of the database just in a migration fails """
with cd('~/backups'):
run('./mosbius.sh')
def migrateDatabase():
backupDatabase()
# In fabric 1.0 we can use the "prefix" context manager instead of
# concatenating commands to run in a virtual environment
with cd('~/webapps/mosbius_website/mosbius/mosbius'):
run('workon mosbius && python manage.py migrate')
def updateSource(revision=''):
with cd('~/webapps/mosbius_website/mosbius/'):
run('hg pull http://www.bitbucket.org/dih0658/mosbius/')
if revision == '':
run('hg update')
else:
run('hg update -r %s' % revision)
def updateDependencies():
with cd('~/webapps/mosbius_website/mosbius'):
run('workon mosbius && pip install -U -r requirements.txt')
def restartApache():
run('touch ~/webapps/mosbius_website/mosbius/mosbius/mosbius.wsgi')
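# --- Illustrative addendum, not part of the original fabfile ---
# A sketch of the fabric >= 1.0 alternative hinted at in the comment inside
# migrateDatabase(): using the prefix() context manager instead of chaining
# commands with '&&'. The path and the 'workon mosbius' activation are the same
# ones used above; this assumes fabric.api exposes prefix (fabric 1.x and later).
from fabric.api import prefix
def migrateDatabasePrefixVariant():
    backupDatabase()
    with cd('~/webapps/mosbius_website/mosbius/mosbius'):
        with prefix('workon mosbius'):
            run('python manage.py migrate')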
|
mit
|
matthaywardwebdesign/rethinkdb
|
test/rql_test/connections/connection_star.py
|
10
|
1535
|
#!/usr/bin/env python
'''Basic test that `from rethinkdb import *` works'''
import os, sys
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, "common"))
import driver, utils
dbName, tableName = utils.get_test_db_table()
# -- import rethinkdb driver via the star method
proto_r = utils.import_python_driver()
sys.path.insert(0, os.path.dirname(os.path.realpath(proto_r.__file__)))
from rethinkdb import *
# -- import tests
assert r == rethinkdb
assert issubclass(r, object)
assert issubclass(rethinkdb, object)
assert issubclass(RqlError, Exception)
assert issubclass(RqlClientError, RqlError)
assert issubclass(RqlCompileError, RqlError)
assert issubclass(RqlRuntimeError, RqlError)
assert issubclass(RqlDriverError, Exception)
# -- simple tests
with driver.Process(wait_until_ready=True) as server:
# - connect
r.connect(host=server.host, port=server.driver_port)
conn = rethinkdb.connect(host=server.host, port=server.driver_port)
# - create database
if dbName not in r.db_list().run(conn):
r.db_create(dbName).run(conn)
# - create table
if tableName in r.db(dbName).table_list().run(conn):
r.db(dbName).table_drop(tableName).run(conn)
r.db(dbName).table_create(tableName).run(conn)
# - simple querys
r.db_list().run(conn)
rethinkdb.db_list().run(conn)
assert len(r.db(dbName).table_list().run(conn)) > 0
assert len(rethinkdb.db(dbName).table_list().run(conn)) > 0
|
agpl-3.0
|
Mj258/weiboapi
|
srapyDemo/envs/Lib/site-packages/win32/Demos/service/nativePipeTestService.py
|
15
|
2084
|
# This is an example of a service hosted by python.exe rather than
# pythonservice.exe.
# Note that it is very rare that using python.exe is a better option
# than the default pythonservice.exe - the latter has better error handling
# so that if Python itself can't be initialized or there are very early
# import errors, you will get error details written to the event log. When
# using python.exe instead, you are forced to wait for the interpreter startup
# and imports to succeed before you are able to effectively setup your own
# error handling.
# So in short, please make sure you *really* want to do this, otherwise just
# stick with the default.
import sys
import os
import win32serviceutil
import servicemanager
from pipeTestService import TestPipeService
class NativeTestPipeService(TestPipeService):
_svc_name_ = "PyNativePipeTestService"
_svc_display_name_ = "Python Native Pipe Test Service"
_svc_description_ = "Tests Python.exe hosted services"
# tell win32serviceutil we have a custom executable and custom args
# so registration does the right thing.
_exe_name_ = sys.executable
_exe_args_ = '"' + os.path.abspath(sys.argv[0]) + '"'
def main():
if len(sys.argv)==1:
# service must be starting...
# for the sake of debugging etc, we use win32traceutil to see
# any unhandled exceptions and print statements.
import win32traceutil
print "service is starting..."
print "(execute this script with '--help' if that isn't what you want)"
servicemanager.Initialize()
servicemanager.PrepareToHostSingle(NativeTestPipeService)
# Now ask the service manager to fire things up for us...
servicemanager.StartServiceCtrlDispatcher()
print "service done!"
else:
win32serviceutil.HandleCommandLine(NativeTestPipeService)
if __name__=='__main__':
try:
main()
except (SystemExit, KeyboardInterrupt):
raise
except:
print "Something went bad!"
import traceback
traceback.print_exc()
|
mit
|
synasius/django
|
tests/template_tests/syntax_tests/test_list_index.py
|
521
|
2694
|
from django.test import SimpleTestCase
from ..utils import setup
class ListIndexTests(SimpleTestCase):
@setup({'list-index01': '{{ var.1 }}'})
def test_list_index01(self):
"""
List-index syntax allows a template to access a certain item of a
subscriptable object.
"""
output = self.engine.render_to_string('list-index01', {'var': ['first item', 'second item']})
self.assertEqual(output, 'second item')
@setup({'list-index02': '{{ var.5 }}'})
def test_list_index02(self):
"""
Fail silently when the list index is out of range.
"""
output = self.engine.render_to_string('list-index02', {'var': ['first item', 'second item']})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'list-index03': '{{ var.1 }}'})
def test_list_index03(self):
"""
Fail silently when the list index is out of range.
"""
output = self.engine.render_to_string('list-index03', {'var': None})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'list-index04': '{{ var.1 }}'})
def test_list_index04(self):
"""
Fail silently when variable is a dict without the specified key.
"""
output = self.engine.render_to_string('list-index04', {'var': {}})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'list-index05': '{{ var.1 }}'})
def test_list_index05(self):
"""
Dictionary lookup wins out when dict's key is a string.
"""
output = self.engine.render_to_string('list-index05', {'var': {'1': "hello"}})
self.assertEqual(output, 'hello')
@setup({'list-index06': '{{ var.1 }}'})
def test_list_index06(self):
"""
But list-index lookup wins out when dict's key is an int, which
behind the scenes is really a dictionary lookup (for a dict)
after converting the key to an int.
"""
output = self.engine.render_to_string('list-index06', {"var": {1: "hello"}})
self.assertEqual(output, 'hello')
@setup({'list-index07': '{{ var.1 }}'})
def test_list_index07(self):
"""
Dictionary lookup wins out when there is a string and int version
of the key.
"""
output = self.engine.render_to_string('list-index07', {"var": {'1': "hello", 1: "world"}})
self.assertEqual(output, 'hello')
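# --- Illustrative addendum, not part of the original test module ---
# A standalone demonstration of the precedence rule exercised by list-index05 and
# list-index07 above: for a '{{ var.1 }}' lookup, a string dictionary key wins over
# the integer key / list index. It uses a throwaway Engine instead of the @setup
# harness and assumes django.template.Engine is usable without configured settings.
def _list_index_precedence_demo():
    from django.template import Context, Engine
    template = Engine().from_string('{{ var.1 }}')
    return template.render(Context({'var': {'1': 'hello', 1: 'world'}}))  # 'hello'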
|
bsd-3-clause
|
pigeonflight/strider-plone
|
docker/appengine/lib/django-1.5/django/contrib/localflavor/us/forms.py
|
101
|
4555
|
"""
USA-specific Form helpers
"""
from __future__ import absolute_import, unicode_literals
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select, CharField
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
phone_digits_re = re.compile(r'^(?:1-?)?(\d{3})[-\.]?(\d{3})[-\.]?(\d{4})$')
ssn_re = re.compile(r"^(?P<area>\d{3})[-\ ]?(?P<group>\d{2})[-\ ]?(?P<serial>\d{4})$")
class USZipCodeField(RegexField):
default_error_messages = {
'invalid': _('Enter a zip code in the format XXXXX or XXXXX-XXXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(USZipCodeField, self).__init__(r'^\d{5}(?:-\d{4})?$',
max_length, min_length, *args, **kwargs)
class USPhoneNumberField(CharField):
default_error_messages = {
'invalid': _('Phone numbers must be in XXX-XXX-XXXX format.'),
}
def clean(self, value):
super(USPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
value = re.sub('(\(|\)|\s+)', '', smart_text(value))
m = phone_digits_re.search(value)
if m:
return '%s-%s-%s' % (m.group(1), m.group(2), m.group(3))
raise ValidationError(self.error_messages['invalid'])
class USSocialSecurityNumberField(Field):
"""
A United States Social Security number.
Checks the following rules to determine whether the number is valid:
* Conforms to the XXX-XX-XXXX format.
* No group consists entirely of zeroes.
* The leading group is not "666" (block "666" will never be allocated).
* The number is not in the promotional block 987-65-4320 through
987-65-4329, which are permanently invalid.
* The number is not one known to be invalid due to otherwise widespread
promotional use or distribution (e.g., the Woolworth's number or the
1962 promotional number).
"""
default_error_messages = {
'invalid': _('Enter a valid U.S. Social Security number in XXX-XX-XXXX format.'),
}
def clean(self, value):
super(USSocialSecurityNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
match = re.match(ssn_re, value)
if not match:
raise ValidationError(self.error_messages['invalid'])
area, group, serial = match.groupdict()['area'], match.groupdict()['group'], match.groupdict()['serial']
# First pass: no blocks of all zeroes.
if area == '000' or \
group == '00' or \
serial == '0000':
raise ValidationError(self.error_messages['invalid'])
# Second pass: promotional and otherwise permanently invalid numbers.
if area == '666' or \
(area == '987' and group == '65' and 4320 <= int(serial) <= 4329) or \
value == '078-05-1120' or \
value == '219-09-9999':
raise ValidationError(self.error_messages['invalid'])
return '%s-%s-%s' % (area, group, serial)
class USStateField(Field):
"""
A form field that validates its input is a U.S. state name or abbreviation.
    It normalizes the input to the standard two-letter postal service
abbreviation for the given state.
"""
default_error_messages = {
'invalid': _('Enter a U.S. state or territory.'),
}
def clean(self, value):
from .us_states import STATES_NORMALIZED
super(USStateField, self).clean(value)
if value in EMPTY_VALUES:
return ''
try:
value = value.strip().lower()
except AttributeError:
pass
else:
try:
return STATES_NORMALIZED[value.strip().lower()]
except KeyError:
pass
raise ValidationError(self.error_messages['invalid'])
class USStateSelect(Select):
"""
A Select widget that uses a list of U.S. states/territories as its choices.
"""
def __init__(self, attrs=None):
from .us_states import STATE_CHOICES
super(USStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
class USPSSelect(Select):
"""
A Select widget that uses a list of US Postal Service codes as its
choices.
"""
def __init__(self, attrs=None):
from .us_states import USPS_CHOICES
super(USPSSelect, self).__init__(attrs, choices=USPS_CHOICES)
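# --- Illustrative addendum, not part of the original module ---
# A plain-regex sketch of the structural check that USSocialSecurityNumberField.clean()
# performs above, reusing the module-level ssn_re pattern; the SSN is a made-up example.
def _example_ssn_groups():
    match = re.match(ssn_re, '123-45-6789')
    # -> {'area': '123', 'group': '45', 'serial': '6789'}
    return match.groupdict()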
|
mit
|
westinedu/similarinterest
|
dbindexer/api.py
|
74
|
1119
|
from .lookups import LookupDoesNotExist, ExtraFieldLookup
from . import lookups as lookups_module
from .resolver import resolver
import inspect
# TODO: add possibility to add lookup modules
def create_lookup(lookup_def):
for _, cls in inspect.getmembers(lookups_module):
if inspect.isclass(cls) and issubclass(cls, ExtraFieldLookup) and \
cls.matches_lookup_def(lookup_def):
return cls()
raise LookupDoesNotExist('No Lookup found for %s .' % lookup_def)
def register_index(model, mapping):
for field_name, lookups in mapping.items():
if not isinstance(lookups, (list, tuple)):
lookups = (lookups, )
# create indexes and add model and field_name to lookups
# create ExtraFieldLookup instances on the fly if needed
for lookup in lookups:
lookup_def = None
if not isinstance(lookup, ExtraFieldLookup):
lookup_def = lookup
lookup = create_lookup(lookup_def)
lookup.contribute(model, field_name, lookup_def)
resolver.create_index(lookup)
|
bsd-3-clause
|
gottesmm/swift
|
utils/swift_build_support/tests/test_host.py
|
48
|
2433
|
# test_host.py - Unit tests for swift_build_support.host -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import platform
import unittest
import swift_build_support.host as sbs_host
class HostTestCase(unittest.TestCase):
def test_system_memory(self):
# We make sure that we get an integer back. If we get an integer back,
# we know that we at least were able to get some sort of information
# from the system and it could be parsed as an integer. This is just a
# smoke test.
supported_platforms = [('Darwin', 'x86_64')]
mem = sbs_host.system_memory()
if (platform.system(), platform.machine()) not in supported_platforms:
self.assertIsNone(mem)
else:
self.assertIsInstance(mem, int)
def test_lto_link_job_counts(self):
# Make sure that:
#
# 1. we get back a dictionary with two keys in it, the first called
# llvm, the other called swift.
#
        # 2. The values associated with these keys are either None (if we do not
        # support the platform) or an int that is reasonable (i.e. <
# 100). The number 100 is just a heuristic number that is appropriate
# currently since LTO uses so much memory. If and when that changes,
# this number should change.
supported_platforms = [('Darwin', 'x86_64')]
reasonable_upper_bound_of_lto_threads = 100
result = sbs_host.max_lto_link_job_counts()
self.assertIsInstance(result, dict)
self.assertEqual(len(result), 2)
if (platform.system(), platform.machine()) not in supported_platforms:
self.assertIsNone(result['llvm'])
self.assertIsNone(result['swift'])
return
self.assertIsNotNone(result['llvm'])
self.assertIsNotNone(result['swift'])
self.assertIsInstance(result['llvm'], int)
self.assertIsInstance(result['swift'], int)
self.assertLess(result['llvm'], reasonable_upper_bound_of_lto_threads)
self.assertLess(result['swift'], reasonable_upper_bound_of_lto_threads)
|
apache-2.0
|
brokenjacobs/ansible
|
lib/ansible/modules/cloud/openstack/os_server_volume.py
|
29
|
4905
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_server_volume
short_description: Attach/Detach Volumes from OpenStack VM's
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Attach or Detach volumes from OpenStack VM's
options:
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
required: false
server:
description:
- Name or ID of server you want to attach a volume to
required: true
volume:
description:
- Name or id of volume you want to attach to a server
required: true
device:
description:
- Device you want to attach. Defaults to auto finding a device name.
required: false
default: None
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Attaches a volume to a compute host
- name: attach a volume
hosts: localhost
tasks:
- name: attach volume to host
os_server_volume:
state: present
cloud: mordred
server: Mysql-server
volume: mysql-data
device: /dev/vdb
'''
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def _system_state_change(state, device):
"""Check if system state would change."""
if state == 'present':
if device:
return False
return True
if state == 'absent':
if device:
return True
return False
return False
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
volume=dict(required=True),
device=dict(default=None), # None == auto choose device name
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
state = module.params['state']
wait = module.params['wait']
timeout = module.params['timeout']
try:
cloud = shade.openstack_cloud(**module.params)
server = cloud.get_server(module.params['server'])
volume = cloud.get_volume(module.params['volume'])
dev = cloud.get_volume_attach_device(volume, server.id)
if module.check_mode:
module.exit_json(changed=_system_state_change(state, dev))
if state == 'present':
if dev:
# Volume is already attached to this server
module.exit_json(changed=False)
cloud.attach_volume(server, volume, module.params['device'],
wait=wait, timeout=timeout)
server = cloud.get_server(module.params['server']) # refresh
volume = cloud.get_volume(module.params['volume']) # refresh
hostvars = meta.get_hostvars_from_server(cloud, server)
module.exit_json(
changed=True,
id=volume['id'],
attachments=volume['attachments'],
openstack=hostvars
)
elif state == 'absent':
if not dev:
# Volume is not attached to this server
module.exit_json(changed=False)
cloud.detach_volume(server, volume, wait=wait, timeout=timeout)
module.exit_json(
changed=True,
result='Detached volume from server'
)
except (shade.OpenStackCloudException, shade.OpenStackCloudTimeout) as e:
module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_utils/common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
chetan51/nupic
|
examples/opf/experiments/opfrunexperiment_test/checkpoints/b/description.py
|
17
|
2324
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
###############################################################################
# IMPORTANT!!!
# This params file is dynamically generated by the RunExperimentPermutations
# script. Any changes made manually will be over-written the next time
# RunExperimentPermutations is run!!!
###############################################################################
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
config ={
'aggregationInfo' : {'seconds': 0, 'fields': [(u'c1', 'first'), (u'c0', 'first')], 'months': 0, 'days': 0, 'years': 0, 'hours': 1, 'microseconds': 0, 'weeks': 0, 'minutes': 0, 'milliseconds': 0},
'modelParams' : {'sensorParams': {'encoders': {u'c0_timeOfDay': None, u'c0_dayOfWeek': None, u'c1': {'name': 'c1', 'clipInput': True, 'n': 275, 'fieldname': 'c1', 'w': 21, 'type': 'AdaptiveScalarEncoder'}, u'c0_weekend': None}}, 'inferenceType': 'NontemporalMultiStep', 'spParams': {'synPermInactiveDec': 0.052500000000000005}, 'tpParams': {'minThreshold': 11, 'activationThreshold': 14, 'pamLength': 3}, 'clParams': {'alpha': 0.050050000000000004}},
'dataPath': 'data/b.csv',
}
mod = importBaseDescription('../base.py', config)
locals().update(mod.__dict__)
|
gpl-3.0
|
boundarydevices/android_external_chromium_org
|
chrome/common/extensions/docs/server2/api_categorizer_test.py
|
10
|
2368
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from api_categorizer import APICategorizer
from compiled_file_system import CompiledFileSystem
from extensions_paths import CHROME_EXTENSIONS
from object_store_creator import ObjectStoreCreator
from test_file_system import TestFileSystem
def _ToTestData(obj):
'''Transforms |obj| into test data by turning a list of files into an object
mapping that file to its contents (derived from its name).
'''
return dict((name, name) for name in obj)
_TEST_DATA = {
'api': {
'_api_features.json': '{}',
'_manifest_features.json': '{}',
'_permission_features.json': '{}',
},
'docs': {
'templates': {
'json': {
'api_availabilities.json': '{}',
'manifest.json': '{}',
'permissions.json': '{}',
},
'public': {
'apps': _ToTestData([
'alarms.html',
'app_window.html',
'experimental_bluetooth.html',
'experimental_power.html',
'storage.html',
'sockets_udp.html'
]),
'extensions': _ToTestData([
'alarms.html',
'browserAction.html',
'experimental_history.html',
'experimental_power.html',
'infobars.html',
'storage.html',
'sockets_udp.html'
]),
},
},
}
}
class APICategorizerTest(unittest.TestCase):
def setUp(self):
self._api_categorizer = APICategorizer(
TestFileSystem(_TEST_DATA, relative_to=CHROME_EXTENSIONS),
CompiledFileSystem.Factory(ObjectStoreCreator.ForTest()))
def testGetAPICategory(self):
get_category = self._api_categorizer.GetCategory
self.assertEqual('chrome', get_category('apps', 'alarms'))
self.assertEqual('chrome', get_category('extensions', 'alarms'))
self.assertEqual('private', get_category('apps', 'musicManagerPrivate'))
self.assertEqual('private', get_category('extensions', 'notDocumentedApi'))
self.assertEqual('experimental',
get_category('apps', 'experimental.bluetooth'))
self.assertEqual('experimental',
get_category('extensions', 'experimental.history'))
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
nowopen/scrapy
|
tests/test_contracts.py
|
140
|
5471
|
from unittest import TextTestResult
from twisted.trial import unittest
from scrapy.spiders import Spider
from scrapy.http import Request
from scrapy.item import Item, Field
from scrapy.contracts import ContractsManager
from scrapy.contracts.default import (
UrlContract,
ReturnsContract,
ScrapesContract,
)
class TestItem(Item):
name = Field()
url = Field()
class ResponseMock(object):
url = 'http://scrapy.org'
class TestSpider(Spider):
name = 'demo_spider'
def returns_request(self, response):
""" method which returns request
@url http://scrapy.org
@returns requests 1
"""
return Request('http://scrapy.org', callback=self.returns_item)
def returns_item(self, response):
""" method which returns item
@url http://scrapy.org
@returns items 1 1
"""
return TestItem(url=response.url)
def returns_dict_item(self, response):
""" method which returns item
@url http://scrapy.org
@returns items 1 1
"""
return {"url": response.url}
def returns_fail(self, response):
""" method which returns item
@url http://scrapy.org
@returns items 0 0
"""
return TestItem(url=response.url)
def returns_dict_fail(self, response):
""" method which returns item
@url http://scrapy.org
@returns items 0 0
"""
return {'url': response.url}
def scrapes_item_ok(self, response):
""" returns item with name and url
@url http://scrapy.org
@returns items 1 1
@scrapes name url
"""
return TestItem(name='test', url=response.url)
def scrapes_dict_item_ok(self, response):
""" returns item with name and url
@url http://scrapy.org
@returns items 1 1
@scrapes name url
"""
return {'name': 'test', 'url': response.url}
def scrapes_item_fail(self, response):
""" returns item with no name
@url http://scrapy.org
@returns items 1 1
@scrapes name url
"""
return TestItem(url=response.url)
def scrapes_dict_item_fail(self, response):
""" returns item with no name
@url http://scrapy.org
@returns items 1 1
@scrapes name url
"""
return {'url': response.url}
def parse_no_url(self, response):
""" method with no url
@returns items 1 1
"""
pass
class ContractsManagerTest(unittest.TestCase):
contracts = [UrlContract, ReturnsContract, ScrapesContract]
def setUp(self):
self.conman = ContractsManager(self.contracts)
self.results = TextTestResult(stream=None, descriptions=False, verbosity=0)
def should_succeed(self):
self.assertFalse(self.results.failures)
self.assertFalse(self.results.errors)
def should_fail(self):
self.assertTrue(self.results.failures)
self.assertFalse(self.results.errors)
def test_contracts(self):
spider = TestSpider()
# extract contracts correctly
contracts = self.conman.extract_contracts(spider.returns_request)
self.assertEqual(len(contracts), 2)
self.assertEqual(frozenset(type(x) for x in contracts),
frozenset([UrlContract, ReturnsContract]))
# returns request for valid method
request = self.conman.from_method(spider.returns_request, self.results)
self.assertNotEqual(request, None)
# no request for missing url
request = self.conman.from_method(spider.parse_no_url, self.results)
self.assertEqual(request, None)
def test_returns(self):
spider = TestSpider()
response = ResponseMock()
# returns_item
request = self.conman.from_method(spider.returns_item, self.results)
request.callback(response)
self.should_succeed()
# returns_dict_item
request = self.conman.from_method(spider.returns_dict_item, self.results)
request.callback(response)
self.should_succeed()
# returns_request
request = self.conman.from_method(spider.returns_request, self.results)
request.callback(response)
self.should_succeed()
# returns_fail
request = self.conman.from_method(spider.returns_fail, self.results)
request.callback(response)
self.should_fail()
# returns_dict_fail
request = self.conman.from_method(spider.returns_dict_fail, self.results)
request.callback(response)
self.should_fail()
def test_scrapes(self):
spider = TestSpider()
response = ResponseMock()
# scrapes_item_ok
request = self.conman.from_method(spider.scrapes_item_ok, self.results)
request.callback(response)
self.should_succeed()
# scrapes_dict_item_ok
request = self.conman.from_method(spider.scrapes_dict_item_ok, self.results)
request.callback(response)
self.should_succeed()
# scrapes_item_fail
request = self.conman.from_method(spider.scrapes_item_fail,
self.results)
request.callback(response)
self.should_fail()
# scrapes_dict_item_fail
request = self.conman.from_method(spider.scrapes_dict_item_fail,
self.results)
request.callback(response)
self.should_fail()
|
bsd-3-clause
|
google/rekall
|
rekall-core/rekall/plugins/windows/malware/apihooks_test.py
|
4
|
1482
|
from rekall import addrspace
from rekall import testlib
from rekall.plugins.windows.malware import apihooks
class TestHookHeuristics(testlib.RekallBaseUnitTestCase):
"""Test the hook detection heuristic.
The actual test cases are generated using the nasm assembler in:
rekall/src/hooks/amd64.asm and rekall/src/hooks/i386.asm
"""
def testHook(self):
session = self.MakeUserSession()
# The target address should be fixed at this offset.
target = 0x100
heuristic = apihooks.HookHeuristic(session=session)
profile = session.profile = session.LoadProfile("tests/hooks")
for arch in ["AMD64", "I386"]:
for test_case in profile.data[arch]:
offset = test_case["offset"]
# Test case data is the assembly snippet mapped at the specified
# offset in the address space.
address_space = addrspace.BufferAddressSpace(
data=test_case["data"].decode("base64"),
session=session, base_offset=offset)
function = session.profile.Function(
offset=offset, vm=address_space, name=test_case["name"],
mode=arch)
# Detect the jump in this function
destination = heuristic.Inspect(function)
# All hooks in test cases go to the same target offset (0x100).
self.assertEqual(destination, target)
|
gpl-2.0
|
mgaffney/avro
|
lang/py/src/avro/protocol.py
|
2
|
8167
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Protocol implementation.
"""
try:
import hashlib
except ImportError:
import md5
try:
import simplejson as json
except ImportError:
import json
from avro import schema
#
# Constants
#
# TODO(hammer): confirmed 'fixed' with Doug
VALID_TYPE_SCHEMA_TYPES = ('enum', 'record', 'error', 'fixed')
#
# Exceptions
#
class ProtocolParseException(schema.AvroException):
pass
#
# Base Classes
#
class Protocol(object):
"""An application protocol."""
def _parse_types(self, types, type_names):
type_objects = []
for type in types:
type_object = schema.make_avsc_object(type, type_names)
if type_object.type not in VALID_TYPE_SCHEMA_TYPES:
fail_msg = 'Type %s not an enum, fixed, record, or error.' % type
raise ProtocolParseException(fail_msg)
type_objects.append(type_object)
return type_objects
def _parse_messages(self, messages, names):
message_objects = {}
for name, body in messages.iteritems():
if message_objects.has_key(name):
fail_msg = 'Message name "%s" repeated.' % name
raise ProtocolParseException(fail_msg)
elif not(hasattr(body, 'get') and callable(body.get)):
fail_msg = 'Message name "%s" has non-object body %s.' % (name, body)
raise ProtocolParseException(fail_msg)
request = body.get('request')
response = body.get('response')
errors = body.get('errors')
message_objects[name] = Message(name, request, response, errors, names)
return message_objects
def __init__(self, name, namespace=None, types=None, messages=None):
# Ensure valid ctor args
if not name:
fail_msg = 'Protocols must have a non-empty name.'
raise ProtocolParseException(fail_msg)
elif not isinstance(name, basestring):
fail_msg = 'The name property must be a string.'
raise ProtocolParseException(fail_msg)
elif namespace is not None and not isinstance(namespace, basestring):
fail_msg = 'The namespace property must be a string.'
raise ProtocolParseException(fail_msg)
elif types is not None and not isinstance(types, list):
fail_msg = 'The types property must be a list.'
raise ProtocolParseException(fail_msg)
elif (messages is not None and
not(hasattr(messages, 'get') and callable(messages.get))):
fail_msg = 'The messages property must be a JSON object.'
raise ProtocolParseException(fail_msg)
self._props = {}
self.set_prop('name', name)
if namespace is not None: self.set_prop('namespace', namespace)
type_names = {}
if types is not None:
self.set_prop('types', self._parse_types(types, type_names))
if messages is not None:
self.set_prop('messages', self._parse_messages(messages, type_names))
if hashlib:
self._md5 = hashlib.md5(str(self)).digest()
else:
self._md5 = md5.new(str(self)).digest()
# read-only properties
name = property(lambda self: self.get_prop('name'))
namespace = property(lambda self: self.get_prop('namespace'))
fullname = property(lambda self:
schema.Name.make_fullname(self.name, self.namespace))
types = property(lambda self: self.get_prop('types'))
types_dict = property(lambda self: dict([(type.name, type)
for type in self.types]))
messages = property(lambda self: self.get_prop('messages'))
md5 = property(lambda self: self._md5)
props = property(lambda self: self._props)
# utility functions to manipulate properties dict
def get_prop(self, key):
return self.props.get(key)
def set_prop(self, key, value):
self.props[key] = value
def __str__(self):
# until we implement a JSON encoder for Schema and Message objects,
# we'll have to go through and call str() by hand.
to_dump = {}
to_dump['protocol'] = self.name
if self.namespace: to_dump['namespace'] = self.namespace
if self.types:
to_dump['types'] = [json.loads(str(t)) for t in self.types]
if self.messages:
messages_dict = {}
for name, body in self.messages.iteritems():
messages_dict[name] = json.loads(str(body))
to_dump['messages'] = messages_dict
return json.dumps(to_dump)
def __eq__(self, that):
to_cmp = json.loads(str(self))
return to_cmp == json.loads(str(that))
class Message(object):
"""A Protocol message."""
def _parse_request(self, request, names):
if not isinstance(request, list):
fail_msg = 'Request property not a list: %s' % request
raise ProtocolParseException(fail_msg)
return schema.RecordSchema(None, None, request, names, 'request')
def _parse_response(self, response, names):
if isinstance(response, basestring) and names.has_key(response):
self._response_from_names = True
return names.get(response)
else:
return schema.make_avsc_object(response, names)
def _parse_errors(self, errors, names):
if not isinstance(errors, list):
fail_msg = 'Errors property not a list: %s' % errors
raise ProtocolParseException(fail_msg)
errors_for_parsing = {'type': 'error_union', 'declared_errors': errors}
return schema.make_avsc_object(errors_for_parsing, names)
def __init__(self, name, request, response, errors=None, names=None):
self._name = name
self._response_from_names = False
self._props = {}
self.set_prop('request', self._parse_request(request, names))
self.set_prop('response', self._parse_response(response, names))
if errors is not None:
self.set_prop('errors', self._parse_errors(errors, names))
# read-only properties
name = property(lambda self: self._name)
response_from_names = property(lambda self: self._response_from_names)
request = property(lambda self: self.get_prop('request'))
response = property(lambda self: self.get_prop('response'))
errors = property(lambda self: self.get_prop('errors'))
props = property(lambda self: self._props)
# utility functions to manipulate properties dict
def get_prop(self, key):
return self.props.get(key)
def set_prop(self, key, value):
self.props[key] = value
# TODO(hammer): allow schemas and fields to be JSON Encoded!
def __str__(self):
to_dump = {}
to_dump['request'] = json.loads(str(self.request))
if self.response_from_names:
to_dump['response'] = self.response.fullname
else:
to_dump['response'] = json.loads(str(self.response))
if self.errors:
to_dump['errors'] = json.loads(str(self.errors))
return json.dumps(to_dump)
def __eq__(self, that):
return self.name == that.name and self.props == that.props
def make_avpr_object(json_data):
"""Build Avro Protocol from data parsed out of JSON string."""
if hasattr(json_data, 'get') and callable(json_data.get):
name = json_data.get('protocol')
namespace = json_data.get('namespace')
types = json_data.get('types')
messages = json_data.get('messages')
return Protocol(name, namespace, types, messages)
else:
raise ProtocolParseException('Not a JSON object: %s' % json_data)
def parse(json_string):
"""Constructs the Protocol from the JSON text."""
try:
json_data = json.loads(json_string)
except:
raise ProtocolParseException('Error parsing JSON: %s' % json_string)
# construct the Avro Protocol object
return make_avpr_object(json_data)
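# Illustrative usage sketch (not part of the original module; the protocol JSON
# below is hypothetical):
#
#   echo = parse('{"protocol": "Echo", "namespace": "example.proto"}')
#   print echo.name        # 'Echo'
#   print echo.namespace   # 'example.proto'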
|
apache-2.0
|
Tiger66639/ansible-modules-core
|
packaging/os/rhn_register.py
|
122
|
12900
|
#!/usr/bin/python
# (c) James Laska
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rhn_register
short_description: Manage Red Hat Network registration using the C(rhnreg_ks) command
description:
- Manage registration to the Red Hat Network.
version_added: "1.2"
author: James Laska
notes:
- In order to register a system, rhnreg_ks requires either a username and password, or an activationkey.
requirements:
- rhnreg_ks
options:
state:
description:
- whether to register (C(present)), or unregister (C(absent)) a system
required: false
choices: [ "present", "absent" ]
default: "present"
username:
description:
- Red Hat Network username
required: False
default: null
password:
description:
- Red Hat Network password
required: False
default: null
server_url:
description:
- Specify an alternative Red Hat Network server URL
required: False
default: Current value of I(serverURL) from C(/etc/sysconfig/rhn/up2date) is the default
activationkey:
description:
- supply an activation key for use with registration
required: False
default: null
profilename:
description:
            - supply a profilename for use with registration
required: False
default: null
version_added: "2.0"
channels:
description:
- Optionally specify a list of comma-separated channels to subscribe to upon successful registration.
required: false
default: []
'''
EXAMPLES = '''
# Unregister system from RHN.
- rhn_register: state=absent username=joe_user password=somepass
# Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
- rhn_register: state=present username=joe_user password=somepass
# Register with activationkey (1-222333444) and enable extended update support.
- rhn_register: state=present activationkey=1-222333444 enable_eus=true
# Register with activationkey (1-222333444) and set a profilename which may differ from the hostname.
- rhn_register: state=present activationkey=1-222333444 profilename=host.example.com.custom
# Register as user (joe_user) with password (somepass) against a satellite
# server specified by (server_url).
- rhn_register: >
state=present
username=joe_user
password=somepass
server_url=https://xmlrpc.my.satellite/XMLRPC
# Register as user (joe_user) with password (somepass) and enable
# channels (rhel-x86_64-server-6-foo-1) and (rhel-x86_64-server-6-bar-1).
- rhn_register: state=present username=joe_user
password=somepass
channels=rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1
'''
import sys
import types
import xmlrpclib
import urlparse
# Attempt to import rhn client tools
sys.path.insert(0, '/usr/share/rhn')
try:
import up2date_client
import up2date_client.config
except ImportError, e:
module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?\n%s" % e)
# INSERT REDHAT SNIPPETS
from ansible.module_utils.redhat import *
# INSERT COMMON SNIPPETS
from ansible.module_utils.basic import *
class Rhn(RegistrationBase):
def __init__(self, username=None, password=None):
RegistrationBase.__init__(self, username, password)
self.config = self.load_config()
def load_config(self):
'''
Read configuration from /etc/sysconfig/rhn/up2date
'''
self.config = up2date_client.config.initUp2dateConfig()
# Add support for specifying a default value w/o having to standup some
# configuration. Yeah, I know this should be subclassed ... but, oh
# well
def get_option_default(self, key, default=''):
# ignore pep8 W601 errors for this line
# setting this to use 'in' does not work in the rhn library
if self.has_key(key):
return self[key]
else:
return default
self.config.get_option = types.MethodType(get_option_default, self.config, up2date_client.config.Config)
return self.config
@property
def hostname(self):
'''
Return the non-xmlrpc RHN hostname. This is a convenience method
used for displaying a more readable RHN hostname.
Returns: str
'''
url = urlparse.urlparse(self.config['serverURL'])
return url[1].replace('xmlrpc.','')
@property
def systemid(self):
systemid = None
xpath_str = "//member[name='system_id']/value/string"
if os.path.isfile(self.config['systemIdPath']):
fd = open(self.config['systemIdPath'], 'r')
xml_data = fd.read()
fd.close()
# Ugh, xml parsing time ...
# First, try parsing with libxml2 ...
if systemid is None:
try:
import libxml2
doc = libxml2.parseDoc(xml_data)
ctxt = doc.xpathNewContext()
systemid = ctxt.xpathEval(xpath_str)[0].content
doc.freeDoc()
ctxt.xpathFreeContext()
except ImportError:
pass
# m-kay, let's try with lxml now ...
if systemid is None:
try:
from lxml import etree
root = etree.fromstring(xml_data)
systemid = root.xpath(xpath_str)[0].text
except ImportError:
pass
# Strip the 'ID-' prefix
if systemid is not None and systemid.startswith('ID-'):
systemid = systemid[3:]
return int(systemid)
@property
def is_registered(self):
'''
Determine whether the current system is registered.
Returns: True|False
'''
return os.path.isfile(self.config['systemIdPath'])
def configure(self, server_url):
'''
Configure system for registration
'''
self.config.set('serverURL', server_url)
self.config.save()
def enable(self):
'''
Prepare the system for RHN registration. This includes ...
* enabling the rhnplugin yum plugin
* disabling the subscription-manager yum plugin
'''
RegistrationBase.enable(self)
self.update_plugin_conf('rhnplugin', True)
self.update_plugin_conf('subscription-manager', False)
def register(self, enable_eus=False, activationkey=None, profilename=None):
'''
Register system to RHN. If enable_eus=True, extended update
support will be requested.
'''
register_cmd = "/usr/sbin/rhnreg_ks --username='%s' --password='%s' --force" % (self.username, self.password)
if self.module.params.get('server_url', None):
register_cmd += " --serverUrl=%s" % self.module.params.get('server_url')
if enable_eus:
register_cmd += " --use-eus-channel"
if activationkey is not None:
register_cmd += " --activationkey '%s'" % activationkey
if profilename is not None:
register_cmd += " --profilename '%s'" % profilename
# FIXME - support --systemorgid
rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True, use_unsafe_shell=True)
def api(self, method, *args):
'''
Convenience RPC wrapper
'''
if not hasattr(self, 'server') or self.server is None:
if self.hostname != 'rhn.redhat.com':
url = "https://%s/rpc/api" % self.hostname
else:
url = "https://xmlrpc.%s/rpc/api" % self.hostname
self.server = xmlrpclib.Server(url, verbose=0)
self.session = self.server.auth.login(self.username, self.password)
func = getattr(self.server, method)
return func(self.session, *args)
def unregister(self):
'''
Unregister a previously registered system
'''
# Initiate RPC connection
self.api('system.deleteSystems', [self.systemid])
# Remove systemid file
os.unlink(self.config['systemIdPath'])
def subscribe(self, channels=[]):
if len(channels) <= 0:
return
current_channels = self.api('channel.software.listSystemChannels', self.systemid)
new_channels = [item['channel_label'] for item in current_channels]
new_channels.extend(channels)
return self.api('channel.software.setSystemChannels', self.systemid, new_channels)
def _subscribe(self, channels=[]):
'''
Subscribe to requested yum repositories using 'rhn-channel' command
'''
rhn_channel_cmd = "rhn-channel --user='%s' --password='%s'" % (self.username, self.password)
rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --available-channels", check_rc=True)
# Enable requested repoid's
for wanted_channel in channels:
# Each inserted repo regexp will be matched. If no match, no success.
for available_channel in stdout.rstrip().split('\n'): # .rstrip() because of \n at the end -> empty string at the end
                if re.search(wanted_channel, available_channel):
rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --add --channel=%s" % available_channel, check_rc=True)
def main():
# Read system RHN configuration
rhn = Rhn()
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'absent']),
username = dict(default=None, required=False),
password = dict(default=None, required=False),
server_url = dict(default=rhn.config.get_option('serverURL'), required=False),
activationkey = dict(default=None, required=False),
profilename = dict(default=None, required=False),
enable_eus = dict(default=False, type='bool'),
channels = dict(default=[], type='list'),
)
)
state = module.params['state']
rhn.username = module.params['username']
rhn.password = module.params['password']
rhn.configure(module.params['server_url'])
activationkey = module.params['activationkey']
profilename = module.params['profilename']
channels = module.params['channels']
rhn.module = module
# Ensure system is registered
if state == 'present':
# Check for missing parameters ...
if not (activationkey or rhn.username or rhn.password):
module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, rhn.username, rhn.password))
if not activationkey and not (rhn.username and rhn.password):
            module.fail_json(msg="Missing arguments. If registering without an activationkey, you must supply both a username and a password.")
# Register system
if rhn.is_registered:
module.exit_json(changed=False, msg="System already registered.")
else:
try:
rhn.enable()
                rhn.register(module.params['enable_eus'] == True, activationkey, profilename)
rhn.subscribe(channels)
except Exception, e:
module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, e))
module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname)
# Ensure system is *not* registered
if state == 'absent':
if not rhn.is_registered:
module.exit_json(changed=False, msg="System already unregistered.")
else:
try:
rhn.unregister()
except Exception, e:
module.fail_json(msg="Failed to unregister: %s" % e)
module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname)
main()
|
gpl-3.0
|
doptio/you-owe-it
|
yoi/app.py
|
1
|
2787
|
from __future__ import unicode_literals, division
from flask import Flask, request, redirect
from flask.ext.sqlalchemy import SQLAlchemy
import os
from raven import Client
from raven.middleware import Sentry
from yoi.account.user import bp as account
from yoi.config import (secret,
database_url,
in_production,
canonical_domain,
always_secure)
from yoi import dweeb
from yoi.flask_genshi import Genshi, render_response
from yoi import middleware
from yoi.resources import static_url
app = Flask(__name__)
app.request_class = dweeb.Request
app.genshi = Genshi(app)
app.db = SQLAlchemy(app)
app.register_blueprint(account)
# FIXME - Use app.config.from_object
app.config['DEBUG'] = True
app.config['PROPAGATE_EXCEPTIONS'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = database_url
app.config['SECRET_KEY'] = secret
if canonical_domain:
app.config['SESSION_COOKIE_DOMAIN'] = canonical_domain
app.config['SESSION_COOKIE_HTTPONLY'] = True
app.config['SESSION_COOKIE_SECURE'] = always_secure
# Global HTTP response headers
cache_headers = [
('Cache-Control', 'public'),
]
no_cache_headers = [
('Cache-Control', 'no-cache'),
('Expires', 'Sat, 07 Jul 1979 23:00:00 GMT'),
]
hsts_headers = [
('Strict-Transport-Security', 'max-age=31536000; includeSubDomains'),
]
@app.after_request
def add_global_headers(response):
expires = getattr(response, 'expires', None)
if expires:
response.headers.extend(cache_headers)
else:
response.headers.extend(no_cache_headers)
if request.is_secure:
response.headers.extend(hsts_headers)
return response
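# (Illustrative only: a view can opt in to caching by giving its response a
#  truthy `expires` attribute, e.g. `response.expires = datetime.utcnow() +
#  timedelta(hours=1)`, before returning; nothing in this module does so yet.)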
@app.before_request
def canonical_redirect():
if always_secure and not request.is_secure:
return redirect(request.url.replace('http://', 'https://'))
if canonical_domain and request.host != canonical_domain:
return redirect(request.url.replace('://' + request.host,
'://' + canonical_domain))
# Nice error pages
@app.errorhandler(404)
def not_found(e):
return render_response('404.html'), 404
# Error-reporting middleware
if 'SENTRY_URL' in os.environ:
app.wsgi_app = Sentry(app.wsgi_app, Client(os.environ['SENTRY_URL']))
# Nice 'Internal Server Error' page
# FIXME - should use render_template.
with app.test_request_context('/'):
error_page = (app.genshi
.template_loader.load('500.html')
.generate(g={'user': None},
get_flashed_messages=lambda **kwargs: [],
static_url=static_url)
.render('html'))
if in_production:
app.wsgi_app = middleware.error_page(app.wsgi_app, error_page)
|
mit
|
shepdelacreme/ansible
|
lib/ansible/modules/network/interface/net_linkagg.py
|
96
|
2696
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: net_linkagg
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage link aggregation groups on network devices
description:
- This module provides declarative management of link aggregation groups
on network devices.
options:
name:
description:
- Name of the link aggregation group.
required: true
mode:
description:
- Mode of the link aggregation group. A value of C(on) will enable LACP.
        C(active) configures the link to actively send information about the state of the link,
        while C(passive) mode sends link state information only when it is received
        from another link.
default: on
choices: ['on', 'active', 'passive']
members:
description:
- List of members interfaces of the link aggregation group. The value can be
single interface or list of interfaces.
required: true
min_links:
description:
- Minimum members that should be up
before bringing up the link aggregation group.
aggregate:
description: List of link aggregation definitions.
purge:
description:
- Purge link aggregation groups not defined in the I(aggregate) parameter.
default: no
state:
description:
- State of the link aggregation group.
default: present
choices: ['present', 'absent', 'up', 'down']
"""
EXAMPLES = """
- name: configure link aggregation group
net_linkagg:
name: bond0
members:
- eth0
- eth1
- name: remove configuration
net_linkagg:
name: bond0
state: absent
- name: Create aggregate of linkagg definitions
net_linkagg:
aggregate:
- { name: bond0, members: [eth1] }
- { name: bond1, members: [eth2] }
- name: Remove aggregate of linkagg definitions
net_linkagg:
aggregate:
- name: bond0
- name: bond1
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- set interfaces bonding bond0
- set interfaces ethernet eth0 bond-group 'bond0'
- set interfaces ethernet eth1 bond-group 'bond0'
"""
|
gpl-3.0
|
AaltoML/kalman-jax
|
kalmanjax/likelihoods.py
|
1
|
48869
|
import jax.numpy as np
from jax.scipy.special import erf, gammaln
from jax import jit, partial, jacrev, random, vmap, grad
from jax.scipy.linalg import cholesky, cho_factor, cho_solve
from utils import inv, softplus, sigmoid, logphi, gaussian_moment_match, softplus_inv, gauss_hermite, \
ensure_positive_precision
pi = 3.141592653589793
def gaussian_first_derivative_wrt_mean(f, m, C, w):
invC = inv(C)
return invC @ (f - m) * w
def gaussian_second_derivative_wrt_mean(f, m, C, w):
invC = inv(C)
return (invC @ (f - m) @ (f - m).T @ invC - invC) * w
class Likelihood(object):
"""
The likelihood model class, p(yₙ|fₙ). Each likelihood implements its own parameter update methods:
Moment matching is used for EP
Statistical linearisation is used for SLEP / UKS / GHKS
    Analytical linearisation is used for EEP / EKS
Variational expectation is used for VI
If no custom parameter update method is provided, cubature is used (Gauss-Hermite by default).
The requirement for all inference methods to work is the implementation of the following methods:
evaluate_likelihood(), which simply evaluates the likelihood given the latent function
evaluate_log_likelihood()
conditional_moments(), which return E[y|f] and Cov[y|f]
"""
def __init__(self, hyp=None):
"""
:param hyp: (hyper)parameters of the likelihood model
"""
hyp = [] if hyp is None else hyp
self.hyp = softplus_inv(np.array(hyp))
def evaluate_likelihood(self, y, f, hyp=None):
raise NotImplementedError('direct evaluation of this likelihood is not implemented')
def evaluate_log_likelihood(self, y, f, hyp=None):
raise NotImplementedError('direct evaluation of this log-likelihood is not implemented')
def conditional_moments(self, f, hyp=None):
raise NotImplementedError('conditional moments of this likelihood are not implemented')
@partial(jit, static_argnums=(0, 6))
def moment_match_cubature(self, y, cav_mean, cav_cov, hyp=None, power=1.0, cubature_func=None):
"""
        TODO: N.B. THIS VERSION IS SUPERSEDED BY THE FUNCTION BELOW. HOWEVER THIS ONE MAY BE MORE STABLE.
Perform moment matching via cubature.
        Moment matching involves computing the log partition function, logZₙ, and its derivatives w.r.t. the cavity mean
logZₙ = log ∫ pᵃ(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
with EP power a.
:param y: observed data (yₙ) [scalar]
:param cav_mean: cavity mean (mₙ) [scalar]
:param cav_cov: cavity covariance (cₙ) [scalar]
:param hyp: likelihood hyperparameter [scalar]
:param power: EP power / fraction (a) [scalar]
:param cubature_func: the function to compute sigma points and weights to use during cubature
:return:
lZ: the log partition function, logZₙ [scalar]
            site_mean: the approximate likelihood (site) mean  [scalar]
            site_cov: the approximate likelihood (site) covariance  [scalar]
"""
if cubature_func is None:
x, w = gauss_hermite(cav_mean.shape[0], 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature_func(cav_mean.shape[0])
cav_cho, low = cho_factor(cav_cov)
# fsigᵢ=xᵢ√cₙ + mₙ: scale locations according to cavity dist.
sigma_points = cav_cho @ np.atleast_2d(x) + cav_mean
# pre-compute wᵢ pᵃ(yₙ|xᵢ√(2vₙ) + mₙ)
weighted_likelihood_eval = w * self.evaluate_likelihood(y, sigma_points, hyp) ** power
# a different approach, based on the log-likelihood, which can be more stable:
# ll = self.evaluate_log_likelihood(y, sigma_points)
# lmax = np.max(ll)
# weighted_likelihood_eval = np.exp(lmax * power) * w * np.exp(power * (ll - lmax))
# Compute partition function via cubature:
# Zₙ = ∫ pᵃ(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ pᵃ(yₙ|fsigᵢ)
Z = np.sum(
weighted_likelihood_eval, axis=-1
)
lZ = np.log(Z)
Zinv = 1.0 / Z
# Compute derivative of partition function via cubature:
# dZₙ/dmₙ = ∫ (fₙ-mₙ) vₙ⁻¹ pᵃ(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ (fₙ-mₙ) vₙ⁻¹ pᵃ(yₙ|fsigᵢ)
covinv_f_m = cho_solve((cav_cho, low), sigma_points - cav_mean)
dZ = np.sum(
# (sigma_points - cav_mean) / cav_cov
covinv_f_m
* weighted_likelihood_eval,
axis=-1
)
# dlogZₙ/dmₙ = (dZₙ/dmₙ) / Zₙ
dlZ = Zinv * dZ
# Compute second derivative of partition function via cubature:
# d²Zₙ/dmₙ² = ∫ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹] pᵃ(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹] pᵃ(yₙ|fsigᵢ)
d2Z = np.sum(
((sigma_points - cav_mean) ** 2 / cav_cov ** 2 - 1.0 / cav_cov)
* weighted_likelihood_eval
)
# d²logZₙ/dmₙ² = d[(dZₙ/dmₙ) / Zₙ]/dmₙ
# = (d²Zₙ/dmₙ² * Zₙ - (dZₙ/dmₙ)²) / Zₙ²
# = d²Zₙ/dmₙ² / Zₙ - (dlogZₙ/dmₙ)²
d2lZ = -dlZ @ dlZ.T + Zinv * d2Z
id2lZ = inv(ensure_positive_precision(-d2lZ) - 1e-10 * np.eye(d2lZ.shape[0]))
site_mean = cav_mean + id2lZ @ dlZ # approx. likelihood (site) mean (see Rasmussen & Williams p75)
site_cov = power * (-cav_cov + id2lZ) # approx. likelihood (site) variance
return lZ, site_mean, site_cov
@partial(jit, static_argnums=(0, 6))
def moment_match_cubature(self, y, cav_mean, cav_cov, hyp=None, power=1.0, cubature_func=None):
"""
TODO: N.B. THIS VERSION ALLOWS MULTI-DIMENSIONAL MOMENT MATCHING, BUT CAN BE UNSTABLE
Perform moment matching via cubature.
        Moment matching involves computing the log partition function, logZₙ, and its derivatives w.r.t. the cavity mean
logZₙ = log ∫ pᵃ(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
with EP power a.
:param y: observed data (yₙ) [scalar]
:param cav_mean: cavity mean (mₙ) [scalar]
:param cav_cov: cavity covariance (cₙ) [scalar]
:param hyp: likelihood hyperparameter [scalar]
:param power: EP power / fraction (a) [scalar]
:param cubature_func: the function to compute sigma points and weights to use during cubature
:return:
lZ: the log partition function, logZₙ [scalar]
            site_mean: the approximate likelihood (site) mean  [scalar]
            site_cov: the approximate likelihood (site) covariance  [scalar]
"""
if cubature_func is None:
x, w = gauss_hermite(cav_mean.shape[0], 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature_func(cav_mean.shape[0])
cav_cho, low = cho_factor(cav_cov)
# fsigᵢ=xᵢ√cₙ + mₙ: scale locations according to cavity dist.
sigma_points = cav_cho @ np.atleast_2d(x) + cav_mean
# pre-compute wᵢ pᵃ(yₙ|xᵢ√(2vₙ) + mₙ)
weighted_likelihood_eval = w * self.evaluate_likelihood(y, sigma_points, hyp) ** power
# Compute partition function via cubature:
# Zₙ = ∫ pᵃ(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ pᵃ(yₙ|fsigᵢ)
Z = np.sum(
weighted_likelihood_eval, axis=-1
)
lZ = np.log(np.maximum(Z, 1e-8))
Zinv = 1.0 / np.maximum(Z, 1e-8)
# Compute derivative of partition function via cubature:
# dZₙ/dmₙ = ∫ (fₙ-mₙ) vₙ⁻¹ pᵃ(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ (fₙ-mₙ) vₙ⁻¹ pᵃ(yₙ|fsigᵢ)
d1 = vmap(
gaussian_first_derivative_wrt_mean, (1, None, None, 1)
)(sigma_points[..., None], cav_mean, cav_cov, weighted_likelihood_eval)
dZ = np.sum(d1, axis=0)
# dlogZₙ/dmₙ = (dZₙ/dmₙ) / Zₙ
dlZ = Zinv * dZ
# Compute second derivative of partition function via cubature:
# d²Zₙ/dmₙ² = ∫ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹] pᵃ(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹] pᵃ(yₙ|fsigᵢ)
d2 = vmap(
gaussian_second_derivative_wrt_mean, (1, None, None, 1)
)(sigma_points[..., None], cav_mean, cav_cov, weighted_likelihood_eval)
d2Z = np.sum(d2, axis=0)
# d²logZₙ/dmₙ² = d[(dZₙ/dmₙ) / Zₙ]/dmₙ
# = (d²Zₙ/dmₙ² * Zₙ - (dZₙ/dmₙ)²) / Zₙ²
# = d²Zₙ/dmₙ² / Zₙ - (dlogZₙ/dmₙ)²
d2lZ = -dlZ @ dlZ.T + Zinv * d2Z
id2lZ = inv(ensure_positive_precision(-d2lZ) - 1e-10 * np.eye(d2lZ.shape[0]))
site_mean = cav_mean + id2lZ @ dlZ # approx. likelihood (site) mean (see Rasmussen & Williams p75)
site_cov = power * (-cav_cov + id2lZ) # approx. likelihood (site) variance
return lZ, site_mean, site_cov
@partial(jit, static_argnums=(0, 6))
def moment_match(self, y, m, v, hyp=None, power=1.0, cubature_func=None):
"""
If no custom moment matching method is provided, we use cubature.
"""
return self.moment_match_cubature(y, m, v, hyp, power, cubature_func)
@staticmethod
def link_fn(latent_mean):
return latent_mean
def sample(self, f, rng_key=123):
lik_expectation, lik_variance = self.conditional_moments(f)
lik_std = cholesky(np.diag(np.expand_dims(lik_variance, 0)))
return lik_expectation + lik_std * random.normal(random.PRNGKey(rng_key), shape=f.shape)
@partial(jit, static_argnums=(0, 4))
def statistical_linear_regression_cubature(self, cav_mean, cav_cov, hyp=None, cubature_func=None):
"""
Perform statistical linear regression (SLR) using cubature.
We aim to find a likelihood approximation p(yₙ|fₙ) ≈ 𝓝(yₙ|Afₙ+b,Ω+Var[yₙ|fₙ]).
TODO: this currently assumes an additive noise model (ok for our current applications), make more general
"""
if cubature_func is None:
x, w = gauss_hermite(cav_mean.shape[0], 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature_func(cav_mean.shape[0])
# fsigᵢ=xᵢ√(vₙ) + mₙ: scale locations according to cavity dist.
sigma_points = cholesky(cav_cov) @ np.atleast_2d(x) + cav_mean
lik_expectation, lik_covariance = self.conditional_moments(sigma_points, hyp)
# Compute zₙ via cubature:
# zₙ = ∫ E[yₙ|fₙ] 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ E[yₙ|fsigᵢ]
mu = np.sum(
w * lik_expectation, axis=-1
)[:, None]
# Compute variance S via cubature:
# S = ∫ [(E[yₙ|fₙ]-zₙ) (E[yₙ|fₙ]-zₙ)' + Cov[yₙ|fₙ]] 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ [(E[yₙ|fsigᵢ]-zₙ) (E[yₙ|fsigᵢ]-zₙ)' + Cov[yₙ|fₙ]]
# TODO: allow for multi-dim cubature
S = np.sum(
w * ((lik_expectation - mu) * (lik_expectation - mu) + lik_covariance), axis=-1
)[:, None]
# Compute cross covariance C via cubature:
# C = ∫ (fₙ-mₙ) (E[yₙ|fₙ]-zₙ)' 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ (fsigᵢ -mₙ) (E[yₙ|fsigᵢ]-zₙ)'
C = np.sum(
w * (sigma_points - cav_mean) * (lik_expectation - mu), axis=-1
)[:, None]
# Compute derivative of z via cubature:
# omega = ∫ E[yₙ|fₙ] vₙ⁻¹ (fₙ-mₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ E[yₙ|fsigᵢ] vₙ⁻¹ (fsigᵢ-mₙ)
omega = np.sum(
w * lik_expectation * (inv(cav_cov) @ (sigma_points - cav_mean)), axis=-1
)[None, :]
return mu, S, C, omega
@partial(jit, static_argnums=(0, 4))
def statistical_linear_regression(self, m, v, hyp=None, cubature_func=None):
"""
If no custom SLR method is provided, we use cubature.
"""
return self.statistical_linear_regression_cubature(m, v, hyp, cubature_func)
@partial(jit, static_argnums=0)
def observation_model(self, f, sigma, hyp=None):
"""
The implicit observation model is:
h(fₙ,rₙ) = E[yₙ|fₙ] + √Cov[yₙ|fₙ] σₙ
"""
conditional_expectation, conditional_covariance = self.conditional_moments(f, hyp)
obs_model = conditional_expectation + cholesky(conditional_covariance) @ sigma
return np.squeeze(obs_model)
@partial(jit, static_argnums=0)
def analytical_linearisation(self, m, sigma=None, hyp=None):
"""
Compute the Jacobian of the state space observation model w.r.t. the
function fₙ and the noise term σₙ.
The implicit observation model is:
h(fₙ,rₙ) = E[yₙ|fₙ] + √Cov[yₙ|fₙ] σₙ
The Jacobians are evaluated at the means, fₙ=m, σₙ=0, to be used during
Extended Kalman filtering and Extended EP.
"""
sigma = np.array([[0.0]]) if sigma is None else sigma
Jf, Jsigma = jacrev(self.observation_model, argnums=(0, 1))(m, sigma, hyp)
return np.atleast_2d(np.squeeze(Jf)), np.atleast_2d(np.squeeze(Jsigma))
@partial(jit, static_argnums=(0, 5))
def variational_expectation_cubature(self, y, post_mean, post_cov, hyp=None, cubature_func=None):
"""
Computes the "variational expectation" via cubature, i.e. the
expected log-likelihood, and its derivatives w.r.t. the posterior mean
E[log p(yₙ|fₙ)] = ∫ log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
:param y: observed data (yₙ) [scalar]
:param post_mean: posterior mean (mₙ) [scalar]
:param post_cov: posterior variance (vₙ) [scalar]
:param hyp: likelihood hyperparameter [scalar]
:param cubature_func: the function to compute sigma points and weights to use during cubature
:return:
exp_log_lik: the expected log likelihood, E[log p(yₙ|fₙ)] [scalar]
dE_dm: derivative of E[log p(yₙ|fₙ)] w.r.t. mₙ [scalar]
dE_dv: derivative of E[log p(yₙ|fₙ)] w.r.t. vₙ [scalar]
"""
if cubature_func is None:
x, w = gauss_hermite(post_mean.shape[0], 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature_func(post_mean.shape[0])
# fsigᵢ=xᵢ√(vₙ) + mₙ: scale locations according to cavity dist.
sigma_points = cholesky(post_cov) @ np.atleast_2d(x) + post_mean
# pre-compute wᵢ log p(yₙ|xᵢ√(2vₙ) + mₙ)
weighted_log_likelihood_eval = w * self.evaluate_log_likelihood(y, sigma_points, hyp)
# Compute expected log likelihood via cubature:
# E[log p(yₙ|fₙ)] = ∫ log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ p(yₙ|fsigᵢ)
exp_log_lik = np.sum(
weighted_log_likelihood_eval
)
# Compute first derivative via cubature:
# dE[log p(yₙ|fₙ)]/dmₙ = ∫ (fₙ-mₙ) vₙ⁻¹ log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ (fₙ-mₙ) vₙ⁻¹ log p(yₙ|fsigᵢ)
invv = np.diag(post_cov)[:, None] ** -1
dE_dm = np.sum(
invv * (sigma_points - post_mean)
* weighted_log_likelihood_eval, axis=-1
)[:, None]
# Compute second derivative via cubature (deriv. w.r.t. var = 0.5 * 2nd deriv. w.r.t. mean):
# dE[log p(yₙ|fₙ)]/dvₙ = ∫ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹]/2 log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹]/2 log p(yₙ|fsigᵢ)
dE_dv = np.sum(
(0.5 * (invv ** 2 * (sigma_points - post_mean) ** 2) - 0.5 * invv)
* weighted_log_likelihood_eval, axis=-1
)
dE_dv = np.diag(dE_dv)
return exp_log_lik, dE_dm, dE_dv
@partial(jit, static_argnums=(0, 5))
def variational_expectation(self, y, m, v, hyp=None, cubature_func=None):
"""
If no custom variational expectation method is provided, we use cubature.
"""
return self.variational_expectation_cubature(y, m, v, hyp, cubature_func)
class Gaussian(Likelihood):
"""
The Gaussian likelihood:
p(yₙ|fₙ) = 𝓝(yₙ|fₙ,σ²)
"""
def __init__(self, variance=0.1):
"""
:param variance: The observation noise variance, σ²
"""
super().__init__(hyp=variance)
self.name = 'Gaussian'
@property
def variance(self):
return softplus(self.hyp)
@partial(jit, static_argnums=0)
def evaluate_likelihood(self, y, f, hyp=None):
"""
Evaluate the Gaussian function 𝓝(yₙ|fₙ,σ²).
Can be used to evaluate Q cubature points.
:param y: observed data yₙ [scalar]
:param f: mean, i.e. the latent function value fₙ [Q, 1]
:param hyp: likelihood variance σ² [scalar]
:return:
𝓝(yₙ|fₙ,σ²), where σ² is the observation noise [Q, 1]
"""
hyp = softplus(self.hyp) if hyp is None else hyp
return (2 * pi * hyp) ** -0.5 * np.exp(-0.5 * (y - f) ** 2 / hyp)
@partial(jit, static_argnums=0)
def evaluate_log_likelihood(self, y, f, hyp=None):
"""
Evaluate the log-Gaussian function log𝓝(yₙ|fₙ,σ²).
Can be used to evaluate Q cubature points.
:param y: observed data yₙ [scalar]
:param f: mean, i.e. the latent function value fₙ [Q, 1]
:param hyp: likelihood variance σ² [scalar]
:return:
log𝓝(yₙ|fₙ,σ²), where σ² is the observation noise [Q, 1]
"""
hyp = softplus(self.hyp) if hyp is None else hyp
return -0.5 * np.log(2 * pi * hyp) - 0.5 * (y - f) ** 2 / hyp
@partial(jit, static_argnums=0)
def conditional_moments(self, f, hyp=None):
"""
The first two conditional moments of a Gaussian are the mean and variance:
E[y|f] = f
Var[y|f] = σ²
"""
hyp = softplus(self.hyp) if hyp is None else hyp
return f, hyp.reshape(-1, 1)
@partial(jit, static_argnums=(0, 6))
def moment_match(self, y, cav_mean, cav_cov, hyp=None, power=1.0, cubature_func=None):
"""
Closed form Gaussian moment matching.
Calculates the log partition function of the EP tilted distribution:
logZₙ = log ∫ 𝓝ᵃ(yₙ|fₙ,σ²) 𝓝(fₙ|mₙ,vₙ) dfₙ = E[𝓝(yₙ|fₙ,σ²)]
and its derivatives w.r.t. mₙ, which are required for moment matching.
:param y: observed data (yₙ) [scalar]
:param cav_mean: cavity mean (mₙ) [scalar]
:param cav_cov: cavity variance (vₙ) [scalar]
:param hyp: observation noise variance (σ²) [scalar]
:param power: EP power / fraction (a) - this is never required for the Gaussian likelihood [scalar]
:param cubature_func: not used
:return:
lZ: the log partition function, logZₙ [scalar]
dlZ: first derivative of logZₙ w.r.t. mₙ (if derivatives=True) [scalar]
d2lZ: second derivative of logZₙ w.r.t. mₙ (if derivatives=True) [scalar]
"""
hyp = softplus(self.hyp) if hyp is None else hyp
return gaussian_moment_match(y, cav_mean, cav_cov, hyp)
class Bernoulli(Likelihood):
"""
Bernoulli likelihood is p(yₙ|fₙ) = Pʸ(1-P)⁽¹⁻ʸ⁾, where P = E[yₙ=1|fₙ].
Link function maps latent GP to [0,1].
The Probit link function, i.e. the Error Function Likelihood:
    i.e. the Gaussian (Normal) cumulative distribution function:
P = E[yₙ=1|fₙ] = Φ(fₙ)
= ∫ 𝓝(x|0,1) dx, where the integral is over (-∞, fₙ],
    The Normal CDF is calculated using the error function:
= (1 + erf(fₙ / √2)) / 2
for erf(z) = (2/√π) ∫ exp(-x²) dx, where the integral is over [0, z]
The logit link function:
        P = E[yₙ=1|fₙ] = 1 / (1 + exp(-fₙ))
"""
def __init__(self, link):
super().__init__(hyp=None)
if link == 'logit':
self.link_fn = lambda f: 1 / (1 + np.exp(-f))
self.dlink_fn = lambda f: np.exp(f) / (1 + np.exp(f)) ** 2
self.link = link
elif link == 'probit':
jitter = 1e-10
self.link_fn = lambda f: 0.5 * (1.0 + erf(f / np.sqrt(2.0))) * (1 - 2 * jitter) + jitter
self.dlink_fn = lambda f: grad(self.link_fn)(np.squeeze(f)).reshape(-1, 1)
self.link = link
else:
raise NotImplementedError('link function not implemented')
self.name = 'Bernoulli'
@partial(jit, static_argnums=0)
def evaluate_likelihood(self, y, f, hyp=None):
"""
:param y: observed data yₙ ϵ {-1, +1} [scalar]
:param f: latent function value fₙ ϵ ℝ
:param hyp: dummy input, Probit/Logit has no hyperparameters
:return:
p(yₙ|fₙ) = Pʸ(1-P)⁽¹⁻ʸ⁾
"""
return np.where(np.equal(y, 1), self.link_fn(f), 1 - self.link_fn(f))
@partial(jit, static_argnums=0)
def evaluate_log_likelihood(self, y, f, hyp=None):
"""
:param y: observed data yₙ ϵ {-1, +1} [scalar]
:param f: latent function value fₙ ϵ ℝ
:param hyp: dummy input, Probit has no hyperparameters
:return:
log p(yₙ|fₙ)
"""
return np.log(self.evaluate_likelihood(y, f))
@partial(jit, static_argnums=0)
def conditional_moments(self, f, hyp=None):
"""
The first two conditional moments of a Probit likelihood are:
E[yₙ|fₙ] = Φ(fₙ)
Var[yₙ|fₙ] = Φ(fₙ) (1 - Φ(fₙ))
"""
return self.link_fn(f), self.link_fn(f)-(self.link_fn(f)**2)
@partial(jit, static_argnums=(0, 5, 6))
def moment_match(self, y, m, v, hyp=None, power=1.0, cubature_func=None):
"""
Probit likelihood moment matching.
Calculates the log partition function of the EP tilted distribution:
logZₙ = log ∫ Φᵃ(yₙfₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
and its derivatives w.r.t. mₙ, which are required for moment matching.
If the EP fraction a = 1, we get
= log Φ(yₙzₙ), where zₙ = mₙ / √(1 + vₙ) [see Rasmussen & Williams p74]
otherwise we must use cubature to compute the log partition and its derivatives.
:param y: observed data (yₙ) [scalar]
:param m: cavity mean (mₙ) [scalar]
:param v: cavity variance (vₙ) [scalar]
:param hyp: dummy variable (Probit has no hyperparameters)
:param power: EP power / fraction (a) [scalar]
:param cubature_func: function returning the sigma points and weights for cubature
:return:
lZ: the log partition function, logZₙ [scalar]
            site_mean: the approximate likelihood (site) mean  [scalar]
            site_var: the approximate likelihood (site) variance  [scalar]
"""
        y = np.sign(y)  # normalise the labels to {-1, 0, +1}
if power == 1 and self.link == 'probit': # if a = 1, we can calculate the moments in closed form
y = np.sign(y - 0.01) # set zeros to -1 for closed form probit calc
z = m / np.sqrt(1.0 + v)
z = z * y # zₙ = yₙmₙ / √(1 + vₙ)
# logZₙ = log ∫ Φ(yₙfₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# = log Φ(yₙmₙ/√(1 + vₙ)) [see Rasmussen & Williams p74]
lZ, dlp = logphi(z)
# dlogZₙ/dmₙ = yₙ dlogΦ(zₙ)/dmₙ / √(1 + vₙ)
dlZ = y * dlp / np.sqrt(1.0 + v) # first derivative w.r.t mₙ
# d²logZₙ/dmₙ² = -dlogΦ(zₙ)/dmₙ (zₙ + dlogΦ(zₙ)/dmₙ) / √(1 + vₙ)
d2lZ = -dlp * (z + dlp) / (1.0 + v) # second derivative w.r.t mₙ
site_mean = m - dlZ / d2lZ # approx. likelihood (site) mean (see Rasmussen & Williams p75)
site_var = - (v + 1 / d2lZ) # approx. likelihood (site) variance
return lZ, site_mean, site_var
else:
# if a is not 1, we can calculate the moments via cubature
return self.moment_match_cubature(y, m, v, None, power, cubature_func)
@partial(jit, static_argnums=0)
def analytical_linearisation(self, m, sigma=None, hyp=None):
"""
Compute the Jacobian of the state space observation model w.r.t. the
function fₙ and the noise term σₙ.
"""
Jf = self.dlink_fn(m) + (
0.5 * (self.link_fn(m) * (1 - self.link_fn(m))) ** -0.5
* self.dlink_fn(m) * (1 - 2 * self.link_fn(m))
) * sigma
Jsigma = (self.link_fn(m) * (1 - self.link_fn(m))) ** 0.5
return Jf, Jsigma
class Probit(Bernoulli):
"""
The probit likelihood = Bernoulli likelihood with probit link.
"""
def __init__(self):
super().__init__(link='probit')
class Erf(Probit):
"""
The error function likelihood = probit = Bernoulli likelihood with probit link.
"""
pass
class Logit(Bernoulli):
"""
The logit likelihood = Bernoulli likelihood with logit link.
"""
def __init__(self):
super().__init__(link='logit')
class Logistic(Logit):
"""
The logistic likelihood = logit = Bernoulli likelihood with logit link.
"""
pass
class Poisson(Likelihood):
"""
The Poisson likelihood:
p(yₙ|fₙ) = Poisson(fₙ) = μʸ exp(-μ) / yₙ!
where μ = g(fₙ) = mean = variance is the Poisson intensity.
yₙ is non-negative integer count data.
    No closed form moment matching is available, so we default to using cubature.
Letting Zy = gamma(yₙ+1) = yₙ!, we get log p(yₙ|fₙ) = log(g(fₙ))yₙ - g(fₙ) - log(Zy)
    The larger the intensity μ, the more closely the likelihood resembles a Gaussian,
    since skewness = 1/sqrt(μ) and excess kurtosis = 1/μ.
Two possible link functions:
'exp': link(fₙ) = exp(fₙ), we have p(yₙ|fₙ) = exp(fₙyₙ-exp(fₙ)) / Zy.
        'logistic': link(fₙ) = log(1+exp(fₙ)), we have p(yₙ|fₙ) = logʸ(1+exp(fₙ)) / ((1+exp(fₙ)) Zy).
"""
def __init__(self, link='exp'):
"""
:param link: link function, either 'exp' or 'logistic'
"""
super().__init__(hyp=None)
if link == 'exp':
self.link_fn = lambda mu: np.exp(mu)
self.dlink_fn = lambda mu: np.exp(mu)
elif link == 'logistic':
self.link_fn = lambda mu: softplus(mu)
self.dlink_fn = lambda mu: sigmoid(mu)
else:
raise NotImplementedError('link function not implemented')
self.name = 'Poisson'
@partial(jit, static_argnums=0)
def evaluate_likelihood(self, y, f, hyp=None):
"""
Evaluate the Poisson likelihood:
p(yₙ|fₙ) = Poisson(fₙ) = μʸ exp(-μ) / yₙ!
for μ = g(fₙ), where g() is the link function (exponential or logistic).
We use the gamma function to evaluate yₙ! = gamma(yₙ + 1).
Can be used to evaluate Q cubature points when performing moment matching.
:param y: observed data (yₙ) [scalar]
:param f: latent function value (fₙ) [Q, 1]
:param hyp: dummy variable (Poisson has no hyperparameters)
:return:
Poisson(fₙ) = μʸ exp(-μ) / yₙ! [Q, 1]
"""
mu = self.link_fn(f)
return mu**y * np.exp(-mu) / np.exp(gammaln(y + 1))
@partial(jit, static_argnums=0)
def evaluate_log_likelihood(self, y, f, hyp=None):
"""
Evaluate the Poisson log-likelihood:
log p(yₙ|fₙ) = log Poisson(fₙ) = log(μʸ exp(-μ) / yₙ!)
for μ = g(fₙ), where g() is the link function (exponential or logistic).
We use the gamma function to evaluate yₙ! = gamma(yₙ + 1).
Can be used to evaluate Q cubature points when performing moment matching.
:param y: observed data (yₙ) [scalar]
:param f: latent function value (fₙ) [Q, 1]
:param hyp: dummy variable (Poisson has no hyperparameters)
:return:
log Poisson(fₙ) = log(μʸ exp(-μ) / yₙ!) [Q, 1]
"""
mu = self.link_fn(f)
return y * np.log(mu) - mu - gammaln(y + 1)
@partial(jit, static_argnums=0)
def observation_model(self, f, sigma, hyp=None):
"""
TODO: sort out broadcasting so we don't need this additional function (only difference is the transpose)
The implicit observation model is:
h(fₙ,rₙ) = E[yₙ|fₙ] + √Cov[yₙ|fₙ] σₙ
"""
conditional_expectation, conditional_covariance = self.conditional_moments(f, hyp)
obs_model = conditional_expectation + cholesky(conditional_covariance.T) @ sigma
return np.squeeze(obs_model)
@partial(jit, static_argnums=0)
def conditional_moments(self, f, hyp=None):
"""
The first two conditional moments of a Poisson distribution are equal to the intensity:
E[yₙ|fₙ] = link(fₙ)
Var[yₙ|fₙ] = link(fₙ)
"""
# return self.link_fn(f), self.link_fn(f)
return self.link_fn(f), vmap(np.diag, 1, 2)(self.link_fn(f))
@partial(jit, static_argnums=0)
def analytical_linearisation(self, m, sigma=None, hyp=None):
"""
Compute the Jacobian of the state space observation model w.r.t. the
function fₙ and the noise term σₙ.
"""
Jf = np.diag(np.squeeze(self.link_fn(m) + 0.5 * self.link_fn(m) ** -0.5 * self.dlink_fn(m) * sigma, axis=-1))
Jsigma = np.diag(np.squeeze(self.link_fn(m) ** 0.5, axis=-1))
return Jf, Jsigma
class HeteroscedasticNoise(Likelihood):
"""
The Heteroscedastic Noise likelihood:
p(y|f1,f2) = N(y|f1,link(f2)^2)
"""
def __init__(self, link='softplus'):
"""
:param link: link function, either 'exp' or 'softplus' (note that the link is modified with an offset)
"""
super().__init__(hyp=None)
if link == 'exp':
self.link_fn = lambda mu: np.exp(mu - 0.5)
self.dlink_fn = lambda mu: np.exp(mu - 0.5)
elif link == 'softplus':
self.link_fn = lambda mu: softplus(mu - 0.5) + 1e-10
self.dlink_fn = lambda mu: sigmoid(mu - 0.5)
else:
raise NotImplementedError('link function not implemented')
self.name = 'Heteroscedastic Noise'
@partial(jit, static_argnums=0)
def evaluate_likelihood(self, y, f, hyp=None):
"""
Evaluate the likelihood
"""
mu, var = self.conditional_moments(f)
return (2 * pi * var) ** -0.5 * np.exp(-0.5 * (y - mu) ** 2 / var)
@partial(jit, static_argnums=0)
def evaluate_log_likelihood(self, y, f, hyp=None):
"""
Evaluate the log-likelihood
"""
mu, var = self.conditional_moments(f)
return -0.5 * np.log(2 * pi * var) - 0.5 * (y - mu) ** 2 / var
@partial(jit, static_argnums=0)
def conditional_moments(self, f, hyp=None):
"""
"""
return f[0][None, ...], self.link_fn(f[1][None, ...]) ** 2
@partial(jit, static_argnums=(0, 6))
def moment_match(self, y, cav_mean, cav_cov, hyp=None, power=1.0, cubature_func=None):
"""
"""
if cubature_func is None:
x, w = gauss_hermite(1, 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature_func(1)
# sigma_points = np.sqrt(2) * np.sqrt(v) * x + m # scale locations according to cavity dist.
sigma_points = np.sqrt(cav_cov[1, 1]) * x + cav_mean[1] # fsigᵢ=xᵢ√cₙ + mₙ: scale locations according to cavity
f2 = self.link_fn(sigma_points) ** 2. / power
obs_var = f2 + cav_cov[0, 0]
const = power ** -0.5 * (2 * pi * self.link_fn(sigma_points) ** 2.) ** (0.5 - 0.5 * power)
normpdf = const * (2 * pi * obs_var) ** -0.5 * np.exp(-0.5 * (y - cav_mean[0, 0]) ** 2 / obs_var)
Z = np.sum(w * normpdf)
Zinv = 1. / np.maximum(Z, 1e-8)
lZ = np.log(np.maximum(Z, 1e-8))
dZ_integrand1 = (y - cav_mean[0, 0]) / obs_var * normpdf
dlZ1 = Zinv * np.sum(w * dZ_integrand1)
dZ_integrand2 = (sigma_points - cav_mean[1, 0]) / cav_cov[1, 1] * normpdf
dlZ2 = Zinv * np.sum(w * dZ_integrand2)
d2Z_integrand1 = (-(f2 + cav_cov[0, 0]) ** -1 + ((y - cav_mean[0, 0]) / obs_var) ** 2) * normpdf
d2lZ1 = -dlZ1 ** 2 + Zinv * np.sum(w * d2Z_integrand1)
d2Z_integrand2 = (-cav_cov[1, 1] ** -1 + ((sigma_points - cav_mean[1, 0]) / cav_cov[1, 1]) ** 2) * normpdf
d2lZ2 = -dlZ2 ** 2 + Zinv * np.sum(w * d2Z_integrand2)
dlZ = np.block([[dlZ1],
[dlZ2]])
d2lZ = np.block([[d2lZ1, 0],
[0., d2lZ2]])
id2lZ = inv(ensure_positive_precision(-d2lZ) - 1e-10 * np.eye(d2lZ.shape[0]))
site_mean = cav_mean + id2lZ @ dlZ # approx. likelihood (site) mean (see Rasmussen & Williams p75)
site_cov = power * (-cav_cov + id2lZ) # approx. likelihood (site) variance
return lZ, site_mean, site_cov
@partial(jit, static_argnums=0)
def log_expected_likelihood(self, y, x, w, cav_mean, cav_var, power):
sigma_points = np.sqrt(cav_var[1]) * x + cav_mean[1]
f2 = self.link_fn(sigma_points) ** 2. / power
obs_var = f2 + cav_var[0]
const = power ** -0.5 * (2 * pi * self.link_fn(sigma_points) ** 2.) ** (0.5 - 0.5 * power)
normpdf = const * (2 * pi * obs_var) ** -0.5 * np.exp(-0.5 * (y - cav_mean[0]) ** 2 / obs_var)
Z = np.sum(w * normpdf)
lZ = np.log(Z + 1e-8)
return lZ
@partial(jit, static_argnums=0)
def dlZ_dm(self, y, x, w, cav_mean, cav_var, power):
return jacrev(self.log_expected_likelihood, argnums=3)(y, x, w, cav_mean, cav_var, power)
@partial(jit, static_argnums=(0, 6))
def moment_match_unstable(self, y, cav_mean, cav_cov, hyp=None, power=1.0, cubature_func=None):
"""
TODO: Attempt to compute full site covariance, including cross terms. However, this makes things unstable.
"""
if cubature_func is None:
x, w = gauss_hermite(1, 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature_func(1)
lZ = self.log_expected_likelihood(y, x, w, np.squeeze(cav_mean), np.squeeze(np.diag(cav_cov)), power)
dlZ = self.dlZ_dm(y, x, w, np.squeeze(cav_mean), np.squeeze(np.diag(cav_cov)), power)[:, None]
d2lZ = jacrev(self.dlZ_dm, argnums=3)(y, x, w, np.squeeze(cav_mean), np.squeeze(np.diag(cav_cov)), power)
# d2lZ = np.diag(np.diag(d2lZ)) # discard cross terms
id2lZ = inv(ensure_positive_precision(-d2lZ) - 1e-10 * np.eye(d2lZ.shape[0]))
site_mean = cav_mean + id2lZ @ dlZ # approx. likelihood (site) mean (see Rasmussen & Williams p75)
site_cov = power * (-cav_cov + id2lZ) # approx. likelihood (site) variance
return lZ, site_mean, site_cov
@partial(jit, static_argnums=(0, 5))
def variational_expectation(self, y, m, v, hyp=None, cubature_func=None):
"""
"""
if cubature_func is None:
x, w = gauss_hermite(1, 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature_func(1)
m0, m1, v0, v1 = m[0, 0], m[1, 0], v[0, 0], v[1, 1]
sigma_points = np.sqrt(v1) * x + m1 # fsigᵢ=xᵢ√(2vₙ) + mₙ: scale locations according to cavity dist.
# pre-compute wᵢ log p(yₙ|xᵢ√(2vₙ) + mₙ)
var = self.link_fn(sigma_points) ** 2
log_lik = np.log(var) + var ** -1 * ((y - m0) ** 2 + v0)
weighted_log_likelihood_eval = w * log_lik
# Compute expected log likelihood via cubature:
# E[log p(yₙ|fₙ)] = ∫ log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ p(yₙ|fsigᵢ)
exp_log_lik = -0.5 * np.log(2 * pi) - 0.5 * np.sum(
weighted_log_likelihood_eval
)
# Compute first derivative via cubature:
dE_dm1 = np.sum(
(var ** -1 * (y - m0 + v0)) * w
)
# dE[log p(yₙ|fₙ)]/dmₙ = ∫ (fₙ-mₙ) vₙ⁻¹ log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ (fₙ-mₙ) vₙ⁻¹ log p(yₙ|fsigᵢ)
dE_dm2 = - 0.5 * np.sum(
weighted_log_likelihood_eval * v1 ** -1 * (sigma_points - m1)
)
# Compute derivative w.r.t. variance:
dE_dv1 = -0.5 * np.sum(
var ** -1 * w
)
# dE[log p(yₙ|fₙ)]/dvₙ = ∫ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹]/2 log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹]/2 log p(yₙ|fsigᵢ)
dE_dv2 = -0.25 * np.sum(
(v1 ** -2 * (sigma_points - m1) ** 2 - v1 ** -1)
* weighted_log_likelihood_eval
)
dE_dm = np.block([[dE_dm1],
[dE_dm2]])
dE_dv = np.block([[dE_dv1, 0],
[0., dE_dv2]])
return exp_log_lik, dE_dm, dE_dv
@partial(jit, static_argnums=(0, 4))
def statistical_linear_regression(self, cav_mean, cav_cov, hyp=None, cubature_func=None):
"""
Perform statistical linear regression (SLR) using cubature.
We aim to find a likelihood approximation p(yₙ|fₙ) ≈ 𝓝(yₙ|Afₙ+b,Ω+Var[yₙ|fₙ]).
"""
if cubature_func is None:
x, w = gauss_hermite(cav_mean.shape[0], 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature_func(cav_mean.shape[0])
m0, m1, v0, v1 = cav_mean[0, 0], cav_mean[1, 0], cav_cov[0, 0], cav_cov[1, 1]
# fsigᵢ=xᵢ√(vₙ) + mₙ: scale locations according to cavity dist.
sigma_points = cholesky(cav_cov) @ x + cav_mean
var = self.link_fn(sigma_points[1]) ** 2
# Compute zₙ via cubature:
# zₙ = ∫ E[yₙ|fₙ] 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ E[yₙ|fsigᵢ]
mu = m0.reshape(1, 1)
# Compute variance S via cubature:
# S = ∫ [(E[yₙ|fₙ]-zₙ) (E[yₙ|fₙ]-zₙ)' + Cov[yₙ|fₙ]] 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ [(E[yₙ|fsigᵢ]-zₙ) (E[yₙ|fsigᵢ]-zₙ)' + Cov[yₙ|fₙ]]
S = v0 + np.sum(
w * var
)
S = S.reshape(1, 1)
# Compute cross covariance C via cubature:
# C = ∫ (fₙ-mₙ) (E[yₙ|fₙ]-zₙ)' 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ (fsigᵢ -mₙ) (E[yₙ|fsigᵢ]-zₙ)'
C = np.sum(
w * (sigma_points - cav_mean) * (sigma_points[0] - m0), axis=-1
).reshape(2, 1)
# Compute derivative of z via cubature:
# omega = ∫ E[yₙ|fₙ] vₙ⁻¹ (fₙ-mₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ E[yₙ|fsigᵢ] vₙ⁻¹ (fsigᵢ-mₙ)
omega = np.block([[1., 0.]])
return mu, S, C, omega
@partial(jit, static_argnums=0)
def analytical_linearisation(self, m, sigma=None, hyp=None):
"""
Compute the Jacobian of the state space observation model w.r.t. the
function fₙ and the noise term σₙ.
"""
return np.block([[np.array(1.0), self.dlink_fn(m[1]) * sigma]]), self.link_fn(np.array([m[1]]))
class AudioAmplitudeDemodulation(Likelihood):
"""
The Audio Amplitude Demodulation likelihood
"""
def __init__(self, variance=0.1):
"""
param hyp: observation noise
"""
super().__init__(hyp=variance)
self.name = 'Audio Amplitude Demodulation'
self.link_fn = lambda f: softplus(f)
self.dlink_fn = lambda f: sigmoid(f) # derivative of the link function
@property
def variance(self):
return softplus(self.hyp)
@partial(jit, static_argnums=0)
def evaluate_likelihood(self, y, f, hyp=None):
"""
Evaluate the likelihood
"""
mu, var = self.conditional_moments(f, hyp)
return (2 * pi * var) ** -0.5 * np.exp(-0.5 * (y - mu) ** 2 / var)
@partial(jit, static_argnums=0)
def evaluate_log_likelihood(self, y, f, hyp=None):
"""
Evaluate the log-likelihood
"""
mu, var = self.conditional_moments(f, hyp)
return -0.5 * np.log(2 * pi * var) - 0.5 * (y - mu) ** 2 / var
@partial(jit, static_argnums=0)
def conditional_moments(self, f, hyp=None):
"""
"""
obs_noise_var = hyp if hyp is not None else self.hyp
num_components = int(f.shape[0] / 2)
subbands, modulators = f[:num_components], self.link_fn(f[num_components:])
return np.atleast_2d(np.sum(subbands * modulators, axis=0)), np.atleast_2d(obs_noise_var)
# return np.atleast_2d(modulators.T @ subbands), np.atleast_2d(obs_noise_var)
@partial(jit, static_argnums=(0, 6))
def moment_match(self, y, cav_mean, cav_cov, hyp=None, power=1.0, cubature_func=None):
"""
"""
num_components = int(cav_mean.shape[0] / 2)
if cubature_func is None:
x, w = gauss_hermite(num_components, 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature_func(num_components)
subband_mean, modulator_mean = cav_mean[:num_components], self.link_fn(cav_mean[num_components:])
subband_cov, modulator_cov = cav_cov[:num_components, :num_components], cav_cov[num_components:, num_components:]
sigma_points = cholesky(modulator_cov) @ x + modulator_mean
const = power ** -0.5 * (2 * pi * hyp) ** (0.5 - 0.5 * power)
mu = (self.link_fn(sigma_points).T @ subband_mean)[:, 0]
var = hyp / power + (self.link_fn(sigma_points).T ** 2 @ np.diag(subband_cov)[..., None])[:, 0]
normpdf = const * (2 * pi * var) ** -0.5 * np.exp(-0.5 * (y - mu) ** 2 / var)
Z = np.sum(w * normpdf)
Zinv = 1. / (Z + 1e-8)
lZ = np.log(Z + 1e-8)
dZ1 = np.sum(w * self.link_fn(sigma_points) * (y - mu) / var * normpdf, axis=-1)
dZ2 = np.sum(w * (sigma_points - modulator_mean) * np.diag(modulator_cov)[..., None] ** -1 * normpdf, axis=-1)
dlZ = Zinv * np.block([dZ1, dZ2])
d2Z1 = np.sum(w * self.link_fn(sigma_points) ** 2 * (
((y - mu) / var) ** 2
- var ** -1
) * normpdf, axis=-1)
d2Z2 = np.sum(w * (
((sigma_points - modulator_mean) * np.diag(modulator_cov)[..., None] ** -1) ** 2
- np.diag(modulator_cov)[..., None] ** -1
) * normpdf, axis=-1)
d2lZ = np.diag(-dlZ ** 2 + Zinv * np.block([d2Z1, d2Z2]))
id2lZ = inv(ensure_positive_precision(-d2lZ) - 1e-10 * np.eye(d2lZ.shape[0]))
site_mean = cav_mean + id2lZ @ dlZ[..., None] # approx. likelihood (site) mean (see Rasmussen & Williams p75)
site_cov = power * (-cav_cov + id2lZ) # approx. likelihood (site) variance
return lZ, site_mean, site_cov
@partial(jit, static_argnums=0)
def analytical_linearisation(self, m, sigma=None, hyp=None):
"""
"""
obs_noise_var = hyp if hyp is not None else self.hyp
num_components = int(m.shape[0] / 2)
subbands, modulators = m[:num_components], self.link_fn(m[num_components:])
Jf = np.block([[modulators], [subbands * self.dlink_fn(m[num_components:])]])
Jsigma = np.array([[np.sqrt(obs_noise_var)]])
return np.atleast_2d(Jf).T, np.atleast_2d(Jsigma).T
@partial(jit, static_argnums=(0, 4))
def statistical_linear_regression(self, cav_mean, cav_cov, hyp=None, cubature_func=None):
"""
This gives the same result as above - delete
"""
num_components = int(cav_mean.shape[0] / 2)
if cubature_func is None:
x, w = gauss_hermite(num_components, 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature_func(num_components)
subband_mean, modulator_mean = cav_mean[:num_components], self.link_fn(cav_mean[num_components:])
subband_cov, modulator_cov = cav_cov[:num_components, :num_components], cav_cov[num_components:,
num_components:]
sigma_points = cholesky(modulator_cov) @ x + modulator_mean
lik_expectation, lik_covariance = (self.link_fn(sigma_points).T @ subband_mean).T, hyp
# Compute zₙ via cubature:
# muₙ = ∫ E[yₙ|fₙ] 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ E[yₙ|fsigᵢ]
mu = np.sum(
w * lik_expectation, axis=-1
)[:, None]
# Compute variance S via cubature:
# S = ∫ [(E[yₙ|fₙ]-zₙ) (E[yₙ|fₙ]-zₙ)' + Cov[yₙ|fₙ]] 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ [(E[yₙ|fsigᵢ]-zₙ) (E[yₙ|fsigᵢ]-zₙ)' + Cov[yₙ|fₙ]]
S = np.sum(
w * ((lik_expectation - mu) * (lik_expectation - mu) + lik_covariance), axis=-1
)[:, None]
# Compute cross covariance C via cubature:
# C = ∫ (fₙ-mₙ) (E[yₙ|fₙ]-zₙ)' 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ (fsigᵢ -mₙ) (E[yₙ|fsigᵢ]-zₙ)'
C = np.sum(
w * np.block([[self.link_fn(sigma_points) * np.diag(subband_cov)[..., None]],
[sigma_points - modulator_mean]]) * (lik_expectation - mu), axis=-1
)[:, None]
# Compute derivative of mu via cubature:
omega = np.sum(
w * np.block([[self.link_fn(sigma_points)],
[np.diag(modulator_cov)[..., None] ** -1 * (sigma_points - modulator_mean) * lik_expectation]]), axis=-1
)[None, :]
return mu, S, C, omega
@partial(jit, static_argnums=(0, 5))
def variational_expectation(self, y, post_mean, post_cov, hyp=None, cubature_func=None):
"""
"""
num_components = int(post_mean.shape[0] / 2)
if cubature_func is None:
x, w = gauss_hermite(num_components, 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature_func(num_components)
subband_mean, modulator_mean = post_mean[:num_components], self.link_fn(post_mean[num_components:])
subband_cov, modulator_cov = post_cov[:num_components, :num_components], post_cov[num_components:,
num_components:]
sigma_points = cholesky(modulator_cov) @ x + modulator_mean
modulator_var = np.diag(subband_cov)[..., None]
mu = (self.link_fn(sigma_points).T @ subband_mean)[:, 0]
lognormpdf = -0.5 * np.log(2 * pi * hyp) - 0.5 * (y - mu) ** 2 / hyp
const = -0.5 / hyp * (self.link_fn(sigma_points).T ** 2 @ modulator_var)[:, 0]
exp_log_lik = np.sum(w * (lognormpdf + const))
dE1 = np.sum(w * self.link_fn(sigma_points) * (y - mu) / hyp, axis=-1)
dE2 = np.sum(w * (sigma_points - modulator_mean) * modulator_var ** -1
* (lognormpdf + const), axis=-1)
dE_dm = np.block([dE1, dE2])[..., None]
d2E1 = np.sum(w * - 0.5 * self.link_fn(sigma_points) ** 2 / hyp, axis=-1)
d2E2 = np.sum(w * 0.5 * (
((sigma_points - modulator_mean) * modulator_var ** -1) ** 2
- modulator_var ** -1
) * (lognormpdf + const), axis=-1)
dE_dv = np.diag(np.block([d2E1, d2E2]))
return exp_log_lik, dE_dm, dE_dv
@partial(jit, static_argnums=0)
def analytical_linearisation(self, m, sigma=None, hyp=None):
"""
Compute the Jacobian of the state space observation model w.r.t. the
function fₙ and the noise term σₙ.
"""
num_components = int(m.shape[0] / 2)
Jf = np.block([[self.link_fn(m[num_components:])], [m[:num_components] * self.dlink_fn(m[num_components:])]]).T
Jsigma = np.array([[hyp ** 0.5]])
return Jf, Jsigma
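# --- Illustrative sketch (not part of the original module) -------------------
# The cubature rules above rely on a gauss_hermite(dim, order) helper imported
# elsewhere in this library. As a hedged, self-contained illustration of the
# underlying idea, the snippet below approximates a 1-D Gaussian expectation
# E[g(x)], x ~ N(m, v), with standard Gauss-Hermite quadrature from NumPy.
# The helper name and the order=20 choice are illustrative assumptions.
import numpy as onp  # plain NumPy, to avoid clashing with this module's np
def _gauss_hermite_expectation(g, m, v, order=20):
    """Approximate E[g(x)] for x ~ N(m, v) via Gauss-Hermite quadrature."""
    nodes, weights = onp.polynomial.hermite.hermgauss(order)  # rule for ∫ e⁻ᵗ² f(t) dt
    sigma_points = m + onp.sqrt(2.0 * v) * nodes              # change of variables t → x
    return onp.sum(weights * g(sigma_points)) / onp.sqrt(onp.pi)
# e.g. E[x²] under N(0, 1): _gauss_hermite_expectation(lambda x: x ** 2, 0., 1.) ≈ 1.0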
|
apache-2.0
|
potsmaster/cinder
|
cinder/openstack/common/scheduler/filters/json_filter.py
|
22
|
4914
|
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
from oslo_serialization import jsonutils
import six
from cinder.openstack.common.scheduler import filters
class JsonFilter(filters.BaseHostFilter):
"""Host Filter to allow simple JSON-based grammar for
selecting hosts.
"""
def _op_compare(self, args, op):
"""Returns True if the specified operator can successfully
compare the first item in the args with all the rest. Will
return False if only one item is in the list.
"""
if len(args) < 2:
return False
if op is operator.contains:
bad = args[0] not in args[1:]
else:
bad = [arg for arg in args[1:]
if not op(args[0], arg)]
return not bool(bad)
def _equals(self, args):
"""First term is == all the other terms."""
return self._op_compare(args, operator.eq)
def _less_than(self, args):
"""First term is < all the other terms."""
return self._op_compare(args, operator.lt)
def _greater_than(self, args):
"""First term is > all the other terms."""
return self._op_compare(args, operator.gt)
def _in(self, args):
"""First term is in set of remaining terms."""
return self._op_compare(args, operator.contains)
def _less_than_equal(self, args):
"""First term is <= all the other terms."""
return self._op_compare(args, operator.le)
def _greater_than_equal(self, args):
"""First term is >= all the other terms."""
return self._op_compare(args, operator.ge)
def _not(self, args):
"""Flip each of the arguments."""
return [not arg for arg in args]
def _or(self, args):
"""True if any arg is True."""
return any(args)
def _and(self, args):
"""True if all args are True."""
return all(args)
commands = {
'=': _equals,
'<': _less_than,
'>': _greater_than,
'in': _in,
'<=': _less_than_equal,
'>=': _greater_than_equal,
'not': _not,
'or': _or,
'and': _and,
}
def _parse_string(self, string, host_state):
"""Strings prefixed with $ are capability lookups in the
form '$variable' where 'variable' is an attribute in the
HostState class. If $variable is a dictionary, you may
use: $variable.dictkey
"""
if not string:
return None
if not string.startswith("$"):
return string
path = string[1:].split(".")
obj = getattr(host_state, path[0], None)
if obj is None:
return None
for item in path[1:]:
obj = obj.get(item)
if obj is None:
return None
return obj
def _process_filter(self, query, host_state):
"""Recursively parse the query structure."""
if not query:
return True
cmd = query[0]
method = self.commands[cmd]
cooked_args = []
for arg in query[1:]:
if isinstance(arg, list):
arg = self._process_filter(arg, host_state)
elif isinstance(arg, six.string_types):
arg = self._parse_string(arg, host_state)
if arg is not None:
cooked_args.append(arg)
result = method(self, cooked_args)
return result
def host_passes(self, host_state, filter_properties):
"""Return a list of hosts that can fulfill the requirements
specified in the query.
"""
# TODO(zhiteng) Add description for filter_properties structure
# and scheduler_hints.
try:
query = filter_properties['scheduler_hints']['query']
except KeyError:
query = None
if not query:
return True
# NOTE(comstud): Not checking capabilities or service for
# enabled/disabled so that a provided json filter can decide
result = self._process_filter(jsonutils.loads(query), host_state)
if isinstance(result, list):
# If any succeeded, include the host
result = any(result)
if result:
# Query matched; the host passes this filter.
return True
return False
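# --- Illustrative usage sketch (not part of the original module) -------------
# A hedged example of the JSON grammar this filter consumes: the query arrives
# as a JSON string in scheduler_hints['query'], and '$'-prefixed strings are
# attribute lookups on the HostState object. FakeHostState and the
# free_ram_mb/free_disk_mb attribute names below are illustrative assumptions.
if __name__ == '__main__':
    class FakeHostState(object):
        free_ram_mb = 2048
        free_disk_mb = 300 * 1024
    example_query = jsonutils.dumps(
        ['and',
         ['>=', '$free_ram_mb', 1024],
         ['>=', '$free_disk_mb', 200 * 1024]])
    example_props = {'scheduler_hints': {'query': example_query}}
    # Expected to print True for the fake host above.
    print(JsonFilter().host_passes(FakeHostState(), example_props))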
|
apache-2.0
|
kobejean/tensorflow
|
tensorflow/python/eager/execution_callbacks.py
|
11
|
11832
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Execution Callbacks for Eager Mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import execute
from tensorflow.python.platform import tf_logging as logging
_DEFAULT_CALLBACK_ACTION = "raise"
_VALID_CALLBACK_ACTIONS = (None, "ignore", "print", "raise", "warn")
# TODO(cais): Consider moving this exception class to errors_impl.py.
class InfOrNanError(Exception):
"""Exception for inf and/or nan being present in tensor."""
def __init__(self,
op_type,
op_name,
output_index,
num_outputs,
value):
"""Constructor of InfOrNanError.
Args:
op_type: Type name of the op that generated the tensor that contains the
`inf`(s) or `nan`(s) (e.g., `Div`).
op_name: Name of the op that generated the tensor with `inf`(s) or
`nan`(s). This name is set by the client and can be `None` if it is unset.
output_index: The 0-based output index of the tensor that contains
`inf`(s) or `nan`(s).
num_outputs: Total number of outputs of the operation.
value: The tensor value that contains `inf`(s) or `nan`(s).
"""
self._op_type = op_type
self._op_name = op_name
self._output_index = output_index
self._num_outputs = num_outputs
self._value = value
self._total_count = np.size(value)
self._inf_count = np.count_nonzero(np.isinf(value))
self._nan_count = np.count_nonzero(np.isnan(value))
super(InfOrNanError, self).__init__(self._get_error_message())
def _get_error_message(self):
"""Get the error message describing this InfOrNanError object."""
name_str = (("'%s'" % self._op_name) if self._op_name is not None
else str(self._op_name))
msg = "Output %d of %d of TFE operation %s (name: %s) contains " % (
self._output_index + 1, self._num_outputs, self._op_type, name_str)
if self._inf_count and self._nan_count:
msg += "%d inf(s) and %d nan(s) " % (self._inf_count, self._nan_count)
elif self._inf_count:
msg += "%d inf(s) " % self._inf_count
else:
msg += "%d nan(s) " % self._nan_count
msg += "out of a total of %d element(s). Tensor value: %s" % (
self._total_count, self._value)
return msg
@property
def op_type(self):
return self._op_type
@property
def op_name(self):
return self._op_name
@property
def output_index(self):
return self._output_index
@property
def num_outputs(self):
return self._num_outputs
@property
def value(self):
return self._value
def inf_nan_callback(op_type,
inputs,
attrs,
outputs,
op_name,
check_inf=True,
check_nan=True,
action=_DEFAULT_CALLBACK_ACTION):
"""An execution callback that checks for `inf`s and `nan`s in output tensors.
This callback can be used with `tfe.add_execute_callback` to check for invalid
numeric values. E.g.,
```python
tfe.add_execute_callback(tfe.inf_nan_callback)
```
Args:
op_type: Name of the TFE operation type (e.g., `MatMul`).
inputs: The `list` of input tensors to the operation, currently unused by
this callback.
attrs: Attributes of the TFE operation, as a tuple of alternating attribute
names and attribute values.
outputs: The `list` of output tensors from the operation, checked by this
callback for `inf` and `nan` values.
op_name: Name of the TFE operation. This name is set by the client and can
be `None` if it is unset.
check_inf: (`bool`) Whether this callback should check for `inf` values in
the output tensor values.
check_nan: (`bool`) Whether this callback should check for `nan` values in
the output tensor values.
action: (`str`) Action to be taken by the callback when `inf` or `nan`
values are detected. Possible values {"raise", "warn", "print"}
`"raise"`: Raise a `InfOrNanError`.
`"warn"`: Log a warning using `tf.logging.warn`.
`"print"`: Print a message to `sys.stdout`.
Raises:
InfOrNanError: iff `inf` or `nan` values are seen in any of `outputs` and
`action` is `"raise"`.
ValueError: iff the value of `action` is invalid.
"""
del attrs, inputs # Not used.
ctx = context.context()
for index, output in enumerate(outputs):
if not output.dtype.is_numpy_compatible:
continue
numpy_dtype = output.dtype.as_numpy_dtype
if (np.issubdtype(numpy_dtype, np.floating) or
np.issubdtype(numpy_dtype, np.complex) or
np.issubdtype(numpy_dtype, np.integer)):
try:
check_numerics_op_attrs = (
"message", "Eager-mode inf/nan check",
"T", outputs[0].dtype.as_datatype_enum)
# TODO(cais): Consider moving this into execute.py.
# pylint: disable=protected-access
pywrap_tensorflow.TFE_Py_Execute(
ctx._handle, output.device, "CheckNumerics", [output],
check_numerics_op_attrs, 1)
# pylint: enable=protected-access
except core._NotOkStatusException: # pylint: disable=protected-access
value = output.numpy()
inf_detected = np.any(np.isinf(value)) and check_inf
nan_detected = np.any(np.isnan(value)) and check_nan
if not inf_detected and not nan_detected:
continue
error = InfOrNanError(op_type, op_name, index, len(outputs), value)
if action == "print":
print("Warning: %s" % str(error))
elif action == "warn":
logging.warn(str(error))
elif action == "raise":
raise error
else:
raise ValueError(
"Invalid action for inf_nan_callback: %s. Valid actions are: "
"{print | warn | raise}" % action)
def inf_callback(op_type,
inputs,
attrs,
outputs,
op_name,
action=_DEFAULT_CALLBACK_ACTION):
"""A specialization of `inf_nan_callback` that checks for `inf`s only."""
inf_nan_callback(
op_type,
inputs,
attrs,
outputs,
op_name,
check_inf=True,
check_nan=False,
action=action)
def nan_callback(op_type,
inputs,
attrs,
outputs,
op_name,
action=_DEFAULT_CALLBACK_ACTION):
"""A specialization of `inf_nan_callback` that checks for `nan`s only."""
inf_nan_callback(
op_type,
inputs,
attrs,
outputs,
op_name,
check_inf=False,
check_nan=True,
action=action)
def add_execution_callback(callback):
"""Add an execution callback to the default eager context.
An execution callback is invoked immediately after an eager operation or
function has finished execution, providing access to the op's type, name,
input and output tensors. Multiple execution callbacks can be added, in
which case the callbacks will be invoked in the order in which they are
added. To clear all execution callbacks that have been added, use
`clear_execution_callbacks()`.
Example:
```python
def print_even_callback(op_type, op_name, attrs, inputs, outputs):
# A callback that prints only the even output values.
if outputs[0].numpy() % 2 == 0:
print("Even output from %s: %s" % (op_name or op_type, outputs))
tfe.add_execution_callback(print_even_callback)
x = tf.pow(2.0, 3.0) - 3.0
y = tf.multiply(x, tf.add(1.0, 5.0))
# When the line above is run, you will see all intermediate outputs that are
# even numbers printed to the console.
tfe.clear_execution_callbacks()
```
Args:
callback: a callable of the signature
`f(op_type, op_name, attrs, inputs, outputs)`.
`op_type` is the type of the operation that was just executed (e.g.,
`MatMul`).
`op_name` is the name of the operation that was just executed. This
name is set by the client who created the operation and can be `None` if
it is unset.
`attrs` contains the attributes of the operation as a `tuple` of
alternating attribute name and attribute value.
`inputs` is the `list` of input `Tensor`(s) to the op.
`outputs` is the `list` of output `Tensor`(s) from the op.
Return value(s) from the callback are ignored.
"""
execute.execute = execute.execute_with_callbacks
context.context().add_post_execution_callback(callback)
def clear_execution_callbacks():
"""Clear all execution callbacks from the default eager context."""
context.context().clear_post_execution_callbacks()
def seterr(inf_or_nan=None):
"""Set how abnormal conditions are handled by the default eager context.
Example:
```python
tfe.seterr(inf_or_nan="raise")
a = tf.constant(10.0)
b = tf.constant(0.0)
try:
c = a / b # <-- Raises InfOrNanError.
except Exception as e:
print("Caught Exception: %s" % e)
tfe.seterr(inf_or_nan="ignore")
c = a / b # <-- Does NOT raise exception anymore.
```
Args:
inf_or_nan: Set action for infinity (`inf`) and NaN (`nan`) values.
Possible values: `{"ignore", "print", "raise", "warn"}`.
`"ignore"`: take no action when `inf` values appear.
`"print"`: print a warning to `stdout`.
`"raise"`: raise an `InfOrNanError`.
`"warn"`: print a warning using `tf.logging.warn`.
A value of `None` leads to no change in the action of the condition.
Returns:
A dictionary of old actions.
Raises:
ValueError: If the value of any keyword arguments is invalid.
"""
if inf_or_nan not in _VALID_CALLBACK_ACTIONS:
raise ValueError(
"Invalid action value for inf_or_nan: %s. "
"Valid actions are %s." % (inf_or_nan, _VALID_CALLBACK_ACTIONS))
old_settings = {"inf_or_nan": "ignore"}
default_context = context.context()
carryover_callbacks = []
for callback in default_context.post_execution_callbacks:
# Check whether the callback is inf_nan_callback or a partial object of
# inf_nan_callback.
if (callback == inf_nan_callback or
isinstance(callback, functools.partial) and
callback.func == inf_nan_callback):
if callback == inf_nan_callback:
old_settings["inf_or_nan"] = _DEFAULT_CALLBACK_ACTION
else:
old_settings["inf_or_nan"] = callback.keywords.get(
"action", _DEFAULT_CALLBACK_ACTION)
elif inf_or_nan is not None:
carryover_callbacks.append(callback)
if inf_or_nan is not None:
default_context.clear_post_execution_callbacks()
for callback in carryover_callbacks:
default_context.add_post_execution_callback(callback)
if inf_or_nan != "ignore":
default_context.add_post_execution_callback(
functools.partial(inf_nan_callback, action=inf_or_nan))
return old_settings
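# --- Illustrative usage sketch (not part of the original module) -------------
# A hedged example of the callback contract documented in
# add_execution_callback(): the callback below simply logs each executed op.
# Running it meaningfully assumes eager execution is enabled and that some ops
# are executed between registration and clearing.
if __name__ == "__main__":
    def _log_op_callback(op_type, op_name, attrs, inputs, outputs):
        # Invoked after every eager op; the return value is ignored.
        print("executed %s (name=%s) with %d output(s)"
              % (op_type, op_name, len(outputs)))
    add_execution_callback(_log_op_callback)
    # ... execute some eager ops here; each one triggers _log_op_callback ...
    clear_execution_callbacks()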
|
apache-2.0
|
lucastan/django-starter-box
|
djcdn/filters/__init__.py
|
1
|
3858
|
from __future__ import unicode_literals
import re
import os.path
import subprocess
import shutil
import tempfile
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import cssmin as cssmin_mod
import slimit as slimit_mod
from django.conf import settings
from django.core.files import File
from django.core.files.base import ContentFile
from django.utils.translation import force_text
_CSS_URL_MATCH = re.compile(
r'(?<!\w)url\([ \t]*(?P<quote>[\'"]?)(?P<url>.*?)(?P=quote)[ \t]*\)|'+
r'@import[ \t]+(?P<quote1>[\'"]?)(?P<url1>.*?)(?P=quote1)',
re.IGNORECASE
)
def cssmin(input_file):
"""
:type input_file: File
:returns: File
"""
content_bytes = input_file.read()
output_bytes = cssmin_mod.cssmin(content_bytes)
return ContentFile(output_bytes)
def _transform_url(url, version_str=None):
url = url.strip()
if url.startswith(settings.STATIC_ROOT):
root_len = len(settings.STATIC_ROOT)
ver = ''
if version_str:
ver = version_str + '/'
url = settings.STATIC_URL + ver + url[root_len:].lstrip('/')
return url
def csspath(input_file, version_str=None):
"""
:type input_file: File
:returns: File
"""
input_str = force_text(input_file.read())
cur = 0
output = StringIO()
matches = _CSS_URL_MATCH.finditer(input_str)
for match in matches:
url = match.group('url')
if url is None:
url = match.group('url1')
start, end = match.span('url1')
else:
start, end = match.span('url')
output.write(input_str[cur:start])
output.write(_transform_url(url, version_str=version_str).encode('utf8'))
cur = end
output.write(input_str[cur:])
output.seek(0)
return File(output)
def slimit(input_file):
"""
:type input_file: File
:returns: File
"""
content_bytes = input_file.read()
output_bytes = slimit_mod.minify(content_bytes, mangle=True, mangle_toplevel=False)
return ContentFile(output_bytes)
def jpegoptim(input_file):
file_path = getattr(input_file,'name', None)
if not file_path:
# We need a real file due to the way jpegoptim works.
print('ERROR: JPEG file has no filename')
return input_file
if not os.path.isfile(file_path):
print('ERROR: JPEG file does not exist: %s' % file_path)
return input_file
tmp_path = tempfile.mkdtemp()
subprocess.call(["jpegoptim", "--strip-all", file_path, '-d', tmp_path])
file_name = os.path.basename(file_path)
old_out_path = os.path.join(tmp_path, file_name)
if not os.path.isfile(old_out_path):
print('ERROR: JPEG output file does not exist: %s' % old_out_path)
return input_file
#
# Move the output file to a new temp file so we can remove the temp dir.
#
(out_handle, out_path) = tempfile.mkstemp(text=False)
os.rename(old_out_path, out_path)
os.rmdir(tmp_path)
return File(open(out_path, 'rb'))
def pngcrush(input_file):
file_path = getattr(input_file,'name', None)
if not file_path:
# We need a real file due to the way pngcrush works.
print('ERROR: PNG file has no filename')
return input_file
if not os.path.isfile(file_path):
print('ERROR: PNG file does not exist: %s' % file_path)
return input_file
(out_handle, out_path) = tempfile.mkstemp(text=False)
# make the tool quiet as it makes too much noise :)
subprocess.call(["pngcrush", '-q', "-brute", '-reduce', file_path, out_path])
if not os.path.isfile(out_path):
print('ERROR: PNG output file does not exist: %s' % out_path)
return input_file
return File(open(out_path, 'rb'))
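# --- Illustrative usage sketch (not part of the original module) -------------
# A hedged example of chaining the CSS filters above. It assumes Django
# settings are already configured (STATIC_ROOT/STATIC_URL) and that the cssmin
# package is installed; the sample stylesheet content is made up.
if __name__ == '__main__':
    sample_css = ContentFile(
        b"body { color: #ffffff; }\n"
        b"h1 { background: url('/srv/static/img/logo.png'); }\n")
    minified = cssmin(sample_css)                     # -> ContentFile, minified
    rewritten = csspath(minified, version_str='v42')  # rewrite url()/@import paths
    print(rewritten.read())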
|
mit
|
wallnerryan/quantum_migrate
|
quantum/tests/unit/test_routerserviceinsertion.py
|
2
|
18424
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import testtools
import webob.exc as webexc
import quantum
from quantum.api import extensions
from quantum.api.v2 import attributes
from quantum.api.v2 import router
from quantum.common import config
from quantum import context as q_context
from quantum.db import api as db
from quantum.db import db_base_plugin_v2
from quantum.db import l3_db
from quantum.db.loadbalancer import loadbalancer_db as lb_db
from quantum.db import routedserviceinsertion_db as rsi_db
from quantum.db import routerservicetype_db as rst_db
from quantum.db import servicetype_db as st_db
from quantum.extensions import routedserviceinsertion as rsi
from quantum.extensions import routerservicetype as rst
from quantum.plugins.common import constants
from quantum.tests import base
from quantum.tests.unit import test_api_v2
from quantum.tests.unit import testlib_api
from quantum import wsgi
_uuid = test_api_v2._uuid
_get_path = test_api_v2._get_path
extensions_path = ':'.join(quantum.extensions.__path__)
class RouterServiceInsertionTestPlugin(
rst_db.RouterServiceTypeDbMixin,
rsi_db.RoutedServiceInsertionDbMixin,
st_db.ServiceTypeManager,
lb_db.LoadBalancerPluginDb,
l3_db.L3_NAT_db_mixin,
db_base_plugin_v2.QuantumDbPluginV2):
supported_extension_aliases = [
"router", "router-service-type", "routed-service-insertion",
"service-type", "lbaas"
]
def create_router(self, context, router):
with context.session.begin(subtransactions=True):
r = super(RouterServiceInsertionTestPlugin, self).create_router(
context, router)
service_type_id = router['router'].get(rst.SERVICE_TYPE_ID)
if service_type_id is not None:
r[rst.SERVICE_TYPE_ID] = service_type_id
self._process_create_router_service_type_id(
context, r)
return r
def get_router(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
r = super(RouterServiceInsertionTestPlugin, self).get_router(
context, id, fields)
rsbind = self._get_router_service_type_id_binding(context, id)
if rsbind:
r[rst.SERVICE_TYPE_ID] = rsbind['service_type_id']
return r
def delete_router(self, context, id):
with context.session.begin(subtransactions=True):
super(RouterServiceInsertionTestPlugin, self).delete_router(
context, id)
rsbind = self._get_router_service_type_id_binding(context, id)
if rsbind:
raise Exception('Router service-type binding is not deleted')
def create_resource(self, res, context, resource, model):
with context.session.begin(subtransactions=True):
method_name = "create_{0}".format(res)
method = getattr(super(RouterServiceInsertionTestPlugin, self),
method_name)
o = method(context, resource)
router_id = resource[res].get(rsi.ROUTER_ID)
if router_id is not None:
o[rsi.ROUTER_ID] = router_id
self._process_create_resource_router_id(
context, o, model)
return o
def get_resource(self, res, context, id, fields, model):
method_name = "get_{0}".format(res)
method = getattr(super(RouterServiceInsertionTestPlugin, self),
method_name)
o = method(context, id, fields)
if fields is None or rsi.ROUTER_ID in fields:
rsbind = self._get_resource_router_id_binding(
context, id, model)
if rsbind:
o[rsi.ROUTER_ID] = rsbind['router_id']
return o
def delete_resource(self, res, context, id, model):
method_name = "delete_{0}".format(res)
with context.session.begin(subtransactions=True):
method = getattr(super(RouterServiceInsertionTestPlugin, self),
method_name)
method(context, id)
self._delete_resource_router_id_binding(context, id, model)
if self._get_resource_router_id_binding(context, id, model):
raise Exception("{0}-router binding is not deleted".format(res))
def create_pool(self, context, pool):
return self.create_resource('pool', context, pool, lb_db.Pool)
def get_pool(self, context, id, fields=None):
return self.get_resource('pool', context, id, fields, lb_db.Pool)
def delete_pool(self, context, id):
return self.delete_resource('pool', context, id, lb_db.Pool)
def create_health_monitor(self, context, health_monitor):
return self.create_resource('health_monitor', context, health_monitor,
lb_db.HealthMonitor)
def get_health_monitor(self, context, id, fields=None):
return self.get_resource('health_monitor', context, id, fields,
lb_db.HealthMonitor)
def delete_health_monitor(self, context, id):
return self.delete_resource('health_monitor', context, id,
lb_db.HealthMonitor)
def create_vip(self, context, vip):
return self.create_resource('vip', context, vip, lb_db.Vip)
def get_vip(self, context, id, fields=None):
return self.get_resource(
'vip', context, id, fields, lb_db.Vip)
def delete_vip(self, context, id):
return self.delete_resource('vip', context, id, lb_db.Vip)
def stats(self, context, pool_id):
pass
class RouterServiceInsertionTestCase(base.BaseTestCase):
def setUp(self):
super(RouterServiceInsertionTestCase, self).setUp()
plugin = (
"quantum.tests.unit.test_routerserviceinsertion."
"RouterServiceInsertionTestPlugin"
)
# point config file to: quantum/tests/etc/quantum.conf.test
args = ['--config-file', test_api_v2.etcdir('quantum.conf.test')]
config.parse(args=args)
# just stubbing the core plugin with the LoadBalancer plugin
cfg.CONF.set_override('core_plugin', plugin)
cfg.CONF.set_override('service_plugins', [plugin])
cfg.CONF.set_override('quota_router', -1, group='QUOTAS')
self.addCleanup(cfg.CONF.reset)
# Ensure 'stale' patched copies of the plugin are never returned
quantum.manager.QuantumManager._instance = None
# Ensure the database is reset between tests
db._ENGINE = None
db._MAKER = None
# Ensure existing ExtensionManager is not used
ext_mgr = extensions.PluginAwareExtensionManager(
extensions_path,
{constants.LOADBALANCER: RouterServiceInsertionTestPlugin()}
)
extensions.PluginAwareExtensionManager._instance = ext_mgr
router.APIRouter()
app = config.load_paste_app('extensions_test_app')
self._api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
self._tenant_id = "8c70909f-b081-452d-872b-df48e6c355d1"
res = self._do_request('GET', _get_path('service-types'))
self._service_type_id = res['service_types'][0]['id']
self._setup_core_resources()
# FIXME (markmcclain): The test setup makes it difficult to add core
# resources via the API. In the interim we'll create them directly using the
# plugin, with the side effect of polluting the fixture database until
# tearDown.
def _setup_core_resources(self):
core_plugin = quantum.manager.QuantumManager.get_plugin()
self._network = core_plugin.create_network(
q_context.get_admin_context(),
{
'network':
{
'tenant_id': self._tenant_id,
'name': 'test net',
'admin_state_up': True,
'shared': False,
}
}
)
self._subnet = core_plugin.create_subnet(
q_context.get_admin_context(),
{
'subnet':
{
'network_id': self._network['id'],
'name': 'test subnet',
'cidr': '192.168.1.0/24',
'ip_version': 4,
'gateway_ip': '192.168.1.1',
'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
'dns_nameservers': attributes.ATTR_NOT_SPECIFIED,
'host_routes': attributes.ATTR_NOT_SPECIFIED,
'enable_dhcp': True,
}
}
)
self._subnet_id = self._subnet['id']
def _do_request(self, method, path, data=None, params=None, action=None):
content_type = 'application/json'
body = None
if data is not None: # empty dict is valid
body = wsgi.Serializer().serialize(data, content_type)
req = testlib_api.create_request(
path, body, content_type,
method, query_string=params)
res = req.get_response(self._api)
if res.status_code >= 400:
raise webexc.HTTPClientError(detail=res.body, code=res.status_code)
if res.status_code != webexc.HTTPNoContent.code:
return res.json
def _router_create(self, service_type_id=None):
data = {
"router": {
"tenant_id": self._tenant_id,
"name": "test",
"admin_state_up": True,
"service_type_id": service_type_id,
}
}
res = self._do_request('POST', _get_path('routers'), data)
return res['router']
def test_router_create_no_service_type_id(self):
router = self._router_create()
self.assertEqual(router.get('service_type_id'), None)
def test_router_create_with_service_type_id(self):
router = self._router_create(self._service_type_id)
self.assertEqual(router['service_type_id'], self._service_type_id)
def test_router_get(self):
router = self._router_create(self._service_type_id)
res = self._do_request('GET',
_get_path('routers/{0}'.format(router['id'])))
self.assertEqual(res['router']['service_type_id'],
self._service_type_id)
def _test_router_update(self, update_service_type_id):
router = self._router_create(self._service_type_id)
router_id = router['id']
new_name = _uuid()
data = {
"router": {
"name": new_name,
"admin_state_up": router['admin_state_up'],
}
}
if update_service_type_id:
data["router"]["service_type_id"] = _uuid()
with testtools.ExpectedException(
webexc.HTTPClientError) as ctx_manager:
res = self._do_request(
'PUT', _get_path('routers/{0}'.format(router_id)), data)
self.assertEqual(ctx_manager.exception.code, 400)
else:
res = self._do_request(
'PUT', _get_path('routers/{0}'.format(router_id)), data)
res = self._do_request(
'GET', _get_path('routers/{0}'.format(router['id'])))
self.assertEqual(res['router']['name'], new_name)
def test_router_update_with_service_type_id(self):
self._test_router_update(True)
def test_router_update_without_service_type_id(self):
self._test_router_update(False)
def test_router_delete(self):
router = self._router_create(self._service_type_id)
self._do_request(
'DELETE', _get_path('routers/{0}'.format(router['id'])))
def _test_lb_setup(self):
router = self._router_create(self._service_type_id)
self._router_id = router['id']
def _test_pool_setup(self):
self._test_lb_setup()
def _test_health_monitor_setup(self):
self._test_lb_setup()
def _test_vip_setup(self):
self._test_pool_setup()
pool = self._pool_create(self._router_id)
self._pool_id = pool['id']
def _create_resource(self, res, data):
resp = self._do_request('POST', _get_path('lb/{0}s'.format(res)), data)
return resp[res]
def _pool_create(self, router_id=None):
data = {
"pool": {
"tenant_id": self._tenant_id,
"name": "test",
"protocol": "HTTP",
"subnet_id": self._subnet_id,
"lb_method": "ROUND_ROBIN",
"router_id": router_id
}
}
return self._create_resource('pool', data)
def _pool_update_attrs(self, pool):
uattr = {}
fields = [
'name', 'description', 'lb_method',
'health_monitors', 'admin_state_up'
]
for field in fields:
uattr[field] = pool[field]
return uattr
def _health_monitor_create(self, router_id=None):
data = {
"health_monitor": {
"tenant_id": self._tenant_id,
"type": "HTTP",
"delay": 1,
"timeout": 1,
"max_retries": 1,
"router_id": router_id
}
}
return self._create_resource('health_monitor', data)
def _health_monitor_update_attrs(self, hm):
uattr = {}
fields = ['delay', 'timeout', 'max_retries']
for field in fields:
uattr[field] = hm[field]
return uattr
def _vip_create(self, router_id=None):
data = {
"vip": {
"tenant_id": self._tenant_id,
"name": "test",
"protocol": "HTTP",
"protocol_port": 80,
"subnet_id": self._subnet_id,
"pool_id": self._pool_id,
"address": "192.168.1.102",
"connection_limit": 100,
"admin_state_up": True,
"router_id": router_id
}
}
return self._create_resource('vip', data)
def _vip_update_attrs(self, vip):
uattr = {}
fields = [
'name', 'description', 'pool_id', 'connection_limit',
'admin_state_up'
]
for field in fields:
uattr[field] = vip[field]
return uattr
def _test_resource_create(self, res):
getattr(self, "_test_{0}_setup".format(res))()
obj = getattr(self, "_{0}_create".format(res))(self._router_id)
self.assertEqual(obj['router_id'], self._router_id)
def _test_resource_update(self, res, update_router_id,
update_attr, update_value):
getattr(self, "_test_{0}_setup".format(res))()
obj = getattr(self, "_{0}_create".format(res))(self._router_id)
uattrs = getattr(self, "_{0}_update_attrs".format(res))(obj)
uattrs[update_attr] = update_value
data = {res: uattrs}
if update_router_id:
uattrs['router_id'] = self._router_id
with testtools.ExpectedException(
webexc.HTTPClientError) as ctx_manager:
newobj = self._do_request(
'PUT',
_get_path('lb/{0}s/{1}'.format(res, obj['id'])), data)
self.assertEqual(ctx_manager.exception.code, 400)
else:
newobj = self._do_request(
'PUT',
_get_path('lb/{0}s/{1}'.format(res, obj['id'])), data)
updated = self._do_request(
'GET',
_get_path('lb/{0}s/{1}'.format(res, obj['id'])))
self.assertEqual(updated[res][update_attr], update_value)
def _test_resource_delete(self, res, with_router_id):
getattr(self, "_test_{0}_setup".format(res))()
func = getattr(self, "_{0}_create".format(res))
if with_router_id:
obj = func(self._router_id)
else:
obj = func()
self._do_request(
'DELETE', _get_path('lb/{0}s/{1}'.format(res, obj['id'])))
def test_pool_create(self):
self._test_resource_create('pool')
def test_pool_update_with_router_id(self):
self._test_resource_update('pool', True, 'name', _uuid())
def test_pool_update_without_router_id(self):
self._test_resource_update('pool', False, 'name', _uuid())
def test_pool_delete_with_router_id(self):
self._test_resource_delete('pool', True)
def test_pool_delete_without_router_id(self):
self._test_resource_delete('pool', False)
def test_health_monitor_create(self):
self._test_resource_create('health_monitor')
def test_health_monitor_update_with_router_id(self):
self._test_resource_update('health_monitor', True, 'timeout', 2)
def test_health_monitor_update_without_router_id(self):
self._test_resource_update('health_monitor', False, 'timeout', 2)
def test_health_monitor_delete_with_router_id(self):
self._test_resource_delete('health_monitor', True)
def test_health_monitor_delete_without_router_id(self):
self._test_resource_delete('health_monitor', False)
def test_vip_create(self):
self._test_resource_create('vip')
def test_vip_update_with_router_id(self):
self._test_resource_update('vip', True, 'name', _uuid())
def test_vip_update_without_router_id(self):
self._test_resource_update('vip', False, 'name', _uuid())
def test_vip_delete_with_router_id(self):
self._test_resource_delete('vip', True)
def test_vip_delete_without_router_id(self):
self._test_resource_delete('vip', False)
|
apache-2.0
|
rahushen/ansible
|
lib/ansible/modules/database/misc/riak.py
|
20
|
7457
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, James Martin <[email protected]>, Drew Kerrigan <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: riak
short_description: This module handles some common Riak operations
description:
- This module can be used to join nodes to a cluster and check
the status of the cluster.
version_added: "1.2"
author:
- "James Martin (@jsmartin)"
- "Drew Kerrigan (@drewkerrigan)"
options:
command:
description:
- The command you would like to perform against the cluster.
required: false
default: null
choices: ['ping', 'kv_test', 'join', 'plan', 'commit']
config_dir:
description:
- The path to the riak configuration directory
required: false
default: /etc/riak
http_conn:
description:
- The ip address and port that is listening for Riak HTTP queries
required: false
default: 127.0.0.1:8098
target_node:
description:
- The target node for certain operations (join, ping)
required: false
default: [email protected]
wait_for_handoffs:
description:
- Number of seconds to wait for handoffs to complete.
required: false
default: null
wait_for_ring:
description:
- Number of seconds to wait for all nodes to agree on the ring.
required: false
default: null
wait_for_service:
description:
- Waits for a riak service to come online before continuing.
required: false
default: None
choices: ['kv']
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
'''
EXAMPLES = '''
# Join's a Riak node to another node
- riak:
command: join
target_node: [email protected]
# Wait for handoffs to finish. Use with async and poll.
- riak:
wait_for_handoffs: yes
# Wait for riak_kv service to startup
- riak:
wait_for_service: kv
'''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
def ring_check(module, riak_admin_bin):
cmd = '%s ringready' % riak_admin_bin
rc, out, err = module.run_command(cmd)
if rc == 0 and 'TRUE All nodes agree on the ring' in out:
return True
else:
return False
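# --- Illustrative sketch (not part of the original module) -------------------
# A hedged illustration of the poll-until-deadline pattern main() uses when
# fetching /stats and when waiting for handoffs or ring agreement: retry a
# condition at a fixed interval until it holds or a timeout expires. The
# helper name, condition callable and interval are illustrative assumptions.
def _example_wait_until(condition, timeout_secs, interval_secs=5):
    deadline = time.time() + timeout_secs
    while True:
        if condition():
            return True
        if time.time() > deadline:
            return False
        time.sleep(interval_secs)
# e.g. _example_wait_until(lambda: ring_check(module, riak_admin_bin), wait_for_ring)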
def main():
module = AnsibleModule(
argument_spec=dict(
command=dict(required=False, default=None, choices=[
'ping', 'kv_test', 'join', 'plan', 'commit']),
config_dir=dict(default='/etc/riak', type='path'),
http_conn=dict(required=False, default='127.0.0.1:8098'),
target_node=dict(default='[email protected]', required=False),
wait_for_handoffs=dict(default=False, type='int'),
wait_for_ring=dict(default=False, type='int'),
wait_for_service=dict(
required=False, default=None, choices=['kv']),
validate_certs=dict(default='yes', type='bool'))
)
command = module.params.get('command')
http_conn = module.params.get('http_conn')
target_node = module.params.get('target_node')
wait_for_handoffs = module.params.get('wait_for_handoffs')
wait_for_ring = module.params.get('wait_for_ring')
wait_for_service = module.params.get('wait_for_service')
# make sure riak commands are on the path
riak_bin = module.get_bin_path('riak')
riak_admin_bin = module.get_bin_path('riak-admin')
timeout = time.time() + 120
while True:
if time.time() > timeout:
module.fail_json(msg='Timeout, could not fetch Riak stats.')
(response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5)
if info['status'] == 200:
stats_raw = response.read()
break
time.sleep(5)
# here we attempt to load those stats.
try:
stats = json.loads(stats_raw)
except:
module.fail_json(msg='Could not parse Riak stats.')
node_name = stats['nodename']
nodes = stats['ring_members']
ring_size = stats['ring_creation_size']
rc, out, err = module.run_command([riak_bin, 'version'])
version = out.strip()
result = dict(node_name=node_name,
nodes=nodes,
ring_size=ring_size,
version=version)
if command == 'ping':
cmd = '%s ping %s' % (riak_bin, target_node)
rc, out, err = module.run_command(cmd)
if rc == 0:
result['ping'] = out
else:
module.fail_json(msg=out)
elif command == 'kv_test':
cmd = '%s test' % riak_admin_bin
rc, out, err = module.run_command(cmd)
if rc == 0:
result['kv_test'] = out
else:
module.fail_json(msg=out)
elif command == 'join':
if nodes.count(node_name) == 1 and len(nodes) > 1:
result['join'] = 'Node is already in cluster or staged to be in cluster.'
else:
cmd = '%s cluster join %s' % (riak_admin_bin, target_node)
rc, out, err = module.run_command(cmd)
if rc == 0:
result['join'] = out
result['changed'] = True
else:
module.fail_json(msg=out)
elif command == 'plan':
cmd = '%s cluster plan' % riak_admin_bin
rc, out, err = module.run_command(cmd)
if rc == 0:
result['plan'] = out
if 'Staged Changes' in out:
result['changed'] = True
else:
module.fail_json(msg=out)
elif command == 'commit':
cmd = '%s cluster commit' % riak_admin_bin
rc, out, err = module.run_command(cmd)
if rc == 0:
result['commit'] = out
result['changed'] = True
else:
module.fail_json(msg=out)
# this could take a while, recommend to run in async mode
if wait_for_handoffs:
timeout = time.time() + wait_for_handoffs
while True:
cmd = '%s transfers' % riak_admin_bin
rc, out, err = module.run_command(cmd)
if 'No transfers active' in out:
result['handoffs'] = 'No transfers active.'
break
time.sleep(10)
if time.time() > timeout:
module.fail_json(msg='Timeout waiting for handoffs.')
if wait_for_service:
cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name]
rc, out, err = module.run_command(cmd)
result['service'] = out
if wait_for_ring:
timeout = time.time() + wait_for_ring
while True:
if ring_check(module, riak_admin_bin):
break
time.sleep(10)
if time.time() > timeout:
module.fail_json(msg='Timeout waiting for nodes to agree on ring.')
result['ring_ready'] = ring_check(module, riak_admin_bin)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
rghe/ansible
|
lib/ansible/modules/notification/twilio.py
|
47
|
5594
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Matt Makai <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
version_added: "1.6"
module: twilio
short_description: Sends a text message to a mobile phone through Twilio.
description:
- Sends a text message to a phone number through the Twilio messaging API.
notes:
- This module is non-idempotent because it sends a text message through the
external API. It is idempotent only in the case that the module fails.
- Like the other notification modules, this one requires an external
dependency to work. In this case, you'll need a Twilio account with
a purchased or verified phone number to send the text message.
options:
account_sid:
description:
user's Twilio account SID, found on the account page
required: true
auth_token:
description: user's Twilio authentication token
required: true
msg:
description:
the body of the text message
required: true
to_number:
description:
one or more phone numbers to send the text message to,
format +15551112222
required: true
from_number:
description:
the Twilio number to send the text message from, format +15551112222
required: true
media_url:
description:
a URL with a picture, video or sound clip to send with an MMS
(multimedia message) instead of a plain SMS
required: false
author: "Matt Makai (@makaimc)"
'''
EXAMPLES = '''
# send an SMS about the build status to (555) 303 5681
# note: replace account_sid and auth_token values with your credentials
# and you have to have the 'from_number' on your Twilio account
- twilio:
msg: All servers with webserver role are now configured.
account_sid: ACXXXXXXXXXXXXXXXXX
auth_token: ACXXXXXXXXXXXXXXXXX
from_number: +15552014545
to_number: +15553035681
delegate_to: localhost
# send an SMS to multiple phone numbers about the deployment
# note: replace account_sid and auth_token values with your credentials
# and you have to have the 'from_number' on your Twilio account
- twilio:
msg: This server configuration is now complete.
account_sid: ACXXXXXXXXXXXXXXXXX
auth_token: ACXXXXXXXXXXXXXXXXX
from_number: +15553258899
to_number:
- +15551113232
- +12025551235
- +19735559010
delegate_to: localhost
# send an MMS to a single recipient with an update on the deployment
# and an image of the results
# note: replace account_sid and auth_token values with your credentials
# and you have to have the 'from_number' on your Twilio account
- twilio:
msg: Deployment complete!
account_sid: ACXXXXXXXXXXXXXXXXX
auth_token: ACXXXXXXXXXXXXXXXXX
from_number: +15552014545
to_number: +15553035681
media_url: https://demo.twilio.com/logo.png
delegate_to: localhost
'''
# =======================================
# twilio module support methods
#
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import fetch_url
def post_twilio_api(module, account_sid, auth_token, msg, from_number,
to_number, media_url=None):
URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \
% (account_sid,)
AGENT = "Ansible"
data = {'From': from_number, 'To': to_number, 'Body': msg}
if media_url:
data['MediaUrl'] = media_url
encoded_data = urlencode(data)
headers = {'User-Agent': AGENT,
'Content-type': 'application/x-www-form-urlencoded',
'Accept': 'application/json',
}
# Hack module params to have the Basic auth params that fetch_url expects
module.params['url_username'] = account_sid.replace('\n', '')
module.params['url_password'] = auth_token.replace('\n', '')
return fetch_url(module, URI, data=encoded_data, headers=headers)
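# --- Illustrative sketch (not part of the original module) -------------------
# A hedged look at the request post_twilio_api() builds: a form-encoded POST to
# the account's Messages endpoint, authenticated with HTTP Basic auth (account
# SID as username, auth token as password, supplied to fetch_url via the module
# params above). The helper name and placeholder values are illustrative.
def _example_message_request(account_sid='ACXXXXXXXXXXXXXXXXX',
                             from_number='+15552014545',
                             to_number='+15553035681',
                             msg='Deployment complete!'):
    uri = ("https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json"
           % account_sid)
    body = urlencode({'From': from_number, 'To': to_number, 'Body': msg})
    return uri, body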
# =======================================
# Main
#
def main():
module = AnsibleModule(
argument_spec=dict(
account_sid=dict(required=True),
auth_token=dict(required=True, no_log=True),
msg=dict(required=True),
from_number=dict(required=True),
to_number=dict(required=True),
media_url=dict(default=None, required=False),
),
supports_check_mode=True
)
account_sid = module.params['account_sid']
auth_token = module.params['auth_token']
msg = module.params['msg']
from_number = module.params['from_number']
to_number = module.params['to_number']
media_url = module.params['media_url']
if not isinstance(to_number, list):
to_number = [to_number]
for number in to_number:
r, info = post_twilio_api(module, account_sid, auth_token, msg,
from_number, number, media_url)
if info['status'] not in [200, 201]:
body_message = "unknown error"
if 'body' in info:
body = json.loads(info['body'])
body_message = body['message']
module.fail_json(msg="unable to send message to %s: %s" % (number, body_message))
module.exit_json(msg=msg, changed=False)
if __name__ == '__main__':
main()
|
gpl-3.0
|
tseaver/gcloud-python
|
dataproc/google/cloud/dataproc_v1/proto/clusters_pb2.py
|
3
|
90026
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/dataproc_v1/proto/clusters.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.dataproc_v1.proto import operations_pb2 as google_dot_cloud_dot_dataproc__v1_dot_proto_dot_operations__pb2
from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/cloud/dataproc_v1/proto/clusters.proto',
package='google.cloud.dataproc.v1',
syntax='proto3',
serialized_pb=_b('\n-google/cloud/dataproc_v1/proto/clusters.proto\x12\x18google.cloud.dataproc.v1\x1a\x1cgoogle/api/annotations.proto\x1a/google/cloud/dataproc_v1/proto/operations.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xa5\x03\n\x07\x43luster\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x14\n\x0c\x63luster_name\x18\x02 \x01(\t\x12\x37\n\x06\x63onfig\x18\x03 \x01(\x0b\x32\'.google.cloud.dataproc.v1.ClusterConfig\x12=\n\x06labels\x18\x08 \x03(\x0b\x32-.google.cloud.dataproc.v1.Cluster.LabelsEntry\x12\x37\n\x06status\x18\x04 \x01(\x0b\x32\'.google.cloud.dataproc.v1.ClusterStatus\x12?\n\x0estatus_history\x18\x07 \x03(\x0b\x32\'.google.cloud.dataproc.v1.ClusterStatus\x12\x14\n\x0c\x63luster_uuid\x18\x06 \x01(\t\x12\x39\n\x07metrics\x18\t \x01(\x0b\x32(.google.cloud.dataproc.v1.ClusterMetrics\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xe1\x03\n\rClusterConfig\x12\x15\n\rconfig_bucket\x18\x01 \x01(\t\x12\x46\n\x12gce_cluster_config\x18\x08 \x01(\x0b\x32*.google.cloud.dataproc.v1.GceClusterConfig\x12\x44\n\rmaster_config\x18\t \x01(\x0b\x32-.google.cloud.dataproc.v1.InstanceGroupConfig\x12\x44\n\rworker_config\x18\n \x01(\x0b\x32-.google.cloud.dataproc.v1.InstanceGroupConfig\x12N\n\x17secondary_worker_config\x18\x0c \x01(\x0b\x32-.google.cloud.dataproc.v1.InstanceGroupConfig\x12\x41\n\x0fsoftware_config\x18\r \x01(\x0b\x32(.google.cloud.dataproc.v1.SoftwareConfig\x12R\n\x16initialization_actions\x18\x0b \x03(\x0b\x32\x32.google.cloud.dataproc.v1.NodeInitializationAction\"\xaf\x02\n\x10GceClusterConfig\x12\x10\n\x08zone_uri\x18\x01 \x01(\t\x12\x13\n\x0bnetwork_uri\x18\x02 \x01(\t\x12\x16\n\x0esubnetwork_uri\x18\x06 \x01(\t\x12\x18\n\x10internal_ip_only\x18\x07 \x01(\x08\x12\x17\n\x0fservice_account\x18\x08 \x01(\t\x12\x1e\n\x16service_account_scopes\x18\x03 \x03(\t\x12\x0c\n\x04tags\x18\x04 \x03(\t\x12J\n\x08metadata\x18\x05 \x03(\x0b\x32\x38.google.cloud.dataproc.v1.GceClusterConfig.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xd3\x02\n\x13InstanceGroupConfig\x12\x15\n\rnum_instances\x18\x01 \x01(\x05\x12\x16\n\x0einstance_names\x18\x02 \x03(\t\x12\x11\n\timage_uri\x18\x03 \x01(\t\x12\x18\n\x10machine_type_uri\x18\x04 \x01(\t\x12\x39\n\x0b\x64isk_config\x18\x05 \x01(\x0b\x32$.google.cloud.dataproc.v1.DiskConfig\x12\x16\n\x0eis_preemptible\x18\x06 \x01(\x08\x12J\n\x14managed_group_config\x18\x07 \x01(\x0b\x32,.google.cloud.dataproc.v1.ManagedGroupConfig\x12\x41\n\x0c\x61\x63\x63\x65lerators\x18\x08 \x03(\x0b\x32+.google.cloud.dataproc.v1.AcceleratorConfig\"Y\n\x12ManagedGroupConfig\x12\x1e\n\x16instance_template_name\x18\x01 \x01(\t\x12#\n\x1binstance_group_manager_name\x18\x02 \x01(\t\"L\n\x11\x41\x63\x63\x65leratorConfig\x12\x1c\n\x14\x61\x63\x63\x65lerator_type_uri\x18\x01 \x01(\t\x12\x19\n\x11\x61\x63\x63\x65lerator_count\x18\x02 \x01(\x05\"?\n\nDiskConfig\x12\x19\n\x11\x62oot_disk_size_gb\x18\x01 \x01(\x05\x12\x16\n\x0enum_local_ssds\x18\x02 \x01(\x05\"i\n\x18NodeInitializationAction\x12\x17\n\x0f\x65xecutable_file\x18\x01 \x01(\t\x12\x34\n\x11\x65xecution_timeout\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\"\xed\x02\n\rClusterStatus\x12<\n\x05state\x18\x01 \x01(\x0e\x32-.google.cloud.dataproc.v1.ClusterStatus.State\x12\x0e\n\x06\x64\x65tail\x18\x02 \x01(\t\x12\x34\n\x10state_start_time\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x08substate\x18\x04 \x01(\x0e\x32\x30.google.cloud.dataproc.v1.ClusterStatus.Substate\"V\n\x05State\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\t\n\x05\x45RROR\x10\x03\x12\x0c\n\x08\x44\x45LETING\x10\x04\x12\x0c\n\x08UPDATING\x10\x05\"<\n\x08Substate\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\r\n\tUNHEALTHY\x10\x01\x12\x10\n\x0cSTALE_STATUS\x10\x02\"\xa8\x01\n\x0eSoftwareConfig\x12\x15\n\rimage_version\x18\x01 \x01(\t\x12L\n\nproperties\x18\x02 \x03(\x0b\x32\x38.google.cloud.dataproc.v1.SoftwareConfig.PropertiesEntry\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x9a\x02\n\x0e\x43lusterMetrics\x12O\n\x0chdfs_metrics\x18\x01 \x03(\x0b\x32\x39.google.cloud.dataproc.v1.ClusterMetrics.HdfsMetricsEntry\x12O\n\x0cyarn_metrics\x18\x02 \x03(\x0b\x32\x39.google.cloud.dataproc.v1.ClusterMetrics.YarnMetricsEntry\x1a\x32\n\x10HdfsMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\x1a\x32\n\x10YarnMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\"n\n\x14\x43reateClusterRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0e\n\x06region\x18\x03 \x01(\t\x12\x32\n\x07\x63luster\x18\x02 \x01(\x0b\x32!.google.cloud.dataproc.v1.Cluster\"\xb5\x01\n\x14UpdateClusterRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0e\n\x06region\x18\x05 \x01(\t\x12\x14\n\x0c\x63luster_name\x18\x02 \x01(\t\x12\x32\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.cloud.dataproc.v1.Cluster\x12/\n\x0bupdate_mask\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\"P\n\x14\x44\x65leteClusterRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0e\n\x06region\x18\x03 \x01(\t\x12\x14\n\x0c\x63luster_name\x18\x02 \x01(\t\"M\n\x11GetClusterRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0e\n\x06region\x18\x03 \x01(\t\x12\x14\n\x0c\x63luster_name\x18\x02 \x01(\t\"p\n\x13ListClustersRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0e\n\x06region\x18\x04 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x05 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"d\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.cloud.dataproc.v1.Cluster\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"R\n\x16\x44iagnoseClusterRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0e\n\x06region\x18\x03 \x01(\t\x12\x14\n\x0c\x63luster_name\x18\x02 \x01(\t\",\n\x16\x44iagnoseClusterResults\x12\x12\n\noutput_uri\x18\x01 
\x01(\t2\xb2\x08\n\x11\x43lusterController\x12\xa4\x01\n\rCreateCluster\x12..google.cloud.dataproc.v1.CreateClusterRequest\x1a\x1d.google.longrunning.Operation\"D\x82\xd3\xe4\x93\x02>\"3/v1/projects/{project_id}/regions/{region}/clusters:\x07\x63luster\x12\xb3\x01\n\rUpdateCluster\x12..google.cloud.dataproc.v1.UpdateClusterRequest\x1a\x1d.google.longrunning.Operation\"S\x82\xd3\xe4\x93\x02M2B/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:\x07\x63luster\x12\xaa\x01\n\rDeleteCluster\x12..google.cloud.dataproc.v1.DeleteClusterRequest\x1a\x1d.google.longrunning.Operation\"J\x82\xd3\xe4\x93\x02\x44*B/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}\x12\xa8\x01\n\nGetCluster\x12+.google.cloud.dataproc.v1.GetClusterRequest\x1a!.google.cloud.dataproc.v1.Cluster\"J\x82\xd3\xe4\x93\x02\x44\x12\x42/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}\x12\xaa\x01\n\x0cListClusters\x12-.google.cloud.dataproc.v1.ListClustersRequest\x1a..google.cloud.dataproc.v1.ListClustersResponse\";\x82\xd3\xe4\x93\x02\x35\x12\x33/v1/projects/{project_id}/regions/{region}/clusters\x12\xba\x01\n\x0f\x44iagnoseCluster\x12\x30.google.cloud.dataproc.v1.DiagnoseClusterRequest\x1a\x1d.google.longrunning.Operation\"V\x82\xd3\xe4\x93\x02P\"K/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose:\x01*Bq\n\x1c\x63om.google.cloud.dataproc.v1B\rClustersProtoP\[email protected]/genproto/googleapis/cloud/dataproc/v1;dataprocb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_dataproc__v1_dot_proto_dot_operations__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_CLUSTERSTATUS_STATE = _descriptor.EnumDescriptor(
name='State',
full_name='google.cloud.dataproc.v1.ClusterStatus.State',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CREATING', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RUNNING', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DELETING', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UPDATING', index=5, number=5,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2405,
serialized_end=2491,
)
_sym_db.RegisterEnumDescriptor(_CLUSTERSTATUS_STATE)
_CLUSTERSTATUS_SUBSTATE = _descriptor.EnumDescriptor(
name='Substate',
full_name='google.cloud.dataproc.v1.ClusterStatus.Substate',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNHEALTHY', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STALE_STATUS', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2493,
serialized_end=2553,
)
_sym_db.RegisterEnumDescriptor(_CLUSTERSTATUS_SUBSTATE)
_CLUSTER_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='google.cloud.dataproc.v1.Cluster.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='google.cloud.dataproc.v1.Cluster.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='google.cloud.dataproc.v1.Cluster.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=667,
serialized_end=712,
)
_CLUSTER = _descriptor.Descriptor(
name='Cluster',
full_name='google.cloud.dataproc.v1.Cluster',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.cloud.dataproc.v1.Cluster.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_name', full_name='google.cloud.dataproc.v1.Cluster.cluster_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='config', full_name='google.cloud.dataproc.v1.Cluster.config', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='google.cloud.dataproc.v1.Cluster.labels', index=3,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='google.cloud.dataproc.v1.Cluster.status', index=4,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status_history', full_name='google.cloud.dataproc.v1.Cluster.status_history', index=5,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_uuid', full_name='google.cloud.dataproc.v1.Cluster.cluster_uuid', index=6,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metrics', full_name='google.cloud.dataproc.v1.Cluster.metrics', index=7,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CLUSTER_LABELSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=291,
serialized_end=712,
)
_CLUSTERCONFIG = _descriptor.Descriptor(
name='ClusterConfig',
full_name='google.cloud.dataproc.v1.ClusterConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='config_bucket', full_name='google.cloud.dataproc.v1.ClusterConfig.config_bucket', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gce_cluster_config', full_name='google.cloud.dataproc.v1.ClusterConfig.gce_cluster_config', index=1,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='master_config', full_name='google.cloud.dataproc.v1.ClusterConfig.master_config', index=2,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='worker_config', full_name='google.cloud.dataproc.v1.ClusterConfig.worker_config', index=3,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='secondary_worker_config', full_name='google.cloud.dataproc.v1.ClusterConfig.secondary_worker_config', index=4,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='software_config', full_name='google.cloud.dataproc.v1.ClusterConfig.software_config', index=5,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='initialization_actions', full_name='google.cloud.dataproc.v1.ClusterConfig.initialization_actions', index=6,
number=11, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=715,
serialized_end=1196,
)
_GCECLUSTERCONFIG_METADATAENTRY = _descriptor.Descriptor(
name='MetadataEntry',
full_name='google.cloud.dataproc.v1.GceClusterConfig.MetadataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='google.cloud.dataproc.v1.GceClusterConfig.MetadataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='google.cloud.dataproc.v1.GceClusterConfig.MetadataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1455,
serialized_end=1502,
)
_GCECLUSTERCONFIG = _descriptor.Descriptor(
name='GceClusterConfig',
full_name='google.cloud.dataproc.v1.GceClusterConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='zone_uri', full_name='google.cloud.dataproc.v1.GceClusterConfig.zone_uri', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='network_uri', full_name='google.cloud.dataproc.v1.GceClusterConfig.network_uri', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subnetwork_uri', full_name='google.cloud.dataproc.v1.GceClusterConfig.subnetwork_uri', index=2,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='internal_ip_only', full_name='google.cloud.dataproc.v1.GceClusterConfig.internal_ip_only', index=3,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='service_account', full_name='google.cloud.dataproc.v1.GceClusterConfig.service_account', index=4,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='service_account_scopes', full_name='google.cloud.dataproc.v1.GceClusterConfig.service_account_scopes', index=5,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='google.cloud.dataproc.v1.GceClusterConfig.tags', index=6,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='google.cloud.dataproc.v1.GceClusterConfig.metadata', index=7,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_GCECLUSTERCONFIG_METADATAENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1199,
serialized_end=1502,
)
_INSTANCEGROUPCONFIG = _descriptor.Descriptor(
name='InstanceGroupConfig',
full_name='google.cloud.dataproc.v1.InstanceGroupConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num_instances', full_name='google.cloud.dataproc.v1.InstanceGroupConfig.num_instances', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instance_names', full_name='google.cloud.dataproc.v1.InstanceGroupConfig.instance_names', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_uri', full_name='google.cloud.dataproc.v1.InstanceGroupConfig.image_uri', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='machine_type_uri', full_name='google.cloud.dataproc.v1.InstanceGroupConfig.machine_type_uri', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='disk_config', full_name='google.cloud.dataproc.v1.InstanceGroupConfig.disk_config', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_preemptible', full_name='google.cloud.dataproc.v1.InstanceGroupConfig.is_preemptible', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='managed_group_config', full_name='google.cloud.dataproc.v1.InstanceGroupConfig.managed_group_config', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='accelerators', full_name='google.cloud.dataproc.v1.InstanceGroupConfig.accelerators', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1505,
serialized_end=1844,
)
_MANAGEDGROUPCONFIG = _descriptor.Descriptor(
name='ManagedGroupConfig',
full_name='google.cloud.dataproc.v1.ManagedGroupConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instance_template_name', full_name='google.cloud.dataproc.v1.ManagedGroupConfig.instance_template_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instance_group_manager_name', full_name='google.cloud.dataproc.v1.ManagedGroupConfig.instance_group_manager_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1846,
serialized_end=1935,
)
_ACCELERATORCONFIG = _descriptor.Descriptor(
name='AcceleratorConfig',
full_name='google.cloud.dataproc.v1.AcceleratorConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='accelerator_type_uri', full_name='google.cloud.dataproc.v1.AcceleratorConfig.accelerator_type_uri', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='accelerator_count', full_name='google.cloud.dataproc.v1.AcceleratorConfig.accelerator_count', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1937,
serialized_end=2013,
)
_DISKCONFIG = _descriptor.Descriptor(
name='DiskConfig',
full_name='google.cloud.dataproc.v1.DiskConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='boot_disk_size_gb', full_name='google.cloud.dataproc.v1.DiskConfig.boot_disk_size_gb', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_local_ssds', full_name='google.cloud.dataproc.v1.DiskConfig.num_local_ssds', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2015,
serialized_end=2078,
)
_NODEINITIALIZATIONACTION = _descriptor.Descriptor(
name='NodeInitializationAction',
full_name='google.cloud.dataproc.v1.NodeInitializationAction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='executable_file', full_name='google.cloud.dataproc.v1.NodeInitializationAction.executable_file', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='execution_timeout', full_name='google.cloud.dataproc.v1.NodeInitializationAction.execution_timeout', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2080,
serialized_end=2185,
)
_CLUSTERSTATUS = _descriptor.Descriptor(
name='ClusterStatus',
full_name='google.cloud.dataproc.v1.ClusterStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='state', full_name='google.cloud.dataproc.v1.ClusterStatus.state', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='detail', full_name='google.cloud.dataproc.v1.ClusterStatus.detail', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state_start_time', full_name='google.cloud.dataproc.v1.ClusterStatus.state_start_time', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='substate', full_name='google.cloud.dataproc.v1.ClusterStatus.substate', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_CLUSTERSTATUS_STATE,
_CLUSTERSTATUS_SUBSTATE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2188,
serialized_end=2553,
)
_SOFTWARECONFIG_PROPERTIESENTRY = _descriptor.Descriptor(
name='PropertiesEntry',
full_name='google.cloud.dataproc.v1.SoftwareConfig.PropertiesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='google.cloud.dataproc.v1.SoftwareConfig.PropertiesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='google.cloud.dataproc.v1.SoftwareConfig.PropertiesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2675,
serialized_end=2724,
)
_SOFTWARECONFIG = _descriptor.Descriptor(
name='SoftwareConfig',
full_name='google.cloud.dataproc.v1.SoftwareConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='image_version', full_name='google.cloud.dataproc.v1.SoftwareConfig.image_version', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='properties', full_name='google.cloud.dataproc.v1.SoftwareConfig.properties', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SOFTWARECONFIG_PROPERTIESENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2556,
serialized_end=2724,
)
_CLUSTERMETRICS_HDFSMETRICSENTRY = _descriptor.Descriptor(
name='HdfsMetricsEntry',
full_name='google.cloud.dataproc.v1.ClusterMetrics.HdfsMetricsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='google.cloud.dataproc.v1.ClusterMetrics.HdfsMetricsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='google.cloud.dataproc.v1.ClusterMetrics.HdfsMetricsEntry.value', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2907,
serialized_end=2957,
)
_CLUSTERMETRICS_YARNMETRICSENTRY = _descriptor.Descriptor(
name='YarnMetricsEntry',
full_name='google.cloud.dataproc.v1.ClusterMetrics.YarnMetricsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='google.cloud.dataproc.v1.ClusterMetrics.YarnMetricsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='google.cloud.dataproc.v1.ClusterMetrics.YarnMetricsEntry.value', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2959,
serialized_end=3009,
)
_CLUSTERMETRICS = _descriptor.Descriptor(
name='ClusterMetrics',
full_name='google.cloud.dataproc.v1.ClusterMetrics',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='hdfs_metrics', full_name='google.cloud.dataproc.v1.ClusterMetrics.hdfs_metrics', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='yarn_metrics', full_name='google.cloud.dataproc.v1.ClusterMetrics.yarn_metrics', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CLUSTERMETRICS_HDFSMETRICSENTRY, _CLUSTERMETRICS_YARNMETRICSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2727,
serialized_end=3009,
)
_CREATECLUSTERREQUEST = _descriptor.Descriptor(
name='CreateClusterRequest',
full_name='google.cloud.dataproc.v1.CreateClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.cloud.dataproc.v1.CreateClusterRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='region', full_name='google.cloud.dataproc.v1.CreateClusterRequest.region', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster', full_name='google.cloud.dataproc.v1.CreateClusterRequest.cluster', index=2,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3011,
serialized_end=3121,
)
_UPDATECLUSTERREQUEST = _descriptor.Descriptor(
name='UpdateClusterRequest',
full_name='google.cloud.dataproc.v1.UpdateClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.cloud.dataproc.v1.UpdateClusterRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='region', full_name='google.cloud.dataproc.v1.UpdateClusterRequest.region', index=1,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_name', full_name='google.cloud.dataproc.v1.UpdateClusterRequest.cluster_name', index=2,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster', full_name='google.cloud.dataproc.v1.UpdateClusterRequest.cluster', index=3,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='update_mask', full_name='google.cloud.dataproc.v1.UpdateClusterRequest.update_mask', index=4,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3124,
serialized_end=3305,
)
_DELETECLUSTERREQUEST = _descriptor.Descriptor(
name='DeleteClusterRequest',
full_name='google.cloud.dataproc.v1.DeleteClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.cloud.dataproc.v1.DeleteClusterRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='region', full_name='google.cloud.dataproc.v1.DeleteClusterRequest.region', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_name', full_name='google.cloud.dataproc.v1.DeleteClusterRequest.cluster_name', index=2,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3307,
serialized_end=3387,
)
_GETCLUSTERREQUEST = _descriptor.Descriptor(
name='GetClusterRequest',
full_name='google.cloud.dataproc.v1.GetClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.cloud.dataproc.v1.GetClusterRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='region', full_name='google.cloud.dataproc.v1.GetClusterRequest.region', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_name', full_name='google.cloud.dataproc.v1.GetClusterRequest.cluster_name', index=2,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3389,
serialized_end=3466,
)
_LISTCLUSTERSREQUEST = _descriptor.Descriptor(
name='ListClustersRequest',
full_name='google.cloud.dataproc.v1.ListClustersRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.cloud.dataproc.v1.ListClustersRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='region', full_name='google.cloud.dataproc.v1.ListClustersRequest.region', index=1,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='filter', full_name='google.cloud.dataproc.v1.ListClustersRequest.filter', index=2,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_size', full_name='google.cloud.dataproc.v1.ListClustersRequest.page_size', index=3,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_token', full_name='google.cloud.dataproc.v1.ListClustersRequest.page_token', index=4,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3468,
serialized_end=3580,
)
_LISTCLUSTERSRESPONSE = _descriptor.Descriptor(
name='ListClustersResponse',
full_name='google.cloud.dataproc.v1.ListClustersResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='clusters', full_name='google.cloud.dataproc.v1.ListClustersResponse.clusters', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='google.cloud.dataproc.v1.ListClustersResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3582,
serialized_end=3682,
)
_DIAGNOSECLUSTERREQUEST = _descriptor.Descriptor(
name='DiagnoseClusterRequest',
full_name='google.cloud.dataproc.v1.DiagnoseClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.cloud.dataproc.v1.DiagnoseClusterRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='region', full_name='google.cloud.dataproc.v1.DiagnoseClusterRequest.region', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_name', full_name='google.cloud.dataproc.v1.DiagnoseClusterRequest.cluster_name', index=2,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3684,
serialized_end=3766,
)
_DIAGNOSECLUSTERRESULTS = _descriptor.Descriptor(
name='DiagnoseClusterResults',
full_name='google.cloud.dataproc.v1.DiagnoseClusterResults',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='output_uri', full_name='google.cloud.dataproc.v1.DiagnoseClusterResults.output_uri', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3768,
serialized_end=3812,
)
_CLUSTER_LABELSENTRY.containing_type = _CLUSTER
_CLUSTER.fields_by_name['config'].message_type = _CLUSTERCONFIG
_CLUSTER.fields_by_name['labels'].message_type = _CLUSTER_LABELSENTRY
_CLUSTER.fields_by_name['status'].message_type = _CLUSTERSTATUS
_CLUSTER.fields_by_name['status_history'].message_type = _CLUSTERSTATUS
_CLUSTER.fields_by_name['metrics'].message_type = _CLUSTERMETRICS
_CLUSTERCONFIG.fields_by_name['gce_cluster_config'].message_type = _GCECLUSTERCONFIG
_CLUSTERCONFIG.fields_by_name['master_config'].message_type = _INSTANCEGROUPCONFIG
_CLUSTERCONFIG.fields_by_name['worker_config'].message_type = _INSTANCEGROUPCONFIG
_CLUSTERCONFIG.fields_by_name['secondary_worker_config'].message_type = _INSTANCEGROUPCONFIG
_CLUSTERCONFIG.fields_by_name['software_config'].message_type = _SOFTWARECONFIG
_CLUSTERCONFIG.fields_by_name['initialization_actions'].message_type = _NODEINITIALIZATIONACTION
_GCECLUSTERCONFIG_METADATAENTRY.containing_type = _GCECLUSTERCONFIG
_GCECLUSTERCONFIG.fields_by_name['metadata'].message_type = _GCECLUSTERCONFIG_METADATAENTRY
_INSTANCEGROUPCONFIG.fields_by_name['disk_config'].message_type = _DISKCONFIG
_INSTANCEGROUPCONFIG.fields_by_name['managed_group_config'].message_type = _MANAGEDGROUPCONFIG
_INSTANCEGROUPCONFIG.fields_by_name['accelerators'].message_type = _ACCELERATORCONFIG
_NODEINITIALIZATIONACTION.fields_by_name['execution_timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_CLUSTERSTATUS.fields_by_name['state'].enum_type = _CLUSTERSTATUS_STATE
_CLUSTERSTATUS.fields_by_name['state_start_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_CLUSTERSTATUS.fields_by_name['substate'].enum_type = _CLUSTERSTATUS_SUBSTATE
_CLUSTERSTATUS_STATE.containing_type = _CLUSTERSTATUS
_CLUSTERSTATUS_SUBSTATE.containing_type = _CLUSTERSTATUS
_SOFTWARECONFIG_PROPERTIESENTRY.containing_type = _SOFTWARECONFIG
_SOFTWARECONFIG.fields_by_name['properties'].message_type = _SOFTWARECONFIG_PROPERTIESENTRY
_CLUSTERMETRICS_HDFSMETRICSENTRY.containing_type = _CLUSTERMETRICS
_CLUSTERMETRICS_YARNMETRICSENTRY.containing_type = _CLUSTERMETRICS
_CLUSTERMETRICS.fields_by_name['hdfs_metrics'].message_type = _CLUSTERMETRICS_HDFSMETRICSENTRY
_CLUSTERMETRICS.fields_by_name['yarn_metrics'].message_type = _CLUSTERMETRICS_YARNMETRICSENTRY
_CREATECLUSTERREQUEST.fields_by_name['cluster'].message_type = _CLUSTER
_UPDATECLUSTERREQUEST.fields_by_name['cluster'].message_type = _CLUSTER
_UPDATECLUSTERREQUEST.fields_by_name['update_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_LISTCLUSTERSRESPONSE.fields_by_name['clusters'].message_type = _CLUSTER
DESCRIPTOR.message_types_by_name['Cluster'] = _CLUSTER
DESCRIPTOR.message_types_by_name['ClusterConfig'] = _CLUSTERCONFIG
DESCRIPTOR.message_types_by_name['GceClusterConfig'] = _GCECLUSTERCONFIG
DESCRIPTOR.message_types_by_name['InstanceGroupConfig'] = _INSTANCEGROUPCONFIG
DESCRIPTOR.message_types_by_name['ManagedGroupConfig'] = _MANAGEDGROUPCONFIG
DESCRIPTOR.message_types_by_name['AcceleratorConfig'] = _ACCELERATORCONFIG
DESCRIPTOR.message_types_by_name['DiskConfig'] = _DISKCONFIG
DESCRIPTOR.message_types_by_name['NodeInitializationAction'] = _NODEINITIALIZATIONACTION
DESCRIPTOR.message_types_by_name['ClusterStatus'] = _CLUSTERSTATUS
DESCRIPTOR.message_types_by_name['SoftwareConfig'] = _SOFTWARECONFIG
DESCRIPTOR.message_types_by_name['ClusterMetrics'] = _CLUSTERMETRICS
DESCRIPTOR.message_types_by_name['CreateClusterRequest'] = _CREATECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['UpdateClusterRequest'] = _UPDATECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['DeleteClusterRequest'] = _DELETECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['GetClusterRequest'] = _GETCLUSTERREQUEST
DESCRIPTOR.message_types_by_name['ListClustersRequest'] = _LISTCLUSTERSREQUEST
DESCRIPTOR.message_types_by_name['ListClustersResponse'] = _LISTCLUSTERSRESPONSE
DESCRIPTOR.message_types_by_name['DiagnoseClusterRequest'] = _DIAGNOSECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['DiagnoseClusterResults'] = _DIAGNOSECLUSTERRESULTS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Cluster = _reflection.GeneratedProtocolMessageType('Cluster', (_message.Message,), dict(
LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict(
DESCRIPTOR = _CLUSTER_LABELSENTRY,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.Cluster.LabelsEntry)
))
,
DESCRIPTOR = _CLUSTER,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
,
__doc__ = """Describes the identifying information, config, and status of a cluster
of Google Compute Engine instances.
Attributes:
project_id:
Required. The Google Cloud Platform project ID that the
cluster belongs to.
cluster_name:
Required. The cluster name. Cluster names within a project
must be unique. Names of deleted clusters can be reused.
config:
Required. The cluster config. Note that Cloud Dataproc may set
default values, and values may change when clusters are
updated.
labels:
Optional. The labels to associate with this cluster. Label
**keys** must contain 1 to 63 characters, and must conform to
`RFC 1035 <https://www.ietf.org/rfc/rfc1035.txt>`__. Label
**values** may be empty, but, if present, must contain 1 to 63
characters, and must conform to `RFC 1035
<https://www.ietf.org/rfc/rfc1035.txt>`__. No more than 32
labels can be associated with a cluster.
status:
Output-only. Cluster status.
status_history:
Output-only. The previous cluster status.
cluster_uuid:
Output-only. A cluster UUID (Unique Universal Identifier).
Cloud Dataproc generates this value when it creates the
cluster.
metrics:
Contains cluster daemon metrics such as HDFS and YARN stats.
**Beta Feature**: This report is available for testing
purposes only. It may be changed before final release.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.Cluster)
))
_sym_db.RegisterMessage(Cluster)
_sym_db.RegisterMessage(Cluster.LabelsEntry)
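# --- Illustrative usage sketch (hand-added example, not emitted by protoc) ---
# The registered Cluster class behaves like any generated protobuf message:
# scalar fields and the 'labels' map can be set via keyword arguments, and
# instances round-trip through SerializeToString()/FromString(). The project
# and cluster names below are hypothetical placeholders.
def _example_cluster_roundtrip():
    # Build a minimal Cluster with one labels map entry.
    cluster = Cluster(
        project_id='example-project',      # hypothetical project ID
        cluster_name='example-cluster',    # hypothetical cluster name
        labels={'env': 'test'},            # map<string, string> field
    )
    # Serialize to bytes and parse back into a new message instance.
    data = cluster.SerializeToString()
    return Cluster.FromString(data)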
ClusterConfig = _reflection.GeneratedProtocolMessageType('ClusterConfig', (_message.Message,), dict(
DESCRIPTOR = _CLUSTERCONFIG,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
,
__doc__ = """The cluster config.
Attributes:
config_bucket:
Optional. A Google Cloud Storage staging bucket used for
sharing generated SSH keys and config. If you do not specify a
staging bucket, Cloud Dataproc will determine an appropriate
Cloud Storage location (US, ASIA, or EU) for your cluster's
staging bucket according to the Google Compute Engine zone
where your cluster is deployed, and then it will create and
manage this project-level, per-location bucket for you.
gce_cluster_config:
Required. The shared Google Compute Engine config settings for
all instances in a cluster.
master_config:
Optional. The Google Compute Engine config settings for the
master instance in a cluster.
worker_config:
Optional. The Google Compute Engine config settings for worker
instances in a cluster.
secondary_worker_config:
Optional. The Google Compute Engine config settings for
additional worker instances in a cluster.
software_config:
Optional. The config settings for software inside the cluster.
initialization_actions:
Optional. Commands to execute on each node after config is
completed. By default, executables are run on master and all
worker nodes. You can test a node's ``role`` metadata to run
an executable on a master or worker node, as shown below using
``curl`` (you can also use ``wget``): :: ROLE=$(curl -H
Metadata-Flavor:Google http://metadata/computeMetadata/v1/inst
ance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master'
]]; then ... master specific actions ... else
... worker specific actions ... fi
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterConfig)
))
_sym_db.RegisterMessage(ClusterConfig)
GceClusterConfig = _reflection.GeneratedProtocolMessageType('GceClusterConfig', (_message.Message,), dict(
MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
DESCRIPTOR = _GCECLUSTERCONFIG_METADATAENTRY,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.GceClusterConfig.MetadataEntry)
))
,
DESCRIPTOR = _GCECLUSTERCONFIG,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
,
__doc__ = """Common config settings for resources of Google Compute Engine cluster
instances, applicable to all instances in the cluster.
Attributes:
zone_uri:
Optional. The zone where the Google Compute Engine cluster
will be located. On a create request, it is required in the
"global" region. If omitted in a non-global Cloud Dataproc
region, the service will pick a zone in the corresponding
Compute Engine region. On a get request, zone will always be
present. A full URL, partial URI, or short name are valid.
Examples: - ``https://www.googleapis.com/compute/v1/projects
/[project_id]/zones/[zone]`` -
``projects/[project_id]/zones/[zone]`` - ``us-central1-f``
network_uri:
Optional. The Google Compute Engine network to be used for
machine communications. Cannot be specified with
subnetwork\_uri. If neither ``network_uri`` nor
``subnetwork_uri`` is specified, the "default" network of the
project is used, if it exists. Cannot be a "Custom Subnet
Network" (see `Using Subnetworks
</compute/docs/subnetworks>`__ for more information). A full
URL, partial URI, or short name are valid. Examples: - ``htt
ps://www.googleapis.com/compute/v1/projects/[project_id]/regio
ns/global/default`` -
``projects/[project_id]/regions/global/default`` -
``default``
subnetwork_uri:
Optional. The Google Compute Engine subnetwork to be used for
machine communications. Cannot be specified with network\_uri.
A full URL, partial URI, or short name are valid. Examples: -
``https://www.googleapis.com/compute/v1/projects/[project_id]/
regions/us-east1/sub0`` - ``projects/[project_id]/regions/us-
east1/sub0`` - ``sub0``
internal_ip_only:
Optional. If true, all instances in the cluster will only have
internal IP addresses. By default, clusters are not restricted
to internal IP addresses, and will have ephemeral external IP
addresses assigned to each instance. This ``internal_ip_only``
restriction can only be enabled for subnetwork enabled
networks, and all off-cluster dependencies must be configured
to be accessible without external IP addresses.
service_account:
Optional. The service account of the instances. Defaults to
the default Google Compute Engine service account. Custom
service accounts need permissions equivalent to the following
IAM roles: - roles/logging.logWriter -
roles/storage.objectAdmin (see
https://cloud.google.com/compute/docs/access/service-
accounts#custom\_service\_accounts for more information).
Example: ``[account_id]@[project_id].iam.gserviceaccount.com``
service_account_scopes:
Optional. The URIs of service account scopes to be included in
Google Compute Engine instances. The following base set of
scopes is always included: -
https://www.googleapis.com/auth/cloud.useraccounts.readonly -
https://www.googleapis.com/auth/devstorage.read\_write -
https://www.googleapis.com/auth/logging.write If no scopes
are specified, the following defaults are also provided: -
https://www.googleapis.com/auth/bigquery -
https://www.googleapis.com/auth/bigtable.admin.table -
https://www.googleapis.com/auth/bigtable.data -
https://www.googleapis.com/auth/devstorage.full\_control
tags:
The Google Compute Engine tags to add to all instances (see
`Tagging instances </compute/docs/label-or-tag-
resources#tags>`__).
metadata:
The Google Compute Engine metadata entries to add to all
instances (see `Project and instance metadata
<https://cloud.google.com/compute/docs/storing-retrieving-
metadata#project_and_instance_metadata>`__).
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.GceClusterConfig)
))
_sym_db.RegisterMessage(GceClusterConfig)
_sym_db.RegisterMessage(GceClusterConfig.MetadataEntry)
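# --- Illustrative usage sketch (hand-added example, not emitted by protoc) ---
# GceClusterConfig accepts the short-name URI forms documented above; repeated
# string fields take lists and the 'metadata' map takes a dict. The zone, tag,
# and metadata values are hypothetical.
def _example_gce_cluster_config():
    return GceClusterConfig(
        zone_uri='us-central1-f',          # short-name form of the zone
        tags=['dataproc', 'example'],      # repeated string field
        metadata={'purpose': 'demo'},      # map<string, string> field
        internal_ip_only=False,
    )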
InstanceGroupConfig = _reflection.GeneratedProtocolMessageType('InstanceGroupConfig', (_message.Message,), dict(
DESCRIPTOR = _INSTANCEGROUPCONFIG,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
,
__doc__ = """Optional. The config settings for Google Compute Engine resources in an
instance group, such as a master or worker group.
Attributes:
num_instances:
Optional. The number of VM instances in the instance group.
For master instance groups, must be set to 1.
instance_names:
Optional. The list of instance names. Cloud Dataproc derives
the names from ``cluster_name``, ``num_instances``, and the
instance group if not set by user (recommended practice is to
let Cloud Dataproc derive the name).
image_uri:
Output-only. The Google Compute Engine image resource used for
cluster instances. Inferred from
``SoftwareConfig.image_version``.
machine_type_uri:
Optional. The Google Compute Engine machine type used for
cluster instances. A full URL, partial URI, or short name are
valid. Examples: - ``https://www.googleapis.com/compute/v1/p
rojects/[project_id]/zones/us-
east1-a/machineTypes/n1-standard-2`` -
``projects/[project_id]/zones/us-
east1-a/machineTypes/n1-standard-2`` - ``n1-standard-2``
disk_config:
Optional. Disk option config settings.
is_preemptible:
Optional. Specifies that this instance group contains
preemptible instances.
managed_group_config:
Output-only. The config for Google Compute Engine Instance
Group Manager that manages this group. This is only used for
preemptible instance groups.
accelerators:
Optional. The Google Compute Engine accelerator configuration
for these instances. **Beta Feature**: This feature is still
under development. It may be changed before final release.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.InstanceGroupConfig)
))
_sym_db.RegisterMessage(InstanceGroupConfig)
ManagedGroupConfig = _reflection.GeneratedProtocolMessageType('ManagedGroupConfig', (_message.Message,), dict(
DESCRIPTOR = _MANAGEDGROUPCONFIG,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
,
__doc__ = """Specifies the resources used to actively manage an instance group.
Attributes:
instance_template_name:
Output-only. The name of the Instance Template used for the
Managed Instance Group.
instance_group_manager_name:
Output-only. The name of the Instance Group Manager for this
group.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ManagedGroupConfig)
))
_sym_db.RegisterMessage(ManagedGroupConfig)
AcceleratorConfig = _reflection.GeneratedProtocolMessageType('AcceleratorConfig', (_message.Message,), dict(
DESCRIPTOR = _ACCELERATORCONFIG,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
,
__doc__ = """Specifies the type and number of accelerator cards attached to the
instances of an instance group (see `GPUs on Compute
Engine </compute/docs/gpus/>`__).
Attributes:
accelerator_type_uri:
Full URL, partial URI, or short name of the accelerator type
resource to expose to this instance. See `Google Compute
Engine AcceleratorTypes
</compute/docs/reference/beta/acceleratorTypes>`__ Examples
\* ``https://www.googleapis.com/compute/beta/projects/[project
_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`` \*
``projects/[project_id]/zones/us-
east1-a/acceleratorTypes/nvidia-tesla-k80`` \* ``nvidia-
tesla-k80``
accelerator_count:
The number of the accelerator cards of this type exposed to
this instance.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.AcceleratorConfig)
))
_sym_db.RegisterMessage(AcceleratorConfig)
DiskConfig = _reflection.GeneratedProtocolMessageType('DiskConfig', (_message.Message,), dict(
DESCRIPTOR = _DISKCONFIG,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
,
__doc__ = """Specifies the config of disk options for a group of VM instances.
Attributes:
boot_disk_size_gb:
Optional. Size in GB of the boot disk (default is 500GB).
num_local_ssds:
Optional. Number of attached SSDs, from 0 to 4 (default is 0).
If SSDs are not attached, the boot disk is used to store
runtime logs and `HDFS <https://hadoop.apache.org/docs/r1.2.1/
hdfs_user_guide.html>`__ data. If one or more SSDs are
attached, this runtime bulk data is spread across them, and
the boot disk contains only basic config and installed
binaries.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.DiskConfig)
))
_sym_db.RegisterMessage(DiskConfig)
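# --- Illustrative usage sketch (hand-added example, not emitted by protoc) ---
# A worker group built from InstanceGroupConfig and the DiskConfig registered
# above; the machine type short name and sizes are hypothetical, with the boot
# disk size matching the documented 500 GB default.
def _example_worker_group_config():
    return InstanceGroupConfig(
        num_instances=2,                   # two worker VMs
        machine_type_uri='n1-standard-2',  # short-name form of the machine type
        disk_config=DiskConfig(
            boot_disk_size_gb=500,         # documented default size
            num_local_ssds=1,              # 0 to 4 local SSDs are allowed
        ),
    )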
NodeInitializationAction = _reflection.GeneratedProtocolMessageType('NodeInitializationAction', (_message.Message,), dict(
DESCRIPTOR = _NODEINITIALIZATIONACTION,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
,
__doc__ = """Specifies an executable to run on a fully configured node and a timeout
period for executable completion.
Attributes:
executable_file:
Required. Google Cloud Storage URI of executable file.
execution_timeout:
Optional. Amount of time executable has to complete. Default
is 10 minutes. Cluster creation fails with an explanatory
error message (the name of the executable that caused the
error and the exceeded timeout period) if the executable is
not completed at the end of the timeout period.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.NodeInitializationAction)
))
_sym_db.RegisterMessage(NodeInitializationAction)
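# --- Illustrative usage sketch (hand-added example, not emitted by protoc) ---
# An initialization action with an explicit execution timeout, built with the
# google.protobuf Duration message imported at the top of this module. The
# Cloud Storage URI is a hypothetical placeholder; 600 seconds mirrors the
# documented 10-minute default.
def _example_init_action():
    return NodeInitializationAction(
        executable_file='gs://example-bucket/install-deps.sh',
        execution_timeout=google_dot_protobuf_dot_duration__pb2.Duration(seconds=600),
    )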
ClusterStatus = _reflection.GeneratedProtocolMessageType('ClusterStatus', (_message.Message,), dict(
DESCRIPTOR = _CLUSTERSTATUS,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
,
__doc__ = """The status of a cluster and its instances.
Attributes:
state:
Output-only. The cluster's state.
detail:
Output-only. Optional details of cluster's state.
state_start_time:
Output-only. Time when this state was entered.
substate:
Output-only. Additional state information that includes status
reported by the agent.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterStatus)
))
_sym_db.RegisterMessage(ClusterStatus)
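# --- Illustrative usage sketch (hand-added example, not emitted by protoc) ---
# Nested proto3 enums are exposed both as class attributes (ClusterStatus.RUNNING)
# and through the State/Substate enum wrappers, which convert between symbolic
# names and numbers.
def _example_cluster_status_enum():
    status = ClusterStatus(state=ClusterStatus.RUNNING)
    # Convert the numeric state back to its symbolic name ('RUNNING').
    return ClusterStatus.State.Name(status.state)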
SoftwareConfig = _reflection.GeneratedProtocolMessageType('SoftwareConfig', (_message.Message,), dict(
PropertiesEntry = _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), dict(
DESCRIPTOR = _SOFTWARECONFIG_PROPERTIESENTRY,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.SoftwareConfig.PropertiesEntry)
))
,
DESCRIPTOR = _SOFTWARECONFIG,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
,
__doc__ = """Specifies the selection and config of software inside the cluster.
Attributes:
image_version:
Optional. The version of software inside the cluster. It must
match the regular expression ``[0-9]+\.[0-9]+``. If
unspecified, it defaults to the latest version (see `Cloud
Dataproc Versioning </dataproc/versioning>`__).
properties:
Optional. The properties to set on daemon config files.
Property keys are specified in ``prefix:property`` format,
such as ``core:fs.defaultFS``. The following are supported
prefixes and their mappings: - capacity-scheduler:
``capacity-scheduler.xml`` - core: ``core-site.xml`` -
distcp: ``distcp-default.xml`` - hdfs: ``hdfs-site.xml`` -
hive: ``hive-site.xml`` - mapred: ``mapred-site.xml`` - pig:
``pig.properties`` - spark: ``spark-defaults.conf`` - yarn:
``yarn-site.xml`` For more information, see `Cluster
properties </dataproc/docs/concepts/cluster-properties>`__.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.SoftwareConfig)
))
_sym_db.RegisterMessage(SoftwareConfig)
_sym_db.RegisterMessage(SoftwareConfig.PropertiesEntry)
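# Illustrative sketch (not part of the generated schema): populating
# SoftwareConfig with the ``prefix:property`` convention described in the
# docstring above. The image version and property value are placeholders.
def _example_software_config():
    return SoftwareConfig(
        image_version='1.2',  # placeholder version string
        properties={'core:fs.defaultFS': 'hdfs://example-m'},  # placeholder
    )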
ClusterMetrics = _reflection.GeneratedProtocolMessageType('ClusterMetrics', (_message.Message,), dict(
HdfsMetricsEntry = _reflection.GeneratedProtocolMessageType('HdfsMetricsEntry', (_message.Message,), dict(
DESCRIPTOR = _CLUSTERMETRICS_HDFSMETRICSENTRY,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterMetrics.HdfsMetricsEntry)
))
,
YarnMetricsEntry = _reflection.GeneratedProtocolMessageType('YarnMetricsEntry', (_message.Message,), dict(
DESCRIPTOR = _CLUSTERMETRICS_YARNMETRICSENTRY,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterMetrics.YarnMetricsEntry)
))
,
DESCRIPTOR = _CLUSTERMETRICS,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
,
__doc__ = """Contains cluster daemon metrics, such as HDFS and YARN stats.
**Beta Feature**: This report is available for testing purposes only. It
may be changed before final release.
Attributes:
hdfs_metrics:
The HDFS metrics.
yarn_metrics:
The YARN metrics.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterMetrics)
))
_sym_db.RegisterMessage(ClusterMetrics)
_sym_db.RegisterMessage(ClusterMetrics.HdfsMetricsEntry)
_sym_db.RegisterMessage(ClusterMetrics.YarnMetricsEntry)
CreateClusterRequest = _reflection.GeneratedProtocolMessageType('CreateClusterRequest', (_message.Message,), dict(
DESCRIPTOR = _CREATECLUSTERREQUEST,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
,
__doc__ = """A request to create a cluster.
Attributes:
project_id:
Required. The ID of the Google Cloud Platform project that the
cluster belongs to.
region:
Required. The Cloud Dataproc region in which to handle the
request.
cluster:
Required. The cluster to create.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.CreateClusterRequest)
))
_sym_db.RegisterMessage(CreateClusterRequest)
UpdateClusterRequest = _reflection.GeneratedProtocolMessageType('UpdateClusterRequest', (_message.Message,), dict(
DESCRIPTOR = _UPDATECLUSTERREQUEST,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
,
__doc__ = """A request to update a cluster.
Attributes:
project_id:
Required. The ID of the Google Cloud Platform project the
cluster belongs to.
region:
Required. The Cloud Dataproc region in which to handle the
request.
cluster_name:
Required. The cluster name.
cluster:
Required. The changes to the cluster.
update_mask:
Required. Specifies the path, relative to ``Cluster``, of the
field to update. For example, to change the number of workers
in a cluster to 5, the ``update_mask`` parameter would be
specified as ``config.worker_config.num_instances``, and the
``PATCH`` request body would specify the new value, as
follows: ``{ "config": { "workerConfig": { "numInstances": "5" } } }``
Similarly, to change the number of preemptible workers in a cluster to
5, the ``update_mask`` parameter would be
``config.secondary_worker_config.num_instances``, and the ``PATCH``
request body would be set as follows:
``{ "config": { "secondaryWorkerConfig": { "numInstances": "5" } } }``
Note: Currently, only the following fields can be updated:
- ``labels``: Update labels
- ``config.worker_config.num_instances``: Resize primary worker group
- ``config.secondary_worker_config.num_instances``: Resize secondary worker group
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.UpdateClusterRequest)
))
_sym_db.RegisterMessage(UpdateClusterRequest)
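# Illustrative sketch: building an UpdateClusterRequest that resizes the
# primary worker group, as the docstring above describes. The ids passed in
# are placeholders, and the nested path assumes the standard v1
# Cluster -> config -> worker_config -> num_instances layout.
def _example_resize_workers_request(project_id, region, cluster_name, new_size):
    from google.protobuf import field_mask_pb2
    request = UpdateClusterRequest(
        project_id=project_id,
        region=region,
        cluster_name=cluster_name,
        update_mask=field_mask_pb2.FieldMask(
            paths=['config.worker_config.num_instances']),
    )
    # Only the masked field needs to be set on the embedded cluster.
    request.cluster.config.worker_config.num_instances = new_size
    return request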
DeleteClusterRequest = _reflection.GeneratedProtocolMessageType('DeleteClusterRequest', (_message.Message,), dict(
DESCRIPTOR = _DELETECLUSTERREQUEST,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
,
__doc__ = """A request to delete a cluster.
Attributes:
project_id:
Required. The ID of the Google Cloud Platform project that the
cluster belongs to.
region:
Required. The Cloud Dataproc region in which to handle the
request.
cluster_name:
Required. The cluster name.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.DeleteClusterRequest)
))
_sym_db.RegisterMessage(DeleteClusterRequest)
GetClusterRequest = _reflection.GeneratedProtocolMessageType('GetClusterRequest', (_message.Message,), dict(
DESCRIPTOR = _GETCLUSTERREQUEST,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
,
__doc__ = """Request to get the resource representation for a cluster in a project.
Attributes:
project_id:
Required. The ID of the Google Cloud Platform project that the
cluster belongs to.
region:
Required. The Cloud Dataproc region in which to handle the
request.
cluster_name:
Required. The cluster name.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.GetClusterRequest)
))
_sym_db.RegisterMessage(GetClusterRequest)
ListClustersRequest = _reflection.GeneratedProtocolMessageType('ListClustersRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTCLUSTERSREQUEST,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
,
__doc__ = """A request to list the clusters in a project.
Attributes:
project_id:
Required. The ID of the Google Cloud Platform project that the
cluster belongs to.
region:
Required. The Cloud Dataproc region in which to handle the
request.
filter:
Optional. A filter constraining the clusters to list. Filters
are case-sensitive and have the following syntax: field =
value [AND [field = value]] ... where **field** is one of
``status.state``, ``clusterName``, or ``labels.[KEY]``, and
``[KEY]`` is a label key. **value** can be ``*`` to match all
values. ``status.state`` can be one of the following:
``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``,
``ERROR``, ``DELETING``, or ``UPDATING``. ``ACTIVE`` contains
the ``CREATING``, ``UPDATING``, and ``RUNNING`` states.
``INACTIVE`` contains the ``DELETING`` and ``ERROR`` states.
``clusterName`` is the name of the cluster provided at
creation time. Only the logical ``AND`` operator is supported;
space-separated items are treated as having an implicit
``AND`` operator. Example filter: status.state = ACTIVE AND
clusterName = mycluster AND labels.env = staging AND
labels.starred = \*
page_size:
Optional. The standard List page size.
page_token:
Optional. The standard List page token.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ListClustersRequest)
))
_sym_db.RegisterMessage(ListClustersRequest)
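# Illustrative sketch of the filter syntax documented above; the project id
# and region values are placeholders.
def _example_list_active_staging_clusters_request():
    return ListClustersRequest(
        project_id='my-project',  # placeholder
        region='global',          # placeholder
        filter='status.state = ACTIVE AND labels.env = staging',
        page_size=100,
    )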
ListClustersResponse = _reflection.GeneratedProtocolMessageType('ListClustersResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTCLUSTERSRESPONSE,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
,
__doc__ = """The list of all clusters in a project.
Attributes:
clusters:
Output-only. The clusters in the project.
next_page_token:
Output-only. This token is included in the response if there
are more results to fetch. To fetch additional results,
provide this value as the ``page_token`` in a subsequent
``ListClustersRequest``.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ListClustersResponse)
))
_sym_db.RegisterMessage(ListClustersResponse)
DiagnoseClusterRequest = _reflection.GeneratedProtocolMessageType('DiagnoseClusterRequest', (_message.Message,), dict(
DESCRIPTOR = _DIAGNOSECLUSTERREQUEST,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
,
__doc__ = """A request to collect cluster diagnostic information.
Attributes:
project_id:
Required. The ID of the Google Cloud Platform project that the
cluster belongs to.
region:
Required. The Cloud Dataproc region in which to handle the
request.
cluster_name:
Required. The cluster name.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.DiagnoseClusterRequest)
))
_sym_db.RegisterMessage(DiagnoseClusterRequest)
DiagnoseClusterResults = _reflection.GeneratedProtocolMessageType('DiagnoseClusterResults', (_message.Message,), dict(
DESCRIPTOR = _DIAGNOSECLUSTERRESULTS,
__module__ = 'google.cloud.dataproc_v1.proto.clusters_pb2'
,
__doc__ = """The location of diagnostic output.
Attributes:
output_uri:
Output-only. The Google Cloud Storage URI of the diagnostic
output. The output report is a plain text file with a summary
of collected diagnostics.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.DiagnoseClusterResults)
))
_sym_db.RegisterMessage(DiagnoseClusterResults)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.cloud.dataproc.v1B\rClustersProtoP\[email protected]/genproto/googleapis/cloud/dataproc/v1;dataproc'))
_CLUSTER_LABELSENTRY.has_options = True
_CLUSTER_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_GCECLUSTERCONFIG_METADATAENTRY.has_options = True
_GCECLUSTERCONFIG_METADATAENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_SOFTWARECONFIG_PROPERTIESENTRY.has_options = True
_SOFTWARECONFIG_PROPERTIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_CLUSTERMETRICS_HDFSMETRICSENTRY.has_options = True
_CLUSTERMETRICS_HDFSMETRICSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_CLUSTERMETRICS_YARNMETRICSENTRY.has_options = True
_CLUSTERMETRICS_YARNMETRICSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_CLUSTERCONTROLLER = _descriptor.ServiceDescriptor(
name='ClusterController',
full_name='google.cloud.dataproc.v1.ClusterController',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=3815,
serialized_end=4889,
methods=[
_descriptor.MethodDescriptor(
name='CreateCluster',
full_name='google.cloud.dataproc.v1.ClusterController.CreateCluster',
index=0,
containing_service=None,
input_type=_CREATECLUSTERREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002>\"3/v1/projects/{project_id}/regions/{region}/clusters:\007cluster')),
),
_descriptor.MethodDescriptor(
name='UpdateCluster',
full_name='google.cloud.dataproc.v1.ClusterController.UpdateCluster',
index=1,
containing_service=None,
input_type=_UPDATECLUSTERREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002M2B/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:\007cluster')),
),
_descriptor.MethodDescriptor(
name='DeleteCluster',
full_name='google.cloud.dataproc.v1.ClusterController.DeleteCluster',
index=2,
containing_service=None,
input_type=_DELETECLUSTERREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002D*B/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}')),
),
_descriptor.MethodDescriptor(
name='GetCluster',
full_name='google.cloud.dataproc.v1.ClusterController.GetCluster',
index=3,
containing_service=None,
input_type=_GETCLUSTERREQUEST,
output_type=_CLUSTER,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002D\022B/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}')),
),
_descriptor.MethodDescriptor(
name='ListClusters',
full_name='google.cloud.dataproc.v1.ClusterController.ListClusters',
index=4,
containing_service=None,
input_type=_LISTCLUSTERSREQUEST,
output_type=_LISTCLUSTERSRESPONSE,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0025\0223/v1/projects/{project_id}/regions/{region}/clusters')),
),
_descriptor.MethodDescriptor(
name='DiagnoseCluster',
full_name='google.cloud.dataproc.v1.ClusterController.DiagnoseCluster',
index=5,
containing_service=None,
input_type=_DIAGNOSECLUSTERREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002P\"K/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose:\001*')),
),
])
_sym_db.RegisterServiceDescriptor(_CLUSTERCONTROLLER)
DESCRIPTOR.services_by_name['ClusterController'] = _CLUSTERCONTROLLER
# @@protoc_insertion_point(module_scope)
|
apache-2.0
|
magne4000/werkzeug
|
werkzeug/contrib/limiter.py
|
365
|
1334
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.limiter
~~~~~~~~~~~~~~~~~~~~~~~~
A middleware that limits incoming data. This works around problems with
Trac_ or Django_ because those frameworks read the incoming stream directly into memory.
.. _Trac: http://trac.edgewall.org/
.. _Django: http://www.djangoproject.com/
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from warnings import warn
from werkzeug.wsgi import LimitedStream
class StreamLimitMiddleware(object):
"""Limits the input stream to a given number of bytes. This is useful if
you have a WSGI application that reads form data into memory (django for
example) and you don't want users to harm the server by uploading tons of
data.
The default maximum size is 10 MB.
.. versionchanged:: 0.9
Deprecated middleware.
"""
def __init__(self, app, maximum_size=1024 * 1024 * 10):
warn(DeprecationWarning('This middleware is deprecated'))
self.app = app
self.maximum_size = maximum_size
def __call__(self, environ, start_response):
limit = min(self.maximum_size, int(environ.get('CONTENT_LENGTH') or 0))
environ['wsgi.input'] = LimitedStream(environ['wsgi.input'], limit)
return self.app(environ, start_response)
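# A minimal usage sketch, assuming a trivial WSGI app as the wrapped
# application; here request bodies are capped at 4 MB.
def _demo_app(environ, start_response):
    body = environ['wsgi.input'].read()  # reads at most the imposed limit
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'ok']

limited_app = StreamLimitMiddleware(_demo_app, maximum_size=4 * 1024 * 1024)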
|
bsd-3-clause
|
nrc/servo
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/test/testdata/handlers/sub/wrong_handshake_sig_wsh.py
|
499
|
1859
|
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Wrong web_socket_do_extra_handshake signature.
"""
def no_web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
request.connection.write(
'sub/wrong_handshake_sig_wsh.py is called for %s, %s' %
(request.ws_resource, request.ws_protocol))
# vi:sts=4 sw=4 et
|
mpl-2.0
|
ad-m/petycja-faoo
|
docs/conf.py
|
2
|
7782
|
# -*- coding: utf-8 -*-
#
# ankieta documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ankieta'
copyright = u"2015, Adam Dobrawy"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ankietadoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'ankieta.tex',
u'ankieta Documentation',
u"Adam Dobrawy", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ankieta', u'ankieta Documentation',
[u"Adam Dobrawy"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ankieta', u'ankieta Documentation',
u"Adam Dobrawy", 'ankieta',
'Jawność kampanii wyborczej', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
bsd-3-clause
|
chenyyx/scikit-learn-doc-zh
|
examples/en/neighbors/plot_digits_kde_sampling.py
|
108
|
2026
|
"""
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
|
gpl-3.0
|
RobinD42/pyside
|
tests/QtGui/qpainter_test.py
|
6
|
3431
|
import unittest
from PySide.QtGui import QPainter, QLinearGradient
from PySide.QtCore import QLine, QLineF, QPoint, QPointF, QRect, QRectF, Qt
class QPainterDrawText(unittest.TestCase):
def setUp(self):
self.painter = QPainter()
self.text = 'teste!'
def tearDown(self):
del self.text
del self.painter
def testDrawText(self):
# bug #254
rect = self.painter.drawText(100, 100, 100, 100,
Qt.AlignCenter | Qt.TextWordWrap,
self.text)
self.assert_(isinstance(rect, QRect))
def testDrawTextWithRect(self):
# bug #225
rect = QRect(100, 100, 100, 100)
newRect = self.painter.drawText(rect, Qt.AlignCenter | Qt.TextWordWrap,
self.text)
self.assert_(isinstance(newRect, QRect))
def testDrawTextWithRectF(self):
'''QPainter.drawText(QRectF, ... ,QRectF*) inject code'''
rect = QRectF(100, 52.3, 100, 100)
newRect = self.painter.drawText(rect, Qt.AlignCenter | Qt.TextWordWrap,
self.text)
self.assert_(isinstance(newRect, QRectF))
def testDrawOverloads(self):
'''Calls QPainter.drawLines overloads, if something is
wrong Exception and chaos ensues. Bug #395'''
self.painter.drawLines([QLine(QPoint(0,0), QPoint(1,1))])
self.painter.drawLines([QPoint(0,0), QPoint(1,1)])
self.painter.drawLines([QPointF(0,0), QPointF(1,1)])
self.painter.drawLines([QLineF(QPointF(0,0), QPointF(1,1))])
self.painter.drawPoints([QPoint(0,0), QPoint(1,1)])
self.painter.drawPoints([QPointF(0,0), QPointF(1,1)])
self.painter.drawConvexPolygon([QPointF(10.0, 80.0),
QPointF(20.0, 10.0),
QPointF(80.0, 30.0),
QPointF(90.0, 70.0)])
self.painter.drawConvexPolygon([QPoint(10.0, 80.0),
QPoint(20.0, 10.0),
QPoint(80.0, 30.0),
QPoint(90.0, 70.0)])
self.painter.drawPolygon([QPointF(10.0, 80.0),
QPointF(20.0, 10.0),
QPointF(80.0, 30.0),
QPointF(90.0, 70.0)])
self.painter.drawPolygon([QPoint(10.0, 80.0),
QPoint(20.0, 10.0),
QPoint(80.0, 30.0),
QPoint(90.0, 70.0)])
self.painter.drawPolyline([QPointF(10.0, 80.0),
QPointF(20.0, 10.0),
QPointF(80.0, 30.0),
QPointF(90.0, 70.0)])
self.painter.drawPolyline([QPoint(10.0, 80.0),
QPoint(20.0, 10.0),
QPoint(80.0, 30.0),
QPoint(90.0, 70.0)])
class SetBrushWithOtherArgs(unittest.TestCase):
'''Using qpainter.setBrush with args other than QBrush'''
def testSetBrushGradient(self):
painter = QPainter()
gradient = QLinearGradient(0, 0, 0, 0)
painter.setBrush(gradient)
if __name__ == '__main__':
unittest.main()
|
lgpl-2.1
|
mp2apps/pesetacoin-master
|
share/qt/extract_strings_qt.py
|
2945
|
1844
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
|
mit
|
oxford-pcs/zemax_controller
|
MeritFunction.py
|
1
|
6082
|
class MeritFunctionError(Exception):
def __init__(self, message, error):
super(Exception, self).__init__(message)
self.errors = error
class MeritFunction():
'''
This class provides functionality to create a merit function
using Zemax's command DEFAULTMERIT.
To create a merit function, a blank ZPL file (any existing contents
will be overwritten) must be placed in the macros directory of your
Zemax distribution. This file cannot be created on-the-fly because
Zemax only populates its list of available macros at runtime.
'''
def __init__(self, zmx_link, lens_data, mfe_zpl_path, mfe_zpl_filename):
self.zmx_link = zmx_link
self.lens_data = lens_data
self.mfe_zpl_path = mfe_zpl_path
self.mfe_zpl_filename = mfe_zpl_filename
def _constructCommand(self, atype=0, data=0, reference=0, method=1,
rings=8, arms=3, grid=8, delete=0, axial=-1,
lateral=1, start=-1, xweight=1, oweight=1,
pup_obsc=0):
'''
Write the ZPL parsable command to the .ZPL file.
atype use 0 for RMS, 1 for PTV.
data use 0 for wavefront, 1 for spot radius, 2 for spot x,
3 for spot y, 4 for spot x + y.
reference use 0 for centroid, 1 for chief, 2 for unreferenced.
method use 1 for Gaussian quadrature, 2 for rectangular array.
rings the number of annular rings (Gaussian quadrature only).
arms the number of radial arms (Gaussian quadrature only).
The number of arms must be even and no less than 6.
grid the size of the grid. Use an integer, such as 8, for an
8 x 8 grid. The grid size must be even and no less than 4.
delete use 0 to not delete vignetted rays, 1 to delete
vignetted rays.
axial use -1 for automatic, which will use symmetry only if
the system is axial symmetric. Use 1 to assume axial
symmetry, 0 to not assume axial symmetry.
lateral use 1 to ignore lateral color, 0 otherwise.
start use -1 for automatic, which will add the default merit
function after any existing DMFS operand. Otherwise use
the operand number at which to add the default merit
function. Any existing operands above the specified
operand number will be retained.
xweight the x direction weight (only spot x+y),
oweight the overall weight for the merit function.
pup_obsc the pupil obscuration ratio.
'''
try:
with open(self.mfe_zpl_path + self.mfe_zpl_filename, 'w') as f:
MF_parameters = ["DEFAULTMERIT", str(atype), str(data),
str(reference), str(method), str(rings),
str(arms), str(grid), str(delete), str(axial),
str(lateral), str(start), str(xweight),
str(oweight), str(pup_obsc)]
f.write("CLOSEWINDOW\n")
f.write(', '.join(MF_parameters))
except IOError:
raise MeritFunctionError(".ZPL file could not be found at this \
path.", 1)
def _getMFEContents(self):
'''
Get MFE contents.
'''
self.zmx_link.zInsertMFO(1)
n_operands = self.zmx_link.zDeleteMFO(1)
contents = self.zmx_link.ipzGetMFE(end_row=n_operands, pprint=False)
return contents
def _DDEToLDE(self):
self.lens_data.DDEToLDE()
def _LDEToDDE(self):
self.lens_data.LDEToDDE()
def createDefaultMF(self, atype=0, data=1, reference=0, method=1,
rings=3, arms=3, grid=8, delete=0, axial=0,
lateral=1, start=-1, xweight=1, oweight=1,
pup_obsc=0):
'''
Create a default Merit Function and place it in the DDE.
See _constructCommand() for parameter explanations.
'''
# Make .ZPL command and write to macro
self._constructCommand(atype, data, reference, method, rings, arms,
grid, delete, axial, lateral, start, xweight,
oweight, pup_obsc)
# Execute command and move data from LDE to DDE. Note that executing
# a macro only updates the LDE and so we need to update the DDE
# to access the updated function.
#
zpl_code = self.mfe_zpl_filename[0:3]
rtn_code = self.zmx_link.zExecuteZPLMacro(zpl_code)
self._LDEToDDE()
def delMFOperand(self, row_number):
'''
Delete a MF operand from the MFE.
'''
self.zmx_link.zDeleteMFO(row_number)
self._LDEToDDE()
def getRowNumberFromMFContents(self, oper, comment=None):
'''
Get row number number of an operand in the MFE given the
operand name and (optionally) comment.
'''
for idx, row in enumerate(self._getMFEContents()):
if row.Oper == oper:
if comment is not None and row.int1 == comment:
return idx+1
else:
return idx+1
return 0
def setAirGapConstraints(self, ins_row_number, surface_number, min_gap,
max_gap):
'''
Add air gap constraints.
'''
self.zmx_link.zInsertMFO(ins_row_number)
self.zmx_link.zSetOperandRow(ins_row_number, "MNCA", int1=surface_number,
int2=surface_number, data1=None,
data2=None, data3=None, data4=None,
data5=None, data6=None, tgt=min_gap,
wgt=1.0)
self.zmx_link.zInsertMFO(ins_row_number)
self.zmx_link.zSetOperandRow(ins_row_number, "MXCA", int1=surface_number,
int2=surface_number, data1=None,
data2=None, data3=None, data4=None,
data5=None, data6=None, tgt=max_gap,
wgt=1.0)
self._DDEToLDE()
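# A minimal usage sketch, assuming ``zmx_link`` is an established PyZDDE
# link object and ``lens_data`` exposes the DDEToLDE()/LDEToDDE() helpers
# used above; the macro path and filename below are placeholders.
def example_default_merit_function(zmx_link, lens_data):
    mf = MeritFunction(zmx_link, lens_data,
                       mfe_zpl_path='C:\\Zemax\\Macros\\',  # placeholder
                       mfe_zpl_filename='MFE.ZPL')          # placeholder
    # RMS spot-radius merit function sampled with Gaussian quadrature.
    mf.createDefaultMF(atype=0, data=1, method=1, rings=3, arms=6)
    return mf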
|
mit
|
dcroc16/skunk_works
|
google_appengine/lib/django-1.4/django/utils/dateparse.py
|
96
|
2896
|
"""Functions to parse datetime objects."""
# We're using regular expressions rather than time.strptime because:
# - They provide both validation and parsing.
# - They're more flexible for datetimes.
# - The date/datetime/time constructors produce friendlier error messages.
import datetime
import re
from django.utils.timezone import utc
from django.utils.tzinfo import FixedOffset
date_re = re.compile(
r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})$'
)
datetime_re = re.compile(
r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
r'(?P<tzinfo>Z|[+-]\d{1,2}:\d{1,2})?$'
)
time_re = re.compile(
r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
)
def parse_date(value):
"""Parses a string and return a datetime.date.
Raises ValueError if the input is well formatted but not a valid date.
Returns None if the input isn't well formatted.
"""
match = date_re.match(value)
if match:
kw = dict((k, int(v)) for k, v in match.groupdict().iteritems())
return datetime.date(**kw)
def parse_time(value):
"""Parses a string and return a datetime.time.
This function doesn't support time zone offsets.
Sub-microsecond precision is accepted, but ignored.
Raises ValueError if the input is well formatted but not a valid time.
Returns None if the input isn't well formatted, in particular if it
contains an offset.
"""
match = time_re.match(value)
if match:
kw = match.groupdict()
if kw['microsecond']:
kw['microsecond'] = kw['microsecond'].ljust(6, '0')
kw = dict((k, int(v)) for k, v in kw.iteritems() if v is not None)
return datetime.time(**kw)
def parse_datetime(value):
"""Parses a string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses an instance of FixedOffset as tzinfo.
Sub-microsecond precision is accepted, but ignored.
Raises ValueError if the input is well formatted but not a valid datetime.
Returns None if the input isn't well formatted.
"""
match = datetime_re.match(value)
if match:
kw = match.groupdict()
if kw['microsecond']:
kw['microsecond'] = kw['microsecond'].ljust(6, '0')
tzinfo = kw.pop('tzinfo')
if tzinfo == 'Z':
tzinfo = utc
elif tzinfo is not None:
offset = 60 * int(tzinfo[1:3]) + int(tzinfo[4:6])
if tzinfo[0] == '-':
offset = -offset
tzinfo = FixedOffset(offset)
kw = dict((k, int(v)) for k, v in kw.iteritems() if v is not None)
kw['tzinfo'] = tzinfo
return datetime.datetime(**kw)
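# A quick, illustrative usage sketch of the parsers defined above; the
# expected results are shown in the trailing comments.
if __name__ == '__main__':
    print(parse_date('2012-04-23'))                         # 2012-04-23
    print(parse_time('10:20:30.123456'))                    # 10:20:30.123456
    print(parse_datetime('2012-04-23T10:20:30.400-02:30'))  # tz-aware datetime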
|
mit
|
leki75/ansible
|
lib/ansible/module_utils/facts/other/facter.py
|
232
|
2985
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.module_utils.facts.namespace import PrefixFactNamespace
from ansible.module_utils.facts.collector import BaseFactCollector
class FacterFactCollector(BaseFactCollector):
name = 'facter'
_fact_ids = set(['facter'])
def __init__(self, collectors=None, namespace=None):
namespace = PrefixFactNamespace(namespace_name='facter',
prefix='facter_')
super(FacterFactCollector, self).__init__(collectors=collectors,
namespace=namespace)
def find_facter(self, module):
facter_path = module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
cfacter_path = module.get_bin_path('cfacter', opt_dirs=['/opt/puppetlabs/bin'])
# Prefer to use cfacter if available
if cfacter_path is not None:
facter_path = cfacter_path
return facter_path
def run_facter(self, module, facter_path):
# if facter is installed, and we can use --json because
# ruby-json is ALSO installed, include facter data in the JSON
rc, out, err = module.run_command(facter_path + " --puppet --json")
return rc, out, err
def get_facter_output(self, module):
facter_path = self.find_facter(module)
if not facter_path:
return None
rc, out, err = self.run_facter(module, facter_path)
if rc != 0:
return None
return out
def collect(self, module=None, collected_facts=None):
# Note that this mirrors previous facter behavior, where there isn't
# an 'ansible_facter' key in the main fact dict; instead, 'facter_whatever'
# items are added to the main dict.
facter_dict = {}
if not module:
return facter_dict
facter_output = self.get_facter_output(module)
# TODO: if we fail, should we add an empty facter key or nothing?
if facter_output is None:
return facter_dict
try:
facter_dict = json.loads(facter_output)
except Exception:
# FIXME: maybe raise a FactCollectorError with some info attrs?
pass
return facter_dict
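# A minimal sketch of driving the collector with a stand-in module object.
# The real AnsibleModule supplies get_bin_path()/run_command(); the stub
# below is purely hypothetical and returns canned facter output.
class _FakeModule(object):
    def get_bin_path(self, name, opt_dirs=None):
        return '/opt/puppetlabs/bin/facter' if name == 'facter' else None

    def run_command(self, cmd):
        return 0, '{"kernel": "Linux"}', ''


if __name__ == '__main__':
    print(FacterFactCollector().collect(module=_FakeModule()))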
|
gpl-3.0
|
windofthesky/thrift
|
test/crossrunner/util.py
|
55
|
1057
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import copy
def merge_dict(base, update):
"""Update dict concatenating list values"""
res = copy.deepcopy(base)
for k, v in list(update.items()):
if k in list(res.keys()) and isinstance(v, list):
res[k].extend(v)
else:
res[k] = v
return res
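# A minimal usage sketch: list values are concatenated, other values are
# overwritten by the update dict.
if __name__ == '__main__':
    base = {'opts': ['-a'], 'port': 9090}
    update = {'opts': ['-b'], 'port': 9091}
    assert merge_dict(base, update) == {'opts': ['-a', '-b'], 'port': 9091}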
|
apache-2.0
|
cloudtools/troposphere
|
troposphere/emr.py
|
1
|
15025
|
# Copyright (c) 2012-2013, Antonio Alonso Dominguez <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSHelperFn, AWSObject, AWSProperty, Tags
from .validators import boolean, defer, double, integer, positive_integer
CHANGE_IN_CAPACITY = "CHANGE_IN_CAPACITY"
PERCENT_CHANGE_IN_CAPACITY = "PERCENT_CHANGE_IN_CAPACITY"
EXACT_CAPACITY = "EXACT_CAPACITY"
ACTIONS_ON_FAILURE = (
"TERMINATE_CLUSTER",
"CANCEL_AND_WAIT",
"CONTINUE",
"TERMINATE_JOB_FLOW",
)
def validate_action_on_failure(action_on_failure):
"""Validate action on failure for EMR StepConfig"""
if action_on_failure not in ACTIONS_ON_FAILURE:
raise ValueError(
"StepConfig ActionOnFailure must be one of: %s"
% ", ".join(ACTIONS_ON_FAILURE)
)
return action_on_failure
class KeyValue(AWSProperty):
props = {"Key": (str, True), "Value": (str, True)}
def __init__(self, key=None, value=None, **kwargs):
# provided for backward compatibility
if key is not None:
kwargs["Key"] = key
if value is not None:
kwargs["Value"] = value
super().__init__(**kwargs)
MetricDimension = KeyValue
def additional_info_validator(xs):
if not isinstance(xs, dict):
raise ValueError("AdditionalInfo must be a dict of " "string to string pairs")
for k, v in xs.items():
if not isinstance(k, str):
raise ValueError("AdditionalInfo keys must be strings")
if not isinstance(v, str):
raise ValueError("AdditionalInfo values must be strings")
return xs
class SecurityConfiguration(AWSObject):
resource_type = "AWS::EMR::SecurityConfiguration"
props = {"Name": (str, False), "SecurityConfiguration": (dict, True)}
class Application(AWSProperty):
props = {
"AdditionalInfo": (additional_info_validator, False),
"Args": ([str], False),
"Name": (str, False),
"Version": (str, False),
}
class ScriptBootstrapActionConfig(AWSProperty):
props = {"Args": ([str], False), "Path": (str, True)}
class BootstrapActionConfig(AWSProperty):
props = {
"Name": (str, True),
"ScriptBootstrapAction": (ScriptBootstrapActionConfig, True),
}
def properties_validator(xs):
if not isinstance(xs, dict):
raise ValueError(
"ConfigurationProperties must be a dict of " "string to string pairs"
)
for k, v in xs.items():
if not isinstance(k, str):
raise ValueError("ConfigurationProperties keys must be strings")
if not isinstance(v, str) and not isinstance(v, AWSHelperFn):
raise ValueError(
"ConfigurationProperties values must be strings" " or helper functions"
)
return xs
class Configuration(AWSProperty):
props = {
"Classification": (str, False),
"ConfigurationProperties": (properties_validator, False),
}
# we must define this one afterwards since Configuration does not exist
# before Configuration is done initializing
Configuration.props["Configurations"] = ([Configuration], False)
def market_validator(x):
valid_values = ["ON_DEMAND", "SPOT"]
if x not in valid_values:
raise ValueError("Market must be one of: %s" % ", ".join(valid_values))
return x
def volume_type_validator(x):
valid_values = ["standard", "io1", "gp2"]
if x not in valid_values:
raise ValueError("VolumeType must be one of: %s" % ", ".join(valid_values))
return x
class VolumeSpecification(AWSProperty):
props = {
"Iops": (integer, False),
"SizeInGB": (integer, True),
"VolumeType": (volume_type_validator, True),
}
class EbsBlockDeviceConfigs(AWSProperty):
props = {
"VolumeSpecification": (VolumeSpecification, True),
"VolumesPerInstance": (integer, False),
}
class EbsConfiguration(AWSProperty):
props = {
"EbsBlockDeviceConfigs": ([EbsBlockDeviceConfigs], False),
"EbsOptimized": (boolean, False),
}
class ScalingConstraints(AWSProperty):
props = {"MinCapacity": (integer, True), "MaxCapacity": (integer, True)}
class CloudWatchAlarmDefinition(AWSProperty):
props = {
"ComparisonOperator": (str, True),
"Dimensions": ([MetricDimension], False),
"EvaluationPeriods": (positive_integer, False),
"MetricName": (str, True),
"Namespace": (str, False),
"Period": (positive_integer, True),
"Statistic": (str, False),
"Threshold": (positive_integer, True),
"Unit": (str, False),
}
class ScalingTrigger(AWSProperty):
props = {
"CloudWatchAlarmDefinition": (CloudWatchAlarmDefinition, True),
}
class SimpleScalingPolicyConfiguration(AWSProperty):
props = {
"AdjustmentType": (str, False),
"CoolDown": (positive_integer, False),
"ScalingAdjustment": (defer, True),
}
def validate(self):
if (
"AdjustmentType" in self.properties
and "ScalingAdjustment" in self.properties
):
valid_values = [
CHANGE_IN_CAPACITY,
PERCENT_CHANGE_IN_CAPACITY,
EXACT_CAPACITY,
]
adjustment_type = self.properties.get("AdjustmentType", None)
scaling_adjustment = self.properties.get("ScalingAdjustment", None)
if adjustment_type not in valid_values:
raise ValueError(
"Only CHANGE_IN_CAPACITY, PERCENT_CHANGE_IN_CAPACITY, or"
" EXACT_CAPACITY are valid AdjustmentTypes"
)
if adjustment_type == CHANGE_IN_CAPACITY:
integer(scaling_adjustment)
elif adjustment_type == PERCENT_CHANGE_IN_CAPACITY:
double(scaling_adjustment)
f = float(scaling_adjustment)
if f < 0.0 or f > 1.0:
raise ValueError(
"ScalingAdjustment value must be between 0.0 and 1.0"
" value was %0.2f" % f
)
elif adjustment_type == EXACT_CAPACITY:
positive_integer(scaling_adjustment)
else:
raise ValueError(
"ScalingAdjustment value must be" " an integer or a float"
)
class ScalingAction(AWSProperty):
props = {
"Market": (market_validator, False),
"SimpleScalingPolicyConfiguration": (SimpleScalingPolicyConfiguration, True),
}
class ScalingRule(AWSProperty):
props = {
"Action": (ScalingAction, True),
"Description": (str, False),
"Name": (str, True),
"Trigger": (ScalingTrigger, True),
}
class AutoScalingPolicy(AWSProperty):
props = {
"Constraints": (ScalingConstraints, True),
"Rules": ([ScalingRule], False),
}
class InstanceGroupConfigProperty(AWSProperty):
props = {
"AutoScalingPolicy": (AutoScalingPolicy, False),
"BidPrice": (str, False),
"Configurations": ([Configuration], False),
"EbsConfiguration": (EbsConfiguration, False),
"InstanceCount": (positive_integer, True),
"InstanceType": (str, True),
"Market": (market_validator, False),
"Name": (str, False),
}
class OnDemandProvisioningSpecification(AWSProperty):
props = {
"AllocationStrategy": (str, True),
}
def validate(self):
valid_values = ["lowest-price"]
allocation_strategy = self.properties.get("AllocationStrategy", None)
if allocation_strategy not in valid_values:
raise ValueError(
"AllocationStrategy %s is not valid. Valid options are %s"
% (allocation_strategy, ", ".join(valid_values))
)
class SpotProvisioningSpecification(AWSProperty):
props = {
"AllocationStrategy": (str, False),
"BlockDurationMinutes": (positive_integer, False),
"TimeoutAction": (str, True),
"TimeoutDurationMinutes": (positive_integer, True),
}
def validate(self):
if "AllocationStrategy" in self.properties:
valid_values = ["capacity-optimized"]
allocation_strategy = self.properties.get("AllocationStrategy", None)
if allocation_strategy not in valid_values:
raise ValueError(
"AllocationStrategy %s is not valid. Valid options are %s"
% (allocation_strategy, ", ".join(valid_values))
)
class InstanceFleetProvisioningSpecifications(AWSProperty):
props = {
"OnDemandSpecification": (OnDemandProvisioningSpecification, False),
"SpotSpecification": (SpotProvisioningSpecification, False),
}
class InstanceTypeConfig(AWSProperty):
props = {
"BidPrice": (str, False),
"BidPriceAsPercentageOfOnDemandPrice": (str, False),
"Configurations": ([Configuration], False),
"EbsConfiguration": (EbsConfiguration, False),
"InstanceType": (str, True),
"WeightedCapacity": (positive_integer, False),
}
class InstanceFleetConfigProperty(AWSProperty):
props = {
"InstanceTypeConfigs": ([InstanceTypeConfig], False),
"LaunchSpecifications": (InstanceFleetProvisioningSpecifications, False),
"Name": (str, False),
"TargetOnDemandCapacity": (positive_integer, False),
"TargetSpotCapacity": (positive_integer, False),
}
class PlacementType(AWSProperty):
props = {"AvailabilityZone": (str, True)}
class JobFlowInstancesConfig(AWSProperty):
props = {
"AdditionalMasterSecurityGroups": ([str], False),
"AdditionalSlaveSecurityGroups": ([str], False),
"CoreInstanceFleet": (InstanceFleetConfigProperty, False),
"CoreInstanceGroup": (InstanceGroupConfigProperty, False),
"Ec2KeyName": (str, False),
"Ec2SubnetId": (str, False),
"Ec2SubnetIds": ([str], False),
"EmrManagedMasterSecurityGroup": (str, False),
"EmrManagedSlaveSecurityGroup": (str, False),
"HadoopVersion": (str, False),
"KeepJobFlowAliveWhenNoSteps": (boolean, False),
"MasterInstanceFleet": (InstanceFleetConfigProperty, False),
"MasterInstanceGroup": (InstanceGroupConfigProperty, False),
"Placement": (PlacementType, False),
"ServiceAccessSecurityGroup": (str, False),
"TerminationProtected": (boolean, False),
}
class KerberosAttributes(AWSProperty):
props = {
"ADDomainJoinPassword": (str, False),
"ADDomainJoinUser": (str, False),
"CrossRealmTrustPrincipalPassword": (str, False),
"KdcAdminPassword": (str, True),
"Realm": (str, True),
}
class ComputeLimits(AWSProperty):
props = {
"MaximumCapacityUnits": (integer, True),
"MaximumCoreCapacityUnits": (integer, False),
"MaximumOnDemandCapacityUnits": (integer, False),
"MinimumCapacityUnits": (integer, True),
"UnitType": (str, True),
}
class ManagedScalingPolicy(AWSProperty):
props = {
"ComputeLimits": (ComputeLimits, False),
}
class HadoopJarStepConfig(AWSProperty):
props = {
"Args": ([str], False),
"Jar": (str, True),
"MainClass": (str, False),
"StepProperties": ([KeyValue], False),
}
class StepConfig(AWSProperty):
props = {
"ActionOnFailure": (validate_action_on_failure, False),
"HadoopJarStep": (HadoopJarStepConfig, True),
"Name": (str, True),
}
class Cluster(AWSObject):
resource_type = "AWS::EMR::Cluster"
props = {
"AdditionalInfo": (dict, False),
"Applications": ([Application], False),
"AutoScalingRole": (str, False),
"BootstrapActions": ([BootstrapActionConfig], False),
"Configurations": ([Configuration], False),
"CustomAmiId": (str, False),
"EbsRootVolumeSize": (positive_integer, False),
"Instances": (JobFlowInstancesConfig, True),
"JobFlowRole": (str, True),
"KerberosAttributes": (KerberosAttributes, False),
"LogEncryptionKmsKeyId": (str, False),
"LogUri": (str, False),
"ManagedScalingPolicy": (ManagedScalingPolicy, False),
"Name": (str, True),
"ReleaseLabel": (str, False),
"ScaleDownBehavior": (str, False),
"SecurityConfiguration": (str, False),
"ServiceRole": (str, True),
"StepConcurrencyLevel": (integer, False),
"Steps": ([StepConfig], False),
"Tags": ((Tags, list), False),
"VisibleToAllUsers": (boolean, False),
}
class InstanceFleetConfig(AWSObject):
resource_type = "AWS::EMR::InstanceFleetConfig"
props = {
"ClusterId": (str, True),
"InstanceFleetType": (str, True),
"InstanceTypeConfigs": ([InstanceTypeConfig], False),
"LaunchSpecifications": (InstanceFleetProvisioningSpecifications, False),
"Name": (str, False),
"TargetOnDemandCapacity": (positive_integer, False),
"TargetSpotCapacity": (positive_integer, False),
}
class InstanceGroupConfig(AWSObject):
resource_type = "AWS::EMR::InstanceGroupConfig"
props = {
"AutoScalingPolicy": (AutoScalingPolicy, False),
"BidPrice": (str, False),
"Configurations": ([Configuration], False),
"EbsConfiguration": (EbsConfiguration, False),
"InstanceCount": (integer, True),
"InstanceRole": (str, True),
"InstanceType": (str, True),
"JobFlowId": (str, True),
"Market": (market_validator, False),
"Name": (str, False),
}
def action_on_failure_validator(x):
valid_values = ["CONTINUE", "CANCEL_AND_WAIT"]
if x not in valid_values:
raise ValueError("ActionOnFailure must be one of: %s" % ", ".join(valid_values))
return x
class Step(AWSObject):
resource_type = "AWS::EMR::Step"
props = {
"ActionOnFailure": (action_on_failure_validator, True),
"HadoopJarStep": (HadoopJarStepConfig, True),
"JobFlowId": (str, True),
"Name": (str, True),
}
class Studio(AWSObject):
resource_type = "AWS::EMR::Studio"
props = {
"AuthMode": (str, True),
"DefaultS3Location": (str, True),
"Description": (str, False),
"EngineSecurityGroupId": (str, True),
"Name": (str, True),
"ServiceRole": (str, True),
"SubnetIds": ([str], True),
"Tags": (Tags, False),
"UserRole": (str, True),
"VpcId": (str, True),
"WorkspaceSecurityGroupId": (str, True),
}
class StudioSessionMapping(AWSObject):
resource_type = "AWS::EMR::StudioSessionMapping"
props = {
"IdentityName": (str, True),
"IdentityType": (str, True),
"SessionPolicyArn": (str, True),
"StudioId": (str, True),
}
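# An illustrative sketch of a Cluster with only its required properties;
# the title, names, roles and instance types below are placeholders.
example_cluster = Cluster(
    "ExampleEmrCluster",
    Name="example-cluster",
    JobFlowRole="EMR_EC2_DefaultRole",
    ServiceRole="EMR_DefaultRole",
    Instances=JobFlowInstancesConfig(
        MasterInstanceGroup=InstanceGroupConfigProperty(
            InstanceCount=1, InstanceType="m5.xlarge"),
        CoreInstanceGroup=InstanceGroupConfigProperty(
            InstanceCount=2, InstanceType="m5.xlarge"),
    ),
)
# example_cluster.to_dict() renders the CloudFormation resource mapping.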
|
bsd-2-clause
|
codefisher/mozbutton_sdk
|
builder/web_extension.py
|
1
|
10238
|
import os
import json
import re
from builder.ext_button import Button, get_image, ExtensionConfigError, bytes_string
from builder.locales import WebExtensionLocal, message_name
class WebExtensionButton(Button):
def __init__(self, folders, buttons, settings, applications):
super(WebExtensionButton, self).__init__(folders, buttons, settings, applications)
self._button_background_js = {}
self.popup_files = {}
self.option_files = {}
self.webx_locale = None
if len(buttons) != 1 or len(self._manifests) != 1 or len(self._info) != 1:
raise ExtensionConfigError("WebExtensions can only have a single button in them. " + ", ".join(buttons))
self.the_button = buttons[0]
if len(self._manifests) == 0:
raise ExtensionConfigError(
"Buttons for WebExtensions must have a manifest.json file.")
button, data = list(self._manifests.items())[0]
if "images" in data:
self._icons[button] = data.get('images')
if "strings" in data: # looking to remove
for name, value in data.get("strings"):
self._strings[name] = value
self._name_of_ext = data.get("name")
folder, button, files = self._info[0]
if "messages.json" in files:
self.webx_locale = WebExtensionLocal(folder, settings.get('default_locale'))
if 'background.js' in files:
with open(os.path.join(folder, "background.js"), "r") as background:
self._button_background_js[button] = background.read()
for file_group, file_var in (
('popup', self.popup_files),
('option', self.option_files)):
if file_group in files:
for file_name in os.listdir(os.path.join(folder, file_group)):
if file_name[0] != ".":
path = os.path.join(folder, file_group, file_name)
file_var[file_name] = path
def get_file_strings(self, settings, button_locales):
manifest = {
"manifest_version": 2,
"name": "__MSG_extensionName__",
"version": settings.get('version'),
"description": "__MSG_extensionDescription__",
"homepage_url": settings.get('homepage'),
"author": settings.get('creator'),
"icons": {},
"browser_action": {
"browser_style": True,
"default_icon": {},
},
"applications": {
"gecko": {
"id": settings.get('extension_id'),
"strict_min_version": "42.0"
}
},
"default_locale": settings.get('default_locale').replace('-', '_'),
}
if settings.get('homepage'):
manifest["homepage_url"] = settings.get('homepage')
data = self._manifests.get(self.the_button)
if 'default_title' in data:
manifest['browser_action']["default_title"] = "__MSG_{}__".format(message_name(data.get('default_title')))
if 'content_scripts' in data:
manifest['content_scripts'] = data.get('content_scripts')
if 'web_accessible_resources' in data:
manifest['web_accessible_resources'] = data.get('web_accessible_resources')
if 'permissions' in data:
manifest['permissions'] = data['permissions']
for size in settings.get('icon_size'):
name = "icons/{}-{}".format(size, settings.get("icon"))
manifest['icons'][size] = name
manifest['browser_action']['default_icon'][size] = name
if self.popup_files:
manifest['browser_action']['default_popup'] = 'popup/panel.html'
if self.option_files:
manifest["options_ui"] = {"page": 'option/option.html', "browser_style": True}
background_scripts = []
        for button, background_js in self._button_background_js.items():
            name = 'background.js'
            yield name, background_js
background_scripts.append(name)
for locale, name, data in self.locale_files(button_locales):
yield "_locales/{}/{}".format(locale.replace('-', '_'), name), data
def option_fix(match):
return "__MSG_{}__".format(message_name(match.group(1)))
for file_group, file_var in (
('popup', self.popup_files),
('option', self.option_files),
('files', self.extra_files)):
for name, path in file_var.items():
if name.endswith(".html"):
with open(path, 'r') as fp:
yield (os.path.join(file_group, name), re.sub('__MSG_(.*?)__', option_fix, fp.read()))
if 'background' in data:
manifest['background'] = data['background']
elif background_scripts:
manifest['background'] = {'scripts': background_scripts}
yield 'manifest.json', json.dumps(manifest, indent=4, sort_keys=True)
def get_files_names(self, settings):
for size in settings.get('icon_size'):
path = get_image(settings, size, settings.get("icon"))
yield (path, "icons/{}-{}".format(size, settings.get("icon")))
for name, path in self.extra_files.items():
            manifest = self._manifests.get(self.the_button)
            if (not name.endswith('.xul') and not name.endswith('.html')
                    and (manifest.get('files') is None
                         or name in manifest.get('files'))):
yield (path, os.path.join('files', name))
for name, path in self.popup_files.items():
if not name.endswith(".html"):
yield (path, os.path.join('popup', name))
for name, path in self.option_files.items():
if not name.endswith(".html"):
yield (path, os.path.join('option', name))
if self.option_files or self.popup_files or self.extra_files:
yield os.path.join(settings.get('button_sdk_root'), 'templates', 'localise.js'), "localise.js"
def get_string(self, name, locale=None):
        # extensionName and extensionDescription are always answered here; the
        # locale files are still in transition and may not contain them yet.
if name == "extensionName":
return self._name_of_ext
elif name == "extensionDescription":
return self._settings.get('description').strip()
if self.webx_locale:
result = self.get_web_ext_string(name, locale)
if result:
return result.get("message")
return super(WebExtensionButton, self).get_string(name, locale)
def meta_strings(self, name, locale):
if locale == self._settings.get("default_locale"):
if name == "extensionName":
return {
"message": self._name_of_ext,
"description": "Name of the extension."
}
elif name == "extensionDescription":
return {
"message": self._settings.get('description').strip(),
"description": "Description of the extension."
}
return
def get_web_ext_string(self, name, locale):
result = self.meta_strings(name, locale)
if result:
return result
result = self.webx_locale.get_string(name, locale)
if result:
return result
else:
result = self.webx_locale.get_string(message_name(name), locale)
if result:
return result
return None
def get_string_info(self, name, locale=None):
result = self.meta_strings(name, locale)
if result:
return result
if self.webx_locale:
result = self.get_web_ext_string(name, locale)
if result:
if not result.get("description"):
default_result = self.get_web_ext_string(name, self._settings.get("default_locale"))
result["description"] = default_result.get("description", "")
return result
string = self.get_string(name, locale)
return {
"description": "",
"message": string
}
def locale_files(self, button_locales, *args, **kwargs):
if self.webx_locale:
for locale in self.webx_locale.get_locales():
strings = {}
for string in self.get_locale_strings():
strings[message_name(string)] = self.get_string_info(string, locale)
yield locale, "messages.json", json.dumps(strings, sort_keys=True, indent=2)
else:
# this really amounts to importing from the old format
data = button_locales.get_string_dict(self.get_locale_strings(), self, untranslated=False)
for locale, values in data.items():
strings = {}
for string, value in values.items():
if isinstance(value, dict):
strings[message_name(string)] = value
else:
strings[message_name(string)] = {"message": value, "description": ""}
yield locale, "messages.json", json.dumps(strings, sort_keys=True, indent=2)
def get_locale_strings(self):
strings = {"extensionName", "extensionDescription"}
data = self._manifests.get(self.the_button)
if 'default_title' in data:
strings.add(data.get('default_title'))
if 'used_strings' in data:
strings.update(data.get('used_strings'))
if 'strings' in data:
for name, _ in data.get("strings"):
strings.add(name)
if 'messages' in data:
strings.update(data["messages"].keys())
for file_group in (self.popup_files, self.option_files, self.extra_files):
for name, path in file_group.items():
if name.endswith('.html'):
with open(path, 'r') as fp:
for match in re.finditer('__MSG_(.*?)__', fp.read()):
strings.add(match.group(1))
return strings
|
mit
|
glaubitz/fs-uae-debian
|
launcher/fsgs/SignalContext.py
|
2
|
1928
|
import weakref
from fsbc.signal import Signal
from .contextaware import ContextAware
class SignalBehavior:
def __init__(self, context, parent, names):
parent.__signal_enable_behavior = self
self._context = context
self._parent = weakref.ref(parent)
self._names = set(names)
try:
parent.destroyed.connect(self.on_parent_destroyed)
except AttributeError:
print(
"WARNING:SignalBehavior without remove_listener "
"implementation"
)
for signal in self._names:
self._context.connect(
signal, getattr(self._parent(), "on_{}_signal".format(signal))
)
# self._context.connect(signal, self._parent())
def on_parent_destroyed(self):
for signal in self._names:
self._context.disconnect(
signal, getattr(self._parent(), "on_{}_signal".format(signal))
)
# self._context.disconnect(signal, self._parent())
# noinspection PyMethodMayBeStatic
class SignalContext(ContextAware):
def __init__(self, context):
ContextAware.__init__(self, context)
def signal_name(self, signal):
# FIXME: use fsgs-context-instance-specific signals
# return "fsgs:{}{}".format(id(signal), signal)
return "fsgs:{}".format(signal)
def connect(self, signal, listener):
Signal(self.signal_name(signal)).connect(listener)
def disconnect(self, signal, listener):
Signal(self.signal_name(signal)).disconnect(listener)
def emit(self, signal, args):
Signal(self.signal_name(signal)).notify(*args)
# FIXME: Deprecated
def notify(self, signal, args):
return self.emit(signal, args)
def process(self):
Signal.process_all_signals()
def add_behavior(self, parent, names):
SignalBehavior(self, parent, names)
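# Illustrative usage sketch (_ExamplePauseButton and the "pause" signal are
# hypothetical). add_behavior() relies on a naming convention: for every name
# in `names` the parent must define an `on_<name>_signal` method, and it should
# expose a Qt-style `destroyed` signal so the listeners can be disconnected.
class _ExamplePauseButton:
    """Hypothetical consumer of SignalContext.add_behavior."""
    def __init__(self, signal_context):
        # Connects self.on_pause_signal to the "pause" signal.
        signal_context.add_behavior(self, ["pause"])
    def on_pause_signal(self):
        print("pause toggled")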
|
gpl-2.0
|
jimsmith80/android_kernel_zte_warplte
|
tools/perf/scripts/python/failed-syscalls-by-pid.py
|
11180
|
2058
|
# failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
|
gpl-2.0
|
openplans/streetscore
|
street_score/project/migrations/0008_auto__add_userinfo.py
|
1
|
4037
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserInfo'
db.create_table('project_userinfo', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('lat', self.gf('django.db.models.fields.FloatField')(null=True)),
('lon', self.gf('django.db.models.fields.FloatField')(null=True)),
('location_source', self.gf('django.db.models.fields.CharField')(max_length=32)),
('location_data', self.gf('django.db.models.fields.CharField')(max_length=2048)),
('session', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['sessions.Session'], unique=True)),
))
db.send_create_signal('project', ['UserInfo'])
def backwards(self, orm):
# Deleting model 'UserInfo'
db.delete_table('project_userinfo')
models = {
'project.criterion': {
'Meta': {'object_name': 'Criterion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'prompt': ('django.db.models.fields.TextField', [], {})
},
'project.place': {
'Meta': {'object_name': 'Place'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {}),
'lon': ('django.db.models.fields.FloatField', [], {})
},
'project.rating': {
'Meta': {'object_name': 'Rating'},
'created_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ratings'", 'to': "orm['project.Criterion']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'place1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['project.Place']"}),
'place2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['project.Place']"}),
'score': ('django.db.models.fields.IntegerField', [], {}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'project.userinfo': {
'Meta': {'object_name': 'UserInfo'},
'created_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'location_data': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'location_source': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'lon': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'session': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sessions.Session']", 'unique': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'sessions.session': {
'Meta': {'object_name': 'Session', 'db_table': "'django_session'"},
'expire_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'session_data': ('django.db.models.fields.TextField', [], {}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'})
}
}
complete_apps = ['project']
|
mit
|
ejpbruel/servo
|
components/style/properties/build.py
|
17
|
2862
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os.path
import sys
BASE = os.path.dirname(__file__.replace('\\', '/'))
sys.path.insert(0, os.path.join(BASE, "Mako-0.9.1.zip"))
sys.path.insert(0, BASE) # For importing `data.py`
from mako import exceptions
from mako.lookup import TemplateLookup
from mako.template import Template
import data
def main():
usage = "Usage: %s [ servo | gecko ] [ style-crate | html ]" % sys.argv[0]
if len(sys.argv) < 3:
abort(usage)
product = sys.argv[1]
output = sys.argv[2]
if product not in ["servo", "gecko"] or output not in ["style-crate", "geckolib", "html"]:
abort(usage)
properties = data.PropertiesData(product=product)
rust = render(os.path.join(BASE, "properties.mako.rs"), product=product, data=properties)
if output == "style-crate":
write(os.environ["OUT_DIR"], "properties.rs", rust)
if product == "gecko":
template = os.path.join(BASE, "gecko.mako.rs")
rust = render(template, data=properties)
write(os.environ["OUT_DIR"], "gecko_properties.rs", rust)
elif output == "html":
write_html(properties)
def abort(message):
sys.stderr.write(message + b"\n")
sys.exit(1)
def render(filename, **context):
try:
lookup = TemplateLookup(directories=[BASE])
template = Template(open(filename, "rb").read(),
filename=filename,
input_encoding="utf8",
lookup=lookup,
strict_undefined=True)
# Uncomment to debug generated Python code:
# write("/tmp", "mako_%s.py" % os.path.basename(filename), template.code)
return template.render(**context).encode("utf8")
except:
# Uncomment to see a traceback in generated Python code:
# raise
abort(exceptions.text_error_template().render().encode("utf8"))
def write(directory, filename, content):
if not os.path.exists(directory):
os.makedirs(directory)
open(os.path.join(directory, filename), "wb").write(content)
def write_html(properties):
properties = dict(
(p.name, {
"flag": p.experimental,
"shorthand": hasattr(p, "sub_properties")
})
for p in properties.longhands + properties.shorthands
)
doc_servo = os.path.join(BASE, "..", "..", "..", "target", "doc", "servo")
html = render(os.path.join(BASE, "properties.html.mako"), properties=properties)
write(doc_servo, "css-properties.html", html)
write(doc_servo, "css-properties.json", json.dumps(properties, indent=4))
if __name__ == "__main__":
main()
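# Example invocations (hedged -- the OUT_DIR value and working directory are illustrative):
#   OUT_DIR=target/generated python components/style/properties/build.py servo style-crate
#   python components/style/properties/build.py servo html   # writes css-properties.html / .json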
|
mpl-2.0
|
mapr/hue
|
desktop/core/ext-py/Django-1.6.10/django/contrib/gis/db/backends/mysql/compiler.py
|
120
|
1089
|
from django.contrib.gis.db.models.sql.compiler import GeoSQLCompiler as BaseGeoSQLCompiler
from django.db.backends.mysql import compiler
SQLCompiler = compiler.SQLCompiler
class GeoSQLCompiler(BaseGeoSQLCompiler, SQLCompiler):
def resolve_columns(self, row, fields=()):
"""
Integrate the cases handled both by the base GeoSQLCompiler and the
main MySQL compiler (converting 0/1 to True/False for boolean fields).
Refs #15169.
"""
row = BaseGeoSQLCompiler.resolve_columns(self, row, fields)
return SQLCompiler.resolve_columns(self, row, fields)
class SQLInsertCompiler(compiler.SQLInsertCompiler, GeoSQLCompiler):
pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, GeoSQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, GeoSQLCompiler):
pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, GeoSQLCompiler):
pass
class SQLDateCompiler(compiler.SQLDateCompiler, GeoSQLCompiler):
pass
class SQLDateTimeCompiler(compiler.SQLDateTimeCompiler, GeoSQLCompiler):
pass
|
apache-2.0
|
JayKickliter/gr-ieee802-15-4
|
python/qa_deinterleaver_ff.py
|
2
|
2345
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Felix Wunsch, Communications Engineering Lab (CEL) / Karlsruhe Institute of Technology (KIT) <[email protected]>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import ieee802_15_4_swig as ieee802_15_4
from css_phy import physical_layer as phy
class qa_deinterleaver_ff (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
cfg = phy(slow_rate=False)
data_in = range(541) # some random prime number
self.src = blocks.vector_source_f(data_in)
self.intlv = ieee802_15_4.deinterleaver_ff(intlv_seq=())
self.snk = blocks.vector_sink_f(1)
self.tb.connect(self.src, self.intlv, self.snk)
self.tb.run ()
# check data
data_out = self.snk.data()
self.assertFloatTuplesAlmostEqual(data_in, data_out)
def test_002_t (self):
# set up fg
cfg = phy(slow_rate=True)
data_in = range(3*len(cfg.intlv_seq))
self.src = blocks.vector_source_f(data_in)
self.intlv = ieee802_15_4.deinterleaver_ff(intlv_seq=cfg.intlv_seq)
self.snk = blocks.vector_sink_f(1)
self.tb.connect(self.src, self.intlv, self.snk)
self.tb.run ()
# check data
data_out = self.snk.data()
ref = []
for n in range(3):
for i in range(len(cfg.intlv_seq)):
ref.append(data_in[n*len(cfg.intlv_seq)+cfg.intlv_seq[i]])
self.assertFloatTuplesAlmostEqual(ref, data_out)
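# Pure-Python reference model (illustrative) of the block-wise mapping that
# test_002_t checks above: out[n*L + i] == data_in[n*L + intlv_seq[i]], with
# L = len(intlv_seq).
def _reference_deinterleave(data, intlv_seq):
    block = len(intlv_seq)
    out = []
    for n in range(len(data) // block):
        for i in range(block):
            out.append(data[n * block + intlv_seq[i]])
    return out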
if __name__ == '__main__':
gr_unittest.run(qa_deinterleaver_ff)
|
gpl-3.0
|
enthought/traitsgui
|
enthought/pyface/image_button.py
|
1
|
10217
|
#------------------------------------------------------------------------------
#
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
#
# Description: Image and text-based pyface button/toolbar/radio button control.
#
#------------------------------------------------------------------------------
""" An image and text-based control that can be used as a normal, radio or
toolbar button.
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
import wx
from numpy import array, fromstring, reshape, ravel, dtype
from enthought.traits.api \
import Str, Range, Enum, Instance, Event, false
from widget \
import Widget
from image_resource \
import ImageResource
#-------------------------------------------------------------------------------
# Constants:
#-------------------------------------------------------------------------------
# Text color used when a button is disabled:
DisabledTextColor = wx.Colour( 128, 128, 128 )
#-------------------------------------------------------------------------------
# 'ImageButton' class:
#-------------------------------------------------------------------------------
class ImageButton ( Widget ):
""" An image and text-based control that can be used as a normal, radio or
toolbar button.
"""
# Pens used to draw the 'selection' marker:
_selectedPenDark = wx.Pen(
wx.SystemSettings_GetColour( wx.SYS_COLOUR_3DSHADOW ), 1, wx.SOLID
)
_selectedPenLight = wx.Pen(
wx.SystemSettings_GetColour( wx.SYS_COLOUR_3DHIGHLIGHT ), 1, wx.SOLID
)
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
# The image:
image = Instance( ImageResource, allow_none = True )
# The (optional) label:
label = Str
# Extra padding to add to both the left and right sides:
width_padding = Range( 0, 31, 7 )
# Extra padding to add to both the top and bottom sides:
height_padding = Range( 0, 31, 5 )
# Presentation style:
style = Enum( 'button', 'radio', 'toolbar', 'checkbox' )
# Orientation of the text relative to the image:
orientation = Enum( 'vertical', 'horizontal' )
# Is the control selected ('radio' or 'checkbox' style)?
selected = false
# Fired when a 'button' or 'toolbar' style control is clicked:
clicked = Event
#---------------------------------------------------------------------------
# Initializes the object:
#---------------------------------------------------------------------------
def __init__ ( self, parent, **traits ):
""" Creates a new image control.
"""
self._image = None
super( ImageButton, self ).__init__( **traits )
# Calculate the size of the button:
idx = idy = tdx = tdy = 0
if self._image is not None:
idx = self._image.GetWidth()
idy = self._image.GetHeight()
if self.label != '':
dc = wx.ScreenDC()
dc.SetFont( wx.NORMAL_FONT )
tdx, tdy = dc.GetTextExtent( self.label )
wp2 = self.width_padding + 2
hp2 = self.height_padding + 2
if self.orientation == 'horizontal':
self._ix = wp2
spacing = (idx > 0) * (tdx > 0) * 4
self._tx = self._ix + idx + spacing
dx = idx + tdx + spacing
dy = max( idy, tdy )
self._iy = hp2 + ((dy - idy) / 2)
self._ty = hp2 + ((dy - tdy) / 2)
else:
self._iy = hp2
spacing = (idy > 0) * (tdy > 0) * 2
self._ty = self._iy + idy + spacing
dx = max( idx, tdx )
dy = idy + tdy + spacing
self._ix = wp2 + ((dx - idx) / 2)
self._tx = wp2 + ((dx - tdx) / 2)
# Create the toolkit-specific control:
self._dx = dx + wp2 + wp2
self._dy = dy + hp2 + hp2
self.control = wx.Window( parent, -1,
size = wx.Size( self._dx, self._dy ) )
self.control._owner = self
self._mouse_over = self._button_down = False
# Set up mouse event handlers:
wx.EVT_ENTER_WINDOW( self.control, self._on_enter_window )
wx.EVT_LEAVE_WINDOW( self.control, self._on_leave_window )
wx.EVT_LEFT_DOWN( self.control, self._on_left_down )
wx.EVT_LEFT_UP( self.control, self._on_left_up )
wx.EVT_PAINT( self.control, self._on_paint )
#---------------------------------------------------------------------------
# Handles the 'image' trait being changed:
#---------------------------------------------------------------------------
def _image_changed ( self, image ):
self._image = self._mono_image = None
if image is not None:
self._img = image.create_image()
self._image = self._img.ConvertToBitmap()
if self.control is not None:
self.control.Refresh()
#---------------------------------------------------------------------------
# Handles the 'selected' trait being changed:
#---------------------------------------------------------------------------
def _selected_changed ( self, selected ):
""" Handles the 'selected' trait being changed.
"""
if selected and (self.style == 'radio'):
for control in self.control.GetParent().GetChildren():
owner = getattr( control, '_owner', None )
if (isinstance( owner, ImageButton ) and owner.selected and
(owner is not self)):
owner.selected = False
break
self.control.Refresh()
#-- wx event handlers ----------------------------------------------------------
def _on_enter_window ( self, event ):
""" Called when the mouse enters the widget. """
if self.style != 'button':
self._mouse_over = True
self.control.Refresh()
def _on_leave_window ( self, event ):
""" Called when the mouse leaves the widget. """
if self._mouse_over:
self._mouse_over = False
self.control.Refresh()
def _on_left_down ( self, event ):
""" Called when the left mouse button goes down on the widget. """
self._button_down = True
self.control.CaptureMouse()
self.control.Refresh()
def _on_left_up ( self, event ):
""" Called when the left mouse button goes up on the widget. """
control = self.control
control.ReleaseMouse()
self._button_down = False
wdx, wdy = control.GetClientSizeTuple()
x, y = event.GetX(), event.GetY()
control.Refresh()
if (0 <= x < wdx) and (0 <= y < wdy):
if self.style == 'radio':
self.selected = True
elif self.style == 'checkbox':
self.selected = not self.selected
else:
self.clicked = True
def _on_paint ( self, event ):
""" Called when the widget needs repainting.
"""
wdc = wx.PaintDC( self.control )
wdx, wdy = self.control.GetClientSizeTuple()
ox = (wdx - self._dx) / 2
oy = (wdy - self._dy) / 2
disabled = (not self.control.IsEnabled())
if self._image is not None:
image = self._image
if disabled:
if self._mono_image is None:
img = self._img
data = reshape(fromstring(img.GetData(), dtype('uint8')),
(-1, 3)) * array([[ 0.297, 0.589, 0.114 ]])
g = data[ :, 0 ] + data[ :, 1 ] + data[ :, 2 ]
data[ :, 0 ] = data[ :, 1 ] = data[ :, 2 ] = g
img.SetData(ravel(data.astype(dtype('uint8'))).tostring())
img.SetMaskColour(0, 0, 0)
self._mono_image = img.ConvertToBitmap()
self._img = None
image = self._mono_image
wdc.DrawBitmap( image, ox + self._ix, oy + self._iy, True )
if self.label != '':
if disabled:
wdc.SetTextForeground( DisabledTextColor )
wdc.SetFont( wx.NORMAL_FONT )
wdc.DrawText( self.label, ox + self._tx, oy + self._ty )
pens = [ self._selectedPenLight, self._selectedPenDark ]
bd = self._button_down
style = self.style
is_rc = (style in ( 'radio', 'checkbox' ))
if bd or (style == 'button') or (is_rc and self.selected):
if is_rc:
bd = 1 - bd
wdc.SetBrush( wx.TRANSPARENT_BRUSH )
wdc.SetPen( pens[ bd ] )
wdc.DrawLine( 1, 1, wdx - 1, 1 )
wdc.DrawLine( 1, 1, 1, wdy - 1 )
wdc.DrawLine( 2, 2, wdx - 2, 2 )
wdc.DrawLine( 2, 2, 2, wdy - 2 )
wdc.SetPen( pens[ 1 - bd ] )
wdc.DrawLine( wdx - 2, 2, wdx - 2, wdy - 1 )
wdc.DrawLine( 2, wdy - 2, wdx - 2, wdy - 2 )
wdc.DrawLine( wdx - 3, 3, wdx - 3, wdy - 2 )
wdc.DrawLine( 3, wdy - 3, wdx - 3, wdy - 3 )
elif self._mouse_over and (not self.selected):
wdc.SetBrush( wx.TRANSPARENT_BRUSH )
wdc.SetPen( pens[ bd ] )
wdc.DrawLine( 0, 0, wdx, 0 )
wdc.DrawLine( 0, 1, 0, wdy )
wdc.SetPen( pens[ 1 - bd ] )
wdc.DrawLine( wdx - 1, 1, wdx - 1, wdy )
wdc.DrawLine( 1, wdy - 1, wdx - 1, wdy - 1 )
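# Illustrative usage sketch (parent, sizer, handle_run and the 'run' icon are
# hypothetical; the 'clicked' trait event is observed via on_trait_change):
#
#     button = ImageButton(parent, image=ImageResource('run'), label='Run',
#                          style='toolbar', orientation='horizontal')
#     button.on_trait_change(handle_run, 'clicked')
#     sizer.Add(button.control)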
|
bsd-3-clause
|
drmrd/ansible
|
lib/ansible/plugins/action/patch.py
|
107
|
2644
|
# (c) 2015, Brian Coca <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail
from ansible.module_utils._text import to_native
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
src = self._task.args.get('src', None)
remote_src = boolean(self._task.args.get('remote_src', 'no'), strict=False)
try:
if src is None:
raise AnsibleActionFail("src is required")
elif remote_src:
# everything is remote, so we just execute the module
# without changing any of the module arguments
raise _AnsibleActionDone(result=self._execute_module(task_vars=task_vars))
try:
src = self._find_needle('files', src)
except AnsibleError as e:
raise AnsibleActionFail(to_native(e))
tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, os.path.basename(src))
self._transfer_file(src, tmp_src)
self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
src=tmp_src,
)
)
result.update(self._execute_module('patch', module_args=new_module_args, task_vars=task_vars))
except AnsibleAction as e:
result.update(e.result)
finally:
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
|
gpl-3.0
|
yanchen036/tensorflow
|
tensorflow/python/kernel_tests/identity_n_op_py_test.py
|
58
|
2742
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for IdentityNOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class IdentityNOpTest(test.TestCase):
def testInt32String_6(self):
with self.test_session() as sess:
[value0, value1] = sess.run(
array_ops.identity_n([[1, 2, 3, 4, 5, 6],
[b"a", b"b", b"C", b"d", b"E", b"f", b"g"]]))
self.assertAllEqual(np.array([1, 2, 3, 4, 5, 6]), value0)
self.assertAllEqual(
np.array([b"a", b"b", b"C", b"d", b"E", b"f", b"g"]), value1)
def testInt32_shapes(self):
with self.test_session() as sess:
inp0 = constant_op.constant([10, 20, 30, 40, 50, 60], shape=[2, 3])
inp1 = constant_op.constant([11, 21, 31, 41, 51, 61], shape=[3, 2])
inp2 = constant_op.constant(
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], shape=[5, 3])
[value0, value1,
value2] = sess.run(array_ops.identity_n([inp0, inp1, inp2]))
self.assertAllEqual(np.array([[10, 20, 30], [40, 50, 60]]), value0)
self.assertAllEqual(np.array([[11, 21], [31, 41], [51, 61]]), value1)
self.assertAllEqual(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]]),
value2)
def testString(self):
source = [b"A", b"b", b"C", b"d", b"E", b"f"]
with self.test_session() as sess:
[value] = sess.run(array_ops.identity_n([source]))
self.assertAllEqual(source, value)
def testIdentityShape(self):
with self.test_session():
shape = [2, 3]
array_2x3 = [[1, 2, 3], [6, 5, 4]]
tensor = constant_op.constant(array_2x3)
self.assertEquals(shape, tensor.get_shape())
self.assertEquals(shape, array_ops.identity_n([tensor])[0].get_shape())
self.assertEquals(shape, array_ops.identity_n([array_2x3])[0].get_shape())
if __name__ == "__main__":
test.main()
|
apache-2.0
|
mbkumar/pymatgen
|
setup.py
|
1
|
9254
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""Setup.py for pymatgen."""
import sys
import platform
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext as _build_ext
class build_ext(_build_ext):
"""Extension builder that checks for numpy before install."""
def finalize_options(self):
"""Override finalize_options."""
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
import builtins
if hasattr(builtins, '__NUMPY_SETUP__'):
del builtins.__NUMPY_SETUP__
import importlib
import numpy
importlib.reload(numpy)
self.include_dirs.append(numpy.get_include())
extra_link_args = []
if sys.platform.startswith('win') and platform.machine().endswith('64'):
extra_link_args.append('-Wl,--allow-multiple-definition')
cpp_extra_link_args = extra_link_args
cpp_extra_compile_args = ["-Wno-cpp", "-Wno-unused-function", "-O2", "-march=native", '-std=c++0x']
if sys.platform.startswith('darwin'):
cpp_extra_compile_args.append("-stdlib=libc++")
cpp_extra_link_args = ["-O2", "-march=native", '-stdlib=libc++']
# https://docs.microsoft.com/en-us/cpp/build/reference/compiler-options-listed-alphabetically?view=vs-2017
if sys.platform.startswith('win'):
cpp_extra_compile_args = ['/w', '/O2', '/std:c++0x']
cpp_extra_link_args = extra_link_args
long_desc = """
Official docs: [http://pymatgen.org](http://pymatgen.org/)
Pymatgen (Python Materials Genomics) is a robust, open-source Python library
for materials analysis. These are some of the main features:
1. Highly flexible classes for the representation of Element, Site, Molecule,
Structure objects.
2. Extensive input/output support, including support for
[VASP](http://cms.mpi.univie.ac.at/vasp/), [ABINIT](http://www.abinit.org/),
CIF, Gaussian, XYZ, and many other file formats.
3. Powerful analysis tools, including generation of phase diagrams, Pourbaix
diagrams, diffusion analyses, reactions, etc.
4. Electronic structure analyses, such as density of states and band structure.
5. Integration with the Materials Project REST API.
Pymatgen is free to use. However, we also welcome your help to improve this
library by making your own contributions. These contributions can be in the
form of additional tools or modules you develop, or feature requests and bug
reports. Please report any bugs and issues at pymatgen's [Github page]
(https://github.com/materialsproject/pymatgen). For help with any pymatgen
issues, please use the [Discourse page](https://discuss.matsci.org/c/pymatgen).
Why use pymatgen?
=================
There are many materials analysis codes out there, both commercial and free,
but pymatgen offers several advantages:
1. **It is (fairly) robust.** Pymatgen is used by thousands of researchers,
and is the analysis code powering the [Materials Project](https://www.materialsproject.org).
The analysis it produces survives rigorous scrutiny every single day. Bugs
tend to be found and corrected quickly. Pymatgen also uses
[CircleCI](https://circleci.com) and [Appveyor](https://www.appveyor.com/)
for continuous integration on the Linux and Windows platforms,
respectively, which ensures that every commit passes a comprehensive suite
of unittests.
2. **It is well documented.** Fairly comprehensive documentation has been
written to help you get to grips with it quickly.
3. **It is open.** You are free to use and contribute to pymatgen. It also means
that pymatgen is continuously being improved. We will attribute any code you
contribute to any publication you specify. Contributing to pymatgen means
your research becomes more visible, which translates to greater impact.
4. **It is fast.** Many of the core numerical methods in pymatgen have been
optimized by vectorizing in numpy/scipy. This means that coordinate
manipulations are extremely fast and are in fact comparable to codes
written in other languages. Pymatgen also comes with a complete system for
handling periodic boundary conditions.
5. **It will be around.** Pymatgen is not a pet research project. It is used in
the well-established Materials Project. It is also actively being developed
and maintained by the [Materials Virtual Lab](https://www.materialsvirtuallab.org),
the ABINIT group and many other research groups.
With effect from version 2019.1.1, pymatgen only supports Python 3.x. Users
who require Python 2.7 should install pymatgen v2018.x.
"""
setup(
name="pymatgen",
packages=find_packages(),
version="2020.7.3",
cmdclass={'build_ext': build_ext},
setup_requires=['numpy>=1.14.3', 'setuptools>=18.0'],
python_requires='>=3.6',
install_requires=["numpy>=1.14.3", "requests", "ruamel.yaml>=0.15.6",
"monty>=3.0.2", "scipy>=1.5.0",
"tabulate", "spglib>=1.9.9.44", "networkx>=2.2",
"matplotlib>=1.5", "palettable>=3.1.1", "sympy", "pandas",
"plotly>=4.5.0"],
extras_require={
"provenance": ["pybtex"],
"ase": ["ase>=3.3"],
"vis": ["vtk>=6.0.0"],
"abinit": ["netcdf4"],
':python_version < "3.7"': [
"dataclasses>=0.6",
]},
package_data={
"pymatgen.core": ["*.json", "py.typed"],
"pymatgen.analysis": ["*.yaml", "*.json", "*.csv"],
"pymatgen.analysis.chemenv.coordination_environments.coordination_geometries_files": ["*.txt", "*.json"],
"pymatgen.analysis.chemenv.coordination_environments.strategy_files": ["*.json"],
"pymatgen.analysis.magnetism": ["*.json", "*.yaml"],
"pymatgen.analysis.structure_prediction": ["data/*.json", "*.yaml"],
"pymatgen.io": ["*.yaml"],
"pymatgen.io.vasp": ["*.yaml", "*.json"],
"pymatgen.io.lammps": ["templates/*.*", "*.yaml"],
"pymatgen.io.lobster": ["lobster_basis/*.yaml"],
"pymatgen.io.feff": ["*.yaml"],
"pymatgen.symmetry": ["*.yaml", "*.json", "*.sqlite"],
"pymatgen.entries": ["*.yaml"],
"pymatgen.vis": ["ElementColorSchemes.yaml"],
"pymatgen.command_line": ["OxideTersoffPotentials"],
"pymatgen.analysis.defects": ["*.json"],
"pymatgen.analysis.diffraction": ["*.json"],
"pymatgen.util": ["structures/*.json"]},
author="Pymatgen Development Team",
author_email="[email protected]",
maintainer="Shyue Ping Ong, Matthew Horton",
maintainer_email="[email protected], [email protected]",
url="http://www.pymatgen.org",
license="MIT",
description="Python Materials Genomics is a robust materials "
"analysis code that defines core object representations for "
"structures and molecules with support for many electronic "
"structure codes. It is currently the core analysis code "
"powering the Materials Project "
"(https://www.materialsproject.org).",
long_description=long_desc,
long_description_content_type='text/markdown',
keywords=["VASP", "gaussian", "ABINIT", "nwchem", "qchem", "materials", "science",
"project", "electronic", "structure", "analysis", "phase", "diagrams",
"crystal"],
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Chemistry",
"Topic :: Software Development :: Libraries :: Python Modules"
],
ext_modules=[Extension("pymatgen.optimization.linear_assignment",
["pymatgen/optimization/linear_assignment.c"],
extra_link_args=extra_link_args),
Extension("pymatgen.util.coord_cython",
["pymatgen/util/coord_cython.c"],
extra_link_args=extra_link_args),
Extension("pymatgen.optimization.neighbors",
["pymatgen/optimization/neighbors.cpp"],
extra_compile_args=cpp_extra_compile_args,
extra_link_args=cpp_extra_link_args,
language='c++')],
entry_points={
'console_scripts': [
'pmg = pymatgen.cli.pmg:main',
'feff_input_generation = pymatgen.cli.feff_input_generation:main',
'feff_plot_cross_section = pymatgen.cli.feff_plot_cross_section:main',
'feff_plot_dos = pymatgen.cli.feff_plot_dos:main',
'gaussian_analyzer = pymatgen.cli.gaussian_analyzer:main',
'get_environment = pymatgen.cli.get_environment:main',
]
}
)
|
mit
|