Code-file dataset dump: one record per source file. Record fields: repo_name,
path, copies, size, content, license, hash, line_mean, line_max, alpha_frac,
autogenerated, ratio, config_test, has_no_keywords, few_assignments. Each file
below is introduced by a "# ==== repo :: path (license: ...) ====" header line.
# ==== b1-systems/kiwi :: kiwi/tasks/result_bundle.py (license: gpl-3.0) ====
# Copyright (c) 2015 SUSE Linux GmbH. All rights reserved.
#
# This file is part of kiwi.
#
# kiwi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kiwi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kiwi. If not, see <http://www.gnu.org/licenses/>
#
"""
usage: kiwi result bundle -h | --help
kiwi result bundle --target-dir=<directory> --id=<bundle_id> --bundle-dir=<directory>
[--zsync-source=<download_location>]
kiwi result bundle help
commands:
bundle
create result bundle from the image build results in the
specified target directory. Each result image will contain
the specified bundle identifier as part of its filename.
Uncompressed image files will also become xz compressed
and a sha sum will be created from every result image.
options:
--bundle-dir=<directory>
directory to store the bundle results
--id=<bundle_id>
the bundle id. A free form text appended to the version
information of the result image filename
--target-dir=<directory>
the target directory to expect image build results
--zsync-source=<download_location>
        specify the download location from which the bundle file(s)
        can be fetched. The information is effective if zsync is
used to sync the bundle. The zsync control file is only created
for those bundle files which are marked for compression because
in a kiwi build only those are meaningful for a partial binary
file download. It is expected that all files from a bundle
are placed to the same download location
"""
from collections import OrderedDict
import os
# project
from kiwi.tasks.base import CliTask
from kiwi.help import Help
from kiwi.system.result import Result
from kiwi.logger import log
from kiwi.path import Path
from kiwi.utils.compress import Compress
from kiwi.utils.checksum import Checksum
from kiwi.command import Command
from kiwi.exceptions import (
KiwiBundleError
)
class ResultBundleTask(CliTask):
"""
Implements result bundler
Attributes
* :attr:`manual`
Instance of Help
"""
def process(self):
"""
Create result bundle from the image build results in the
specified target directory. Each result image will contain
the specified bundle identifier as part of its filename.
Uncompressed image files will also become xz compressed
and a sha sum will be created from every result image
"""
self.manual = Help()
if self._help():
return
# load serialized result object from target directory
result_directory = os.path.abspath(self.command_args['--target-dir'])
bundle_directory = os.path.abspath(self.command_args['--bundle-dir'])
if result_directory == bundle_directory:
raise KiwiBundleError(
'Bundle directory must be different from target directory'
)
log.info(
'Bundle build results from %s', result_directory
)
result = Result.load(
result_directory + '/kiwi.result'
)
image_version = result.xml_state.get_image_version()
image_name = result.xml_state.xml_data.get_name()
ordered_results = OrderedDict(sorted(result.get_results().items()))
# hard link bundle files, compress and build checksum
if not os.path.exists(bundle_directory):
Path.create(bundle_directory)
for result_file in list(ordered_results.values()):
if result_file.use_for_bundle:
bundle_file_basename = os.path.basename(result_file.filename)
                # The bundle id is only taken into account for image results
                # which contain the image version appended to their file names
part_name = list(bundle_file_basename.partition(image_name))
bundle_file_basename = ''.join([
part_name[0], part_name[1],
part_name[2].replace(
image_version,
image_version + '-' + self.command_args['--id']
)
])
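                # Illustrative effect of the partition/replace above (file
                # names are hypothetical): with image_name 'LimeJeOS',
                # image_version '1.13.2' and --id '42',
                # 'LimeJeOS.x86_64-1.13.2.tar.xz' becomes
                # 'LimeJeOS.x86_64-1.13.2-42.tar.xz'.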
log.info('Creating %s', bundle_file_basename)
bundle_file = ''.join(
[bundle_directory, '/', bundle_file_basename]
)
Command.run(
[
'cp', result_file.filename, bundle_file
]
)
if self.runtime_config.is_bundle_compression_requested() and \
result_file.compress:
log.info('--> XZ compressing')
compress = Compress(bundle_file)
compress.xz(self.runtime_config.get_xz_options())
bundle_file = compress.compressed_filename
if self.command_args['--zsync-source'] and result_file.shasum:
# Files with a checksum are considered to be image files
# and are therefore eligible to be provided via the
# requested Partial/differential file download based on
# zsync
zsyncmake = Path.which('zsyncmake', access_mode=os.X_OK)
if zsyncmake:
log.info('--> Creating zsync control file')
Command.run(
[
zsyncmake, '-e', '-u', os.sep.join(
[
self.command_args['--zsync-source'],
os.path.basename(bundle_file)
]
), '-o', bundle_file + '.zsync', bundle_file
]
)
else:
log.warning(
'--> zsyncmake missing, zsync setup skipped'
)
if result_file.shasum:
log.info('--> Creating SHA 256 sum')
checksum = Checksum(bundle_file)
with open(bundle_file + '.sha256', 'w') as shasum:
shasum.write(
'{0} {1}'.format(
checksum.sha256(), bundle_file_basename
)
)
def _help(self):
if self.command_args['help']:
self.manual.show('kiwi::result::bundle')
else:
return False
return self.manual
# ==== jeonghoonkang/BerePi :: apps/lcd_berepi/16x2_LCD_RGB.py (license: bsd-2-clause) ====
#!/usr/bin/python
#-*- coding: utf-8 -*-
# Author : Matt Hawkins
# : ipmstyle, https://github.com/ipmstyle
# : jeonghoonkang, https://github.com/jeonghoonkang
# The wiring for the LCD is as follows:
# 1 : GND
# 2 : 5V
# 3 : Contrast (0-5V)*
# 4 : RS (Register Select)
# 5 : R/W (Read Write) - GROUND THIS PIN
# 6 : Enable or Strobe
# 7 : Data Bit 0 - NOT USED
# 8 : Data Bit 1 - NOT USED
# 9 : Data Bit 2 - NOT USED
# 10: Data Bit 3 - NOT USED
# 11: Data Bit 4
# 12: Data Bit 5
# 13: Data Bit 6
# 14: Data Bit 7
# 15: LCD Backlight +5V**
# 16: RED LCD Backlight (-)
# 17: GREEN LCD Backlight (-)
# 18: BLUE LCD Backlight (-)
import RPi.GPIO as GPIO
import time
# Define GPIO to LCD mapping
LCD_RS = 27
LCD_E = 22
LCD_D4 = 25
LCD_D5 = 24
LCD_D6 = 23
LCD_D7 = 12
#LED_ON = 4
LCD_RED = 4
LCD_GREEN = 17
LCD_BLUE = 7
# Define some device constants
LCD_WIDTH = 16 # Maximum characters per line
LCD_CHR = True
LCD_CMD = False
LCD_LINE_1 = 0x80 # LCD RAM address for the 1st line
LCD_LINE_2 = 0xC0 # LCD RAM address for the 2nd line
# Timing constants
E_PULSE = 0.0005
E_DELAY = 0.0005
GPIO.setmode(GPIO.BCM) # Use BCM GPIO numbers
GPIO.setup(LCD_E, GPIO.OUT) # E
GPIO.setup(LCD_RS, GPIO.OUT) # RS
GPIO.setup(LCD_D4, GPIO.OUT) # DB4
GPIO.setup(LCD_D5, GPIO.OUT) # DB5
GPIO.setup(LCD_D6, GPIO.OUT) # DB6
GPIO.setup(LCD_D7, GPIO.OUT) # DB7
#GPIO.setup(LED_ON, GPIO.OUT) # Backlight enable
GPIO.setup(LCD_RED, GPIO.OUT) # RED Backlight enable
GPIO.setup(LCD_GREEN, GPIO.OUT) # GREEN Backlight enable
GPIO.setup(LCD_BLUE, GPIO.OUT) # BLUE Backlight enable
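# The three colour pins drive the backlight cathodes (pins 16-18 in the wiring
# table above), so they appear to act active-low: writing False sinks current
# and lights that colour, while True switches it off. The sequences below rely
# on this.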
def main():
# Main program block
# Initialise display
lcd_init()
# Toggle backlight on-off-on
red_backlight(True)
time.sleep(1)
red_backlight(False)
time.sleep(1)
green_backlight(True)
time.sleep(1)
green_backlight(False)
time.sleep(1)
blue_backlight(True)
time.sleep(1)
blue_backlight(False)
time.sleep(1)
while True:
GPIO.output(LCD_RED, True)
GPIO.output(LCD_GREEN, True)
GPIO.output(LCD_BLUE, True)
lcd_string("Rasbperry Pi",LCD_LINE_1,2)
lcd_string(": RED",LCD_LINE_2,2)
red_backlight(False)
time.sleep(3) # 3 second delay
GPIO.output(LCD_RED, True)
GPIO.output(LCD_GREEN, True)
GPIO.output(LCD_BLUE, True)
lcd_string("Rasbperry Pi",LCD_LINE_1,2)
lcd_string(": GREEN",LCD_LINE_2,2)
green_backlight(False)
time.sleep(3) # 3 second delay
GPIO.output(LCD_RED, True)
GPIO.output(LCD_GREEN, True)
GPIO.output(LCD_BLUE, True)
blue_backlight(True)
lcd_string("Rasbperry Pi",LCD_LINE_1,2)
lcd_string(": BLUE",LCD_LINE_2,2)
blue_backlight(False)
time.sleep(3) # 3 second delay
"""
#- RED + GREEN
red_backlight(True)
green_backlight(True)
lcd_string("RED",LCD_LINE_1,2)
lcd_string("GREEN",LCD_LINE_2,2)
time.sleep(3)
green_backlight(False)
red_backlight(False)
time.sleep(0.5)
#- BLUE + GREEN
blue_backlight(True)
green_backlight(True)
lcd_string("BLUE",LCD_LINE_1,2)
lcd_string("GREEN",LCD_LINE_2,2)
time.sleep(3)
green_backlight(False)
blue_backlight(False)
#- RED + BLUE
red_backlight(True)
blue_backlight(True)
lcd_string("RED",LCD_LINE_1,2)
lcd_string("BLUE",LCD_LINE_2,2)
time.sleep(3)
blue_backlight(False)
red_backlight(False)
#- RED + GREEN + BLUE
red_backlight(True)
blue_backlight(True)
green_backlight(True)
lcd_string("RED, BLUE",LCD_LINE_1,2)
lcd_string("GREEN",LCD_LINE_2,2)
time.sleep(3)
red_backlight(False)
blue_backlight(False)
green_backlight(False)
time.sleep(5)
"""
def lcd_byte(bits, mode):
# Send byte to data pins
# bits = data
# mode = True for character
# False for command
GPIO.output(LCD_RS, mode) # RS
# High bits
GPIO.output(LCD_D4, False)
GPIO.output(LCD_D5, False)
GPIO.output(LCD_D6, False)
GPIO.output(LCD_D7, False)
if bits&0x10==0x10:
GPIO.output(LCD_D4, True)
if bits&0x20==0x20:
GPIO.output(LCD_D5, True)
if bits&0x40==0x40:
GPIO.output(LCD_D6, True)
if bits&0x80==0x80:
GPIO.output(LCD_D7, True)
# Toggle 'Enable' pin
lcd_toggle_enable()
# Low bits
GPIO.output(LCD_D4, False)
GPIO.output(LCD_D5, False)
GPIO.output(LCD_D6, False)
GPIO.output(LCD_D7, False)
if bits&0x01==0x01:
GPIO.output(LCD_D4, True)
if bits&0x02==0x02:
GPIO.output(LCD_D5, True)
if bits&0x04==0x04:
GPIO.output(LCD_D6, True)
if bits&0x08==0x08:
GPIO.output(LCD_D7, True)
# Toggle 'Enable' pin
lcd_toggle_enable()
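# Worked example of the two-nibble transfer above: lcd_byte(0x4D, LCD_CHR)
# sends 'M' (0b01001101); the high nibble 0100 raises only D6, then after the
# enable pulse the low nibble 1101 raises D4, D6 and D7.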
def lcd_init():
# Initialise display
lcd_byte(0x33,LCD_CMD) # 110011 Initialise
lcd_byte(0x32,LCD_CMD) # 110010 Initialise
lcd_byte(0x06,LCD_CMD) # 000110 Cursor move direction
lcd_byte(0x0C,LCD_CMD) # 001100 Display On,Cursor Off, Blink Off
lcd_byte(0x28,LCD_CMD) # 101000 Data length, number of lines, font size
lcd_byte(0x01,LCD_CMD) # 000001 Clear display
time.sleep(E_DELAY)
def lcd_clear():
lcd_byte(0x01,LCD_CMD) # 000001 Clear display
time.sleep(E_DELAY)
def lcd_toggle_enable():
# Toggle enable
time.sleep(E_DELAY)
GPIO.output(LCD_E, True)
time.sleep(E_PULSE)
GPIO.output(LCD_E, False)
time.sleep(E_DELAY)
def lcd_string(message,line,style):
# Send string to display
# style=1 Left justified
# style=2 Centred
# style=3 Right justified
if style==1:
message = message.ljust(LCD_WIDTH," ")
elif style==2:
message = message.center(LCD_WIDTH," ")
elif style==3:
message = message.rjust(LCD_WIDTH," ")
lcd_byte(line, LCD_CMD)
for i in range(LCD_WIDTH):
lcd_byte(ord(message[i]),LCD_CHR)
#def lcd_backlight(flag):
# # Toggle backlight on-off-on
# GPIO.output(LED_ON, flag)
def red_backlight(flag):
# Toggle red-backlight on-off-on
GPIO.output(LCD_RED, flag)
def green_backlight(flag):
# Toggle green-backlight on-off-on
GPIO.output(LCD_GREEN, flag)
def blue_backlight(flag):
# Toggle blue-backlight on-off-on
GPIO.output(LCD_BLUE, flag)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
finally:
#lcd_byte(0x01, LCD_CMD)
#lcd_string("Goodbye!",LCD_LINE_1,2)
GPIO.cleanup()
# ==== BollMose/sensor :: pms7003.py (license: apache-2.0) ====
#encoding=utf-8
import os
import sys
import time
import serial
import sqlite3
from struct import *
def open_device(dev):
return serial.Serial(dev, baudrate=9600, timeout=2.0)
def close_device(ser):
ser.close()
def read_one_data(ser):
rv = b''
while True:
ch1 = ser.read()
if ch1 == b'\x42':
ch2 = ser.read()
if ch2 == b'\x4d':
rv += ch1 + ch2
rv += ser.read(32)
return rv
def read_native_pms(ser):
recv = read_one_data(ser)
length = unpack('>h', recv[2:4])[0]
if length != 28:
return (False, "the length of data is not equal 28.")
pms = unpack('>hhhhhhhhhhhhh', recv[4:30])
    # verify checksum: byte sum of the start bytes, length field and data payload
    check = unpack('>h', recv[30:32])[0]
    byte_sum = 0x42 + 0x4d + 28
    for pm in pms:
        byte_sum += (pm & 0x00ff)
        byte_sum += ((pm & 0xff00) >> 8)
    if check != byte_sum:
        return (False, "checksum mismatch, expected:actual, {}:{}".format(byte_sum, check))
return (True, pms)
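# Frame layout assumed by read_native_pms (32 bytes total): the 0x42 0x4D start
# bytes, a 2-byte big-endian frame length (expected to be 28), thirteen
# big-endian 16-bit data words, and a final 2-byte checksum equal to the byte
# sum of everything before it (start and length bytes included).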
if __name__ == '__main__':
with open_device("/dev/ttyUSB0") as ser:
ret, pms = read_native_pms(ser)
ser.flushInput()
        if ret == False:
            print "read error: ", pms
            sys.exit(1)  # pms holds an error message here, not sensor data
print "version: ", (pms[12] & 0xff00)>>8
print "error code: ", (pms[12] & 0x00ff)
print(
'PM1.0(CF=1): {}\n'
'PM2.5(CF=1): {}\n'
'PM10 (CF=1): {}\n'
'PM1.0 (STD): {}\n'
'PM2.5 (STD): {}\n'
'PM10 (STD): {}\n'
'>0.3um : {}\n'
'>0.5um : {}\n'
'>1.0um : {}\n'
'>2.5um : {}\n'
'>5.0um : {}\n'
'>10um : {}\n'
.format(pms[0], pms[1], pms[2],
pms[3], pms[4], pms[5],
pms[6], pms[7], pms[8],
pms[9], pms[10], pms[11]))
# ==== googleapis/googleapis-gen :: google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/enums/types/search_engine_results_page_type.py (license: apache-2.0) ====
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v7.enums',
marshal='google.ads.googleads.v7',
manifest={
'SearchEngineResultsPageTypeEnum',
},
)
class SearchEngineResultsPageTypeEnum(proto.Message):
r"""The type of the search engine results page. """
class SearchEngineResultsPageType(proto.Enum):
r"""The type of the search engine results page."""
UNSPECIFIED = 0
UNKNOWN = 1
ADS_ONLY = 2
ORGANIC_ONLY = 3
ADS_AND_ORGANIC = 4
__all__ = tuple(sorted(__protobuf__.manifest))
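# Illustrative access (assumes the usual proto-plus IntEnum semantics; not part
# of this module):
#
#   page_type = SearchEngineResultsPageTypeEnum.SearchEngineResultsPageType.ADS_AND_ORGANIC
#   assert page_type == 4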
# ==== grbd/GBD.Build.BlackJack :: blackjack/cmake/storage/SetList.py (license: apache-2.0) ====
from blackjack.cmake.ScriptBase import ScriptBase
class SetList(ScriptBase):
"""
Represents a collection of source files to be passed to a Target
"""
def __init__(self, name: str, srcs: [] = None, parentscope: bool = False):
super().__init__()
self._Name = None
self.Name = name
"""Name of the Set"""
self.Srcs = srcs
"""List of Sources"""
self.ParentScope = parentscope
"""If to set the list within the parent scope"""
if self.Srcs is None: self.Srcs = []
return
@property
def Name(self):
"""Name of the Set"""
return self._Name
@Name.setter
def Name(self, value):
self._Name = value.replace(" ", "_")
return
def render_body(self):
from blackjack.cmake.cmd.cmake_set import cmake_set
ret = ["## Source Set"]
opts = ""
if self.ParentScope:
opts = "PARENT_SCOPE"
setcmd = cmake_set(self.Name, self.Srcs, opts)
ret += setcmd.render()
return ret
def add(self, items):
"""Add a single item or list of items"""
if isinstance(items, str):
self.Srcs.append(items)
if isinstance(items, list):
self.Srcs += items
if isinstance(items, SetList):
self.Srcs += items.Srcs
return
def add_spacesep(self, items_str):
"""Add a Space seperated list of items"""
tmparr = [str(i) for i in items_str.split()]
self.Srcs += tmparr
return
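# Minimal usage sketch (assumes cmake_set renders a conventional CMake set()
# command; the rendered output shown is approximate):
#
#   srcs = SetList("APP_SOURCES", ["main.cpp"])
#   srcs.add(["util.cpp", "io.cpp"])
#   print("\n".join(srcs.render_body()))
#   # ## Source Set
#   # set(APP_SOURCES main.cpp util.cpp io.cpp)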
# ==== tgerdes/toolbot :: toolbot/adapter/shell.py (license: mit) ====
import asyncio
import sys
from toolbot.adapter import Adapter
from toolbot.message import TextMessage
class ShellAdapter(Adapter):
def __init__(self, bot):
super().__init__(bot)
def send(self, envelope, *strings):
for string in strings:
# TODO: async print?
print("\x1b[01;32m{}\x1b[0m".format(string))
def emote(self, envelope, *strings):
self.send(envelope, *("* {}".format(string) for string in strings))
def reply(self, envelope, *strings):
self.send(envelope, *("{name}: {msg}".format(
name=envelope['user'].name,
msg=string) for string in strings))
@asyncio.coroutine
def input_loop(self, loop):
f = sys.stdin
fno = f.fileno()
q = asyncio.Queue()
def do_read():
q.put_nowait(f.readline())
loop.add_reader(fno, do_read)
while True:
print("{}> ".format(self.bot.name), end="")
sys.stdout.flush()
line = yield from q.get()
if not line:
print()
break
user = self.bot.brain.userForId(1, name="Shell", room="Shell")
self.receive(TextMessage(user, line, "messageId"))
loop.remove_reader(fno)
self.bot.loop.stop()
def run(self, loop):
asyncio.async(self.input_loop(loop))
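# Hypothetical wiring of the adapter (the bot object and surrounding startup
# code are assumptions, not shown in this file):
#
#   loop = asyncio.get_event_loop()
#   ShellAdapter(bot).run(loop)
#   loop.run_forever()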
# ==== HyperloopTeam/FullOpenMDAO :: lib/python2.7/site-packages/Pyevolve-0.6-py2.7.egg/pyevolve/Crossovers.py (license: gpl-2.0) ====
"""
:mod:`Crossovers` -- crossover methods module
=====================================================================
In this module we have the genetic operators of crossover (or recombination) for each chromosome representation.
"""
from random import randint as rand_randint, choice as rand_choice
from random import random as rand_random
import math
import Util
import Consts
#############################
## 1D Binary String ##
#############################
def G1DBinaryStringXSinglePoint(genome, **args):
""" The crossover of 1D Binary String, Single Point
.. warning:: You can't use this crossover method for binary strings with length of 1.
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
if len(gMom) == 1:
Util.raiseException("The Binary String have one element, can't use the Single Point Crossover method !", TypeError)
cut = rand_randint(1, len(gMom)-1)
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
sister[cut:] = gDad[cut:]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
brother[cut:] = gMom[cut:]
return (sister, brother)
def G1DBinaryStringXTwoPoint(genome, **args):
""" The 1D Binary String crossover, Two Point
.. warning:: You can't use this crossover method for binary strings with length of 1.
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
if len(gMom) == 1:
Util.raiseException("The Binary String have one element, can't use the Two Point Crossover method !", TypeError)
cuts = [rand_randint(1, len(gMom)-1), rand_randint(1, len(gMom)-1)]
if cuts[0] > cuts[1]:
Util.listSwapElement(cuts, 0, 1)
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
sister[cuts[0]:cuts[1]] = gDad[cuts[0]:cuts[1]]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
brother[cuts[0]:cuts[1]] = gMom[cuts[0]:cuts[1]]
return (sister, brother)
def G1DBinaryStringXUniform(genome, **args):
""" The G1DList Uniform Crossover """
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
sister = gMom.clone()
brother = gDad.clone()
sister.resetStats()
brother.resetStats()
for i in xrange(len(gMom)):
if Util.randomFlipCoin(Consts.CDefG1DBinaryStringUniformProb):
temp = sister[i]
sister[i] = brother[i]
brother[i] = temp
return (sister, brother)
####################
## 1D List ##
####################
def G1DListCrossoverSinglePoint(genome, **args):
""" The crossover of G1DList, Single Point
.. warning:: You can't use this crossover method for lists with just one element.
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
if len(gMom) == 1:
Util.raiseException("The 1D List have one element, can't use the Single Point Crossover method !", TypeError)
cut = rand_randint(1, len(gMom)-1)
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
sister[cut:] = gDad[cut:]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
brother[cut:] = gMom[cut:]
return (sister, brother)
def G1DListCrossoverTwoPoint(genome, **args):
""" The G1DList crossover, Two Point
.. warning:: You can't use this crossover method for lists with just one element.
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
if len(gMom) == 1:
Util.raiseException("The 1D List have one element, can't use the Two Point Crossover method !", TypeError)
cuts = [rand_randint(1, len(gMom)-1), rand_randint(1, len(gMom)-1)]
if cuts[0] > cuts[1]:
Util.listSwapElement(cuts, 0, 1)
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
sister[cuts[0]:cuts[1]] = gDad[cuts[0]:cuts[1]]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
brother[cuts[0]:cuts[1]] = gMom[cuts[0]:cuts[1]]
return (sister, brother)
def G1DListCrossoverUniform(genome, **args):
""" The G1DList Uniform Crossover """
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
sister = gMom.clone()
brother = gDad.clone()
sister.resetStats()
brother.resetStats()
for i in xrange(len(gMom)):
if Util.randomFlipCoin(Consts.CDefG1DListCrossUniformProb):
temp = sister[i]
sister[i] = brother[i]
brother[i] = temp
return (sister, brother)
def G1DListCrossoverOX(genome, **args):
""" The OX Crossover for G1DList (order crossover) """
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
listSize = len(gMom)
c1, c2 = [rand_randint(1, len(gMom)-1), rand_randint(1, len(gMom)-1)]
while c1 == c2:
c2 = rand_randint(1, len(gMom)-1)
if c1 > c2:
h = c1
c1 = c2
c2 = h
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
P1 = [ c for c in gMom[c2:] + gMom[:c2] if c not in gDad[c1:c2] ]
sister.genomeList = P1[listSize - c2:] + gDad[c1:c2] + P1[:listSize-c2]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
P2 = [ c for c in gDad[c2:] + gDad[:c2] if c not in gMom[c1:c2] ]
brother.genomeList = P2[listSize - c2:] + gMom[c1:c2] + P2[:listSize-c2]
   if sister is not None:
      assert listSize == len(sister)
   if brother is not None:
      assert listSize == len(brother)
return (sister, brother)
def G1DListCrossoverEdge(genome, **args):
""" THe Edge Recombination crossover for G1DList (widely used for TSP problem)
See more information in the `Edge Recombination Operator <http://en.wikipedia.org/wiki/Edge_recombination_operator>`_
Wikipedia entry.
"""
gMom, sisterl = args["mom"], []
gDad, brotherl = args["dad"], []
mom_edges, dad_edges, merge_edges = Util.G1DListGetEdgesComposite(gMom, gDad)
for c, u in (sisterl, set(gMom)), (brotherl, set(gDad)):
curr = None
for i in xrange(len(gMom)):
curr = rand_choice(tuple(u)) if not curr else curr
c.append(curr)
u.remove(curr)
d = [v for v in merge_edges.get(curr, []) if v in u]
if d: curr = rand_choice(d)
else:
s = [v for v in mom_edges.get(curr, []) if v in u]
s += [v for v in dad_edges.get(curr, []) if v in u]
curr = rand_choice(s) if s else None
sister = gMom.clone()
brother = gDad.clone()
sister.resetStats()
brother.resetStats()
sister.genomeList = sisterl
brother.genomeList = brotherl
return (sister, brother)
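# Sketch of what G1DListCrossoverEdge does above: Util.G1DListGetEdgesComposite
# builds adjacency maps for mom, dad and their union; each child is then grown
# by hopping from the current gene to a random still-unused neighbour
# (preferring edges shared by both parents), falling back to a random unused
# gene when no neighbour remains.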
def G1DListCrossoverCutCrossfill(genome, **args):
""" The crossover of G1DList, Cut and crossfill, for permutations
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
if len(gMom) == 1:
Util.raiseException("The 1D List have one element, can't use the Single Point Crossover method !", TypeError)
cut = rand_randint(1, len(gMom)-1)
if args["count"] >= 1:
sister = gMom.clone()
mother_part = gMom[0:cut]
sister.resetStats()
i = (len(sister) - cut)
x = 0
for v in gDad:
if v in mother_part: continue
if x >= i: break
sister[cut+x] = v
x += 1
if args["count"] == 2:
brother = gDad.clone()
father_part = gDad[0:cut]
brother.resetStats()
i = (len(brother) - cut)
x = 0
for v in gMom:
if v in father_part: continue
if x >= i: break
brother[cut+x] = v
x += 1
return (sister, brother)
def G1DListCrossoverRealSBX(genome, **args):
""" Experimental SBX Implementation - Follows the implementation in NSGA-II (Deb, et.al)
Some implementation `reference <http://vision.ucsd.edu/~sagarwal/icannga.pdf>`_.
.. warning:: This crossover method is Data Type Dependent, which means that
must be used for 1D genome of real values.
"""
EPS = Consts.CDefG1DListSBXEPS
# Crossover distribution index
eta_c = Consts.CDefG1DListSBXEtac
gMom = args["mom"]
gDad = args["dad"]
# Get the variable bounds ('gDad' could have been used; but I love Mom:-))
lb = gMom.getParam("rangemin", Consts.CDefRangeMin)
ub = gMom.getParam("rangemax", Consts.CDefRangeMax)
sister = gMom.clone()
brother = gDad.clone()
sister.resetStats()
brother.resetStats()
for i in range(0,len(gMom)):
if math.fabs(gMom[i]-gDad[i]) > EPS:
if gMom[i] > gDad[i]:
#swap
temp = gMom[i]
gMom[i] = gDad[i]
gDad[i] = temp
#random number betwn. 0 & 1
u = rand_random()
beta = 1.0 + 2*(gMom[i] - lb)/(1.0*(gDad[i]-gMom[i]))
alpha = 2.0 - beta**(-(eta_c+1.0))
if u <= (1.0/alpha):
beta_q = (u*alpha)**(1.0/((eta_c + 1.0)*1.0))
else:
beta_q = (1.0/(2.0-u*alpha))**(1.0/(1.0*(eta_c + 1.0)))
brother[i] = 0.5*((gMom[i] + gDad[i]) - beta_q*(gDad[i]-gMom[i]))
beta = 1.0 + 2.0*(ub - gDad[i])/(1.0*(gDad[i]-gMom[i]))
alpha = 2.0 - beta**(-(eta_c+1.0))
if u <= (1.0/alpha):
beta_q = (u*alpha)**(1.0/((eta_c + 1)*1.0))
else:
beta_q = (1.0/(2.0-u*alpha))**(1.0/(1.0*(eta_c + 1.0)))
sister[i] = 0.5*((gMom[i] + gDad[i]) + beta_q*(gDad[i]-gMom[i]))
if brother[i] > ub: brother[i] = ub
if brother[i] < lb: brother[i] = lb
if sister[i] > ub: sister[i] = ub
if sister[i] < lb: sister[i] = lb
if rand_random() > 0.5:
# Swap
temp = sister[i]
sister[i] = brother[i]
brother[i] = temp
else:
sister[i] = gMom[i]
brother[i] = gDad[i]
return (sister, brother)
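# SBX in brief, as implemented above: for each gene pair p1 <= p2 a spread
# factor beta_q is sampled from a polynomial distribution shaped by eta_c, and
# the children are 0.5*((p1 + p2) -/+ beta_q*(p2 - p1)), clamped to
# [rangemin, rangemax]; larger eta_c keeps offspring closer to the parents.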
####################
## 2D List ##
####################
def G2DListCrossoverUniform(genome, **args):
""" The G2DList Uniform Crossover """
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
sister = gMom.clone()
brother = gDad.clone()
sister.resetStats()
brother.resetStats()
h, w = gMom.getSize()
for i in xrange(h):
for j in xrange(w):
if Util.randomFlipCoin(Consts.CDefG2DListCrossUniformProb):
temp = sister.getItem(i, j)
sister.setItem(i, j, brother.getItem(i, j))
brother.setItem(i, j, temp)
return (sister, brother)
def G2DListCrossoverSingleVPoint(genome, **args):
""" The crossover of G2DList, Single Vertical Point """
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
cut = rand_randint(1, gMom.getWidth()-1)
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
for i in xrange(sister.getHeight()):
sister[i][cut:] = gDad[i][cut:]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
for i in xrange(brother.getHeight()):
brother[i][cut:] = gMom[i][cut:]
return (sister, brother)
def G2DListCrossoverSingleHPoint(genome, **args):
""" The crossover of G2DList, Single Horizontal Point """
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
cut = rand_randint(1, gMom.getHeight()-1)
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
for i in xrange(cut, sister.getHeight()):
sister[i][:] = gDad[i][:]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
for i in xrange(brother.getHeight()):
brother[i][:] = gMom[i][:]
return (sister, brother)
#############################
## 2D Binary String ##
#############################
def G2DBinaryStringXUniform(genome, **args):
""" The G2DBinaryString Uniform Crossover
.. versionadded:: 0.6
The *G2DBinaryStringXUniform* function
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
sister = gMom.clone()
brother = gDad.clone()
sister.resetStats()
brother.resetStats()
h, w = gMom.getSize()
for i in xrange(h):
for j in xrange(w):
if Util.randomFlipCoin(Consts.CDefG2DBinaryStringUniformProb):
temp = sister.getItem(i, j)
sister.setItem(i, j, brother.getItem(i, j))
brother.setItem(i, j, temp)
return (sister, brother)
def G2DBinaryStringXSingleVPoint(genome, **args):
""" The crossover of G2DBinaryString, Single Vertical Point
.. versionadded:: 0.6
The *G2DBinaryStringXSingleVPoint* function
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
cut = rand_randint(1, gMom.getWidth()-1)
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
for i in xrange(sister.getHeight()):
sister[i][cut:] = gDad[i][cut:]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
for i in xrange(brother.getHeight()):
brother[i][cut:] = gMom[i][cut:]
return (sister, brother)
def G2DBinaryStringXSingleHPoint(genome, **args):
""" The crossover of G2DBinaryString, Single Horizontal Point
.. versionadded:: 0.6
The *G2DBinaryStringXSingleHPoint* function
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
cut = rand_randint(1, gMom.getHeight()-1)
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
for i in xrange(cut, sister.getHeight()):
sister[i][:] = gDad[i][:]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
for i in xrange(brother.getHeight()):
brother[i][:] = gMom[i][:]
return (sister, brother)
#############################
## Tree ##
#############################
def GTreeCrossoverSinglePoint(genome, **args):
""" The crossover for GTree, Single Point """
sister = None
brother = None
gMom = args["mom"].clone()
gDad = args["dad"].clone()
gMom.resetStats()
gDad.resetStats()
node_mom_stack = []
all_mom_nodes = []
node_mom_tmp = None
node_dad_stack = []
all_dad_nodes = []
node_dad_tmp = None
node_mom_stack.append(gMom.getRoot())
node_dad_stack.append(gDad.getRoot())
while (len(node_mom_stack) > 0) and (len(node_dad_stack) > 0):
node_mom_tmp = node_mom_stack.pop()
node_dad_tmp = node_dad_stack.pop()
if node_mom_tmp != gMom.getRoot():
all_mom_nodes.append(node_mom_tmp)
all_dad_nodes.append(node_dad_tmp)
node_mom_stack.extend(node_mom_tmp.getChilds())
node_dad_stack.extend(node_dad_tmp.getChilds())
if len(all_mom_nodes)==0 or len(all_dad_nodes)==0:
return (gMom, gDad)
if len(all_dad_nodes) == 1: nodeDad = all_dad_nodes[0]
else: nodeDad = rand_choice(all_dad_nodes)
if len(all_mom_nodes) == 1: nodeMom = all_mom_nodes[0]
else: nodeMom = rand_choice(all_mom_nodes)
nodeMom_parent = nodeMom.getParent()
nodeDad_parent = nodeDad.getParent()
# Sister
if args["count"] >= 1:
sister = gMom
nodeDad.setParent(nodeMom_parent)
nodeMom_parent.replaceChild(nodeMom, nodeDad)
sister.processNodes()
# Brother
if args["count"] == 2:
brother = gDad
nodeMom.setParent(nodeDad_parent)
nodeDad_parent.replaceChild(nodeDad, nodeMom)
brother.processNodes()
return (sister, brother)
def GTreeCrossoverSinglePointStrict(genome, **args):
""" The crossover of Tree, Strict Single Point
   .. note:: This crossover method creates offspring with restriction of the
   *max_depth* parameter.
   Accepts the *max_attempt* parameter, *max_depth* (required), and
   the distr_leaf (>= 0.0 and <= 1.0), which represents the probability
   of leaf selection when finding random nodes for crossover.
"""
sister = None
brother = None
gMom = args["mom"].clone()
gDad = args["dad"].clone()
gMom.resetStats()
gDad.resetStats()
max_depth = gMom.getParam("max_depth", None)
max_attempt = gMom.getParam("max_attempt", 10)
distr_leaf = gMom.getParam("distr_leaf", None)
if max_depth is None:
Util.raiseException("You must specify the max_depth genome parameter !", ValueError)
if max_depth < 0:
Util.raiseException("The max_depth must be >= 1, if you want to use GTreeCrossoverSinglePointStrict crossover !", ValueError)
momRandom = None
dadRandom = None
for i in xrange(max_attempt):
if distr_leaf is None:
dadRandom = gDad.getRandomNode()
momRandom = gMom.getRandomNode()
else:
if Util.randomFlipCoin(distr_leaf):
momRandom = gMom.getRandomNode(1)
else:
momRandom = gMom.getRandomNode(2)
if Util.randomFlipCoin(distr_leaf):
dadRandom = gDad.getRandomNode(1)
else:
dadRandom = gDad.getRandomNode(2)
assert momRandom is not None
assert dadRandom is not None
# Optimize here
mH = gMom.getNodeHeight(momRandom)
dH = gDad.getNodeHeight(dadRandom)
mD = gMom.getNodeDepth(momRandom)
dD = gDad.getNodeDepth(dadRandom)
# The depth of the crossover is greater than the max_depth
if (dD+mH <= max_depth) and (mD+dH <= max_depth):
break
if i == (max_attempt-1):
assert gMom.getHeight() <= max_depth
return (gMom, gDad)
else:
nodeMom, nodeDad = momRandom, dadRandom
nodeMom_parent = nodeMom.getParent()
nodeDad_parent = nodeDad.getParent()
# Sister
if args["count"] >= 1:
sister = gMom
nodeDad.setParent(nodeMom_parent)
if nodeMom_parent is None:
sister.setRoot(nodeDad)
else:
nodeMom_parent.replaceChild(nodeMom, nodeDad)
sister.processNodes()
assert sister.getHeight() <= max_depth
# Brother
if args["count"] == 2:
brother = gDad
nodeMom.setParent(nodeDad_parent)
if nodeDad_parent is None:
brother.setRoot(nodeMom)
else:
nodeDad_parent.replaceChild(nodeDad, nodeMom)
brother.processNodes()
assert brother.getHeight() <= max_depth
return (sister, brother)
#############################################################################
################# GTreeGP Crossovers ######################################
#############################################################################
def GTreeGPCrossoverSinglePoint(genome, **args):
""" The crossover of the GTreeGP, Single Point for Genetic Programming
   .. note:: This crossover method creates offspring with restriction of the
   *max_depth* parameter.
Accepts the *max_attempt* parameter, *max_depth* (required).
"""
sister = None
brother = None
gMom = args["mom"].clone()
gDad = args["dad"].clone()
gMom.resetStats()
gDad.resetStats()
max_depth = gMom.getParam("max_depth", None)
max_attempt = gMom.getParam("max_attempt", 15)
if max_depth is None:
Util.raiseException("You must specify the max_depth genome parameter !", ValueError)
if max_depth < 0:
Util.raiseException("The max_depth must be >= 1, if you want to use GTreeCrossoverSinglePointStrict crossover !", ValueError)
momRandom = None
dadRandom = None
for i in xrange(max_attempt):
dadRandom = gDad.getRandomNode()
if dadRandom.getType() == Consts.nodeType["TERMINAL"]:
momRandom = gMom.getRandomNode(1)
elif dadRandom.getType() == Consts.nodeType["NONTERMINAL"]:
momRandom = gMom.getRandomNode(2)
mD = gMom.getNodeDepth(momRandom)
dD = gDad.getNodeDepth(dadRandom)
# Two nodes are root
if mD==0 and dD==0: continue
mH = gMom.getNodeHeight(momRandom)
if dD+mH > max_depth: continue
dH = gDad.getNodeHeight(dadRandom)
if mD+dH > max_depth: continue
break
if i==(max_attempt-1):
assert gMom.getHeight() <= max_depth
return (gMom, gDad)
else:
nodeMom, nodeDad = momRandom, dadRandom
nodeMom_parent = nodeMom.getParent()
nodeDad_parent = nodeDad.getParent()
# Sister
if args["count"] >= 1:
sister = gMom
nodeDad.setParent(nodeMom_parent)
if nodeMom_parent is None:
sister.setRoot(nodeDad)
else:
nodeMom_parent.replaceChild(nodeMom, nodeDad)
sister.processNodes()
assert sister.getHeight() <= max_depth
# Brother
if args["count"] == 2:
brother = gDad
nodeMom.setParent(nodeDad_parent)
if nodeDad_parent is None:
brother.setRoot(nodeMom)
else:
nodeDad_parent.replaceChild(nodeDad, nodeMom)
brother.processNodes()
assert brother.getHeight() <= max_depth
return (sister, brother)
# ==== stevearc/stiny :: fabfile.py (license: mit) ====
import os
import fabric.api as fab
import jinja2
import json
from fabric.context_managers import path
from fabric.decorators import roles
from pyramid.settings import aslist
from stiny.gutil import normalize_email
fab.env.roledefs = {
'door': ['[email protected]'],
'web': ['[email protected]'],
}
def _version():
return fab.local('git describe --tags', capture=True)
def _get_ref():
ref = fab.local('git rev-parse HEAD', capture=True)
return ref[:8]
def _get_var(key):
if key not in os.environ:
raise Exception("Missing environment variable %r" % key)
return os.environ[key]
CONSTANTS = {
'venv': '/envs/stiny',
'admins': [normalize_email(e) for e in aslist(_get_var('STINY_ADMINS'))],
'guests': [normalize_email(e) for e in aslist(_get_var('STINY_GUESTS'))],
'phone_access': _get_var('STINY_PHONE_ACCESS'),
'url_prefix': 'gen/' + _get_ref(),
'session': {
'encrypt_key': _get_var('STINY_ENCRYPT_KEY'),
'validate_key': _get_var('STINY_VALIDATE_KEY'),
},
'authtkt': {
'secret': _get_var('STINY_AUTH_SECRET'),
},
'google': {
'client_id': _get_var('STINY_PROD_CLIENT_GOOGLE_CLIENT_ID'),
'server_client_id': _get_var('STINY_SERVER_GOOGLE_CLIENT_ID'),
'server_client_secret': _get_var('STINY_SERVER_GOOGLE_CLIENT_SECRET'),
'calendar_id': _get_var('STINY_CAL_ID'),
},
'twilio': {
'auth_token': _get_var('STINY_TWILIO_AUTH_TOKEN'),
}
}
def _render(filename, **context):
with open(filename, 'r') as ifile:
tmpl = jinja2.Template(ifile.read())
basename = os.path.basename(filename)
fab.local('mkdir -p dist')
outfile = os.path.join('dist', basename)
with open(outfile, 'w') as ofile:
ofile.write(tmpl.render(**context))
return outfile
def _render_put(filename, dest, **kwargs):
rendered = _render(filename, **CONSTANTS)
fab.put(rendered, dest, **kwargs)
def write_credentials(filename):
from stiny.gutil import Calendar
google = CONSTANTS['google']
cal = Calendar(google['server_client_id'], google['server_client_secret'],
filename, calendar_id=google['calendar_id'])
cal.login_if_needed()
def build_web():
fab.local('npm install')
fab.local('rm -rf stiny/webpack')
fab.local('npm run flow')
fab.local('npm run build-prod')
version = _version()
fab.local("sed -i -e 's/version=.*/version=\"%s\",/' setup.py" % version)
write_credentials('stiny/credentials.dat')
fab.local('python setup.py sdist')
fab.local("sed -i -e 's/version=.*/version=\"develop\",/' setup.py")
_render('prod.ini.tmpl', **CONSTANTS)
print "Created dist/stiny-%s.tar.gz" % version
return version
@roles('web')
def deploy_web():
version = build_web()
tarball = "stiny-%s.tar.gz" % version
fab.put("dist/" + tarball)
fab.sudo("if [ ! -e {0} ]; then virtualenv {0}; fi"
.format(CONSTANTS['venv']))
with path(CONSTANTS['venv'] + '/bin', behavior='prepend'):
fab.sudo("yes | pip uninstall stiny || true")
fab.sudo("pip install pastescript")
fab.sudo("pip install %s" % tarball)
_render_put('prod.ini.tmpl', '/etc/emperor/stiny.ini', use_sudo=True)
@roles('door')
def build_rpi_gpio_wheel():
gpio_wheel = 'RPi.GPIO-0.6.2-cp27-cp27mu-linux_armv6l.whl'
fab.local('mkdir -p pex_wheels')
# Generate the RPI.GPIO wheel on the raspberry pi
fab.run('rm -rf /tmp/gpiobuild')
fab.run('mkdir -p /tmp/gpiobuild')
with fab.cd('/tmp/gpiobuild'):
fab.run('virtualenv venv')
with path('/tmp/gpiobuild/venv/bin', behavior='prepend'):
fab.run('pip install wheel')
fab.run('pip wheel RPi.GPIO==0.6.2 --wheel-dir=/tmp/gpiobuild')
fab.get(gpio_wheel, os.path.join('pex_wheels', gpio_wheel))
fab.run('rm -rf /tmp/gpiobuild')
def build_door():
fab.local('rm -f dist/stiny')
constants = ['STINY_SERVER_GOOGLE_CLIENT_ID',
'STINY_SERVER_GOOGLE_CLIENT_SECRET', 'STINY_CAL_ID']
config = {}
for key in constants:
config[key] = _get_var(key)
with open('stiny_worker/stiny_worker/config.json', 'w') as ofile:
json.dump(config, ofile)
write_credentials('stiny_worker/stiny_worker/credentials.dat')
gpio_wheel = 'RPi.GPIO-0.6.2-cp27-cp27mu-linux_armv6l.whl'
if not os.path.exists(os.path.join('pex_wheels', gpio_wheel)):
fab.execute(build_rpi_gpio_wheel)
fab.local('rm -f pex_cache/stiny_worker-develop-py2-none-any.whl')
fab.local('pex -vvvv --platform=linux_armv6l -f pex_wheels '
'--cache-dir=pex_cache '
'stiny_worker -m stiny_worker:main -o dist/stiny')
@roles('door')
def deploy_door():
build_door()
fab.put("dist/stiny")
fab.put("stiny-service", "/etc/init.d/stiny", use_sudo=True, mode=744)
fab.put("stiny-tunnel-service", "/etc/init.d/stiny-tunnel", use_sudo=True,
mode=744)
fab.sudo("service stiny-tunnel restart")
fab.sudo("service stiny restart")
# ==== PetterKun/biblioteca :: principal/views.py (license: apache-2.0) ====
#encoding:utf-8
from principal.models import Obra, Video
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.core.mail import EmailMessage
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth.models import User
from principal.forms import RegistroForm, ActivacionForm, ObraForm, VideoForm
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from principal.funciones import *
from django.db.models import Q
def inicio(request):
if not request.user.is_anonymous():
return HttpResponseRedirect('/perfil')
else:
return HttpResponseRedirect('/login')
def entrar(request):
estado = " "
if not request.user.is_anonymous():
return HttpResponseRedirect('/perfil')
if request.method == 'POST':
        formulario = AuthenticationForm(data=request.POST)
        # authentication is checked manually below (rather than through
        # formulario.is_valid()) so that inactive accounts can be routed to
        # the pin activation flow
        if 'username' in request.POST and 'password' in request.POST:
usuario = request.POST['username']
clave = request.POST['password']
acceso = authenticate(username=usuario, password=clave)
if acceso is not None:
if acceso.is_active and acceso.estado == 'a':
login(request, acceso)
return HttpResponseRedirect('/perfil')
else:
pin = generarPin()
acceso.pin = pin
acceso.save()
#titulo = 'Pin de activación - Akiba-Kei Asociación Juvenil'
#contenido = 'Tu pin es: ' + pin
#correo = EmailMessage(titulo, contenido, to=[acceso.email])
#correo.send()
return HttpResponseRedirect('/activar')
else:
estado = "El usuario y/o la contraseña son incorrectos."
else:
formulario = AuthenticationForm()
return render_to_response('login.html',
{
'formulario':formulario,
'estado':estado
},
context_instance=RequestContext(request)
)
@login_required(login_url='/login')
def salir(request):
logout(request)
return HttpResponseRedirect('/')
@login_required(login_url='/login')
def perfil(request):
usuario = request.user
return render_to_response('perfil.html',
{'usuario':usuario},
context_instance=RequestContext(request)
)
def registro(request):
if request.method == 'POST':
formulario = RegistroForm(request.POST, request.FILES)
if formulario.is_valid():
usuario = formulario.cleaned_data['username']
email = formulario.cleaned_data['email']
password_one = formulario.cleaned_data['password_one']
password_two = formulario.cleaned_data['password_two']
first_name = formulario.cleaned_data['first_name']
last_name = formulario.cleaned_data['last_name']
sexo = formulario.cleaned_data['sexo']
dni = formulario.cleaned_data['dni']
fecha_nacimiento = formulario.cleaned_data['fecha_nacimiento']
direccion = formulario.cleaned_data['direccion']
cp = formulario.cleaned_data['cp']
poblacion = formulario.cleaned_data['poblacion']
provincia = formulario.cleaned_data['provincia']
telefono = formulario.cleaned_data['telefono']
foto = formulario.cleaned_data['foto']
twitter = formulario.cleaned_data['twitter']
facebook = formulario.cleaned_data['facebook']
u = User.objects.create_user(username=usuario, email=email, password=password_one)
u.first_name = first_name
u.last_name = last_name
u.sexo = sexo
u.dni = dni
u.fecha_nacimiento = fecha_nacimiento
u.direccion = direccion
u.cp = cp
u.poblacion = poblacion
u.provincia = provincia
u.telefono = telefono
u.foto = foto
u.twitter = twitter
u.facebook = facebook
u.save()
return HttpResponseRedirect('/login')
else:
formulario = RegistroForm()
return render_to_response('registro.html',
{'formulario':formulario},
context_instance=RequestContext(request))
def activacion(request):
estado = ""
if not request.user.is_anonymous():
return HttpResponseRedirect('/perfil')
if request.method == 'POST':
formulario = ActivacionForm(request.POST)
if formulario.is_valid():
usuario = formulario.cleaned_data['username']
password = formulario.cleaned_data['password']
pin = formulario.cleaned_data['pin']
acceso = authenticate(username=usuario, password=password)
if acceso is not None:
if acceso.pin == pin:
acceso.is_active = True
acceso.estado = 'a'
acceso.save()
return HttpResponseRedirect('/login')
else:
estado = "El pin introducido es incorrecto, por favor intentelo de nuevo."
pin = generarPin()
acceso.pin = pin
acceso.save()
print pin
#titulo = 'Pin de activación - Akiba-Kei Asociación Juvenil'
#contenido = 'Tu pin es: ' + pin
#correo = EmailMessage(titulo, contenido, to=[acceso.email])
#correo.send()
else:
estado = "El usuario y/o la contraseña son incorrectas."
else:
formulario = ActivacionForm()
return render_to_response('activacion.html',
{
'formulario': formulario,
'estado':estado
},
context_instance=RequestContext(request)
)
@staff_member_required
def insertarObra(request):
if request.method == "POST":
formulario = ObraForm(request.POST, request.FILES)
if formulario.is_valid():
formulario.save()
return HttpResponseRedirect('/agregarObra')
else:
formulario = ObraForm()
return render_to_response('agregarobra.html',
{'formulario':formulario},
context_instance=RequestContext(request)
)
@login_required(login_url='/login')
def buscarObra(request):
query_q = request.GET.get('q', '')
query_s = request.GET.get('s', '')
if query_q and query_s:
if query_s == 'titulo':
qset = Q(titulo__icontains = query_q)
elif query_s == 'autor':
qset = Q(autor__icontains = query_q)
elif query_s == 'editorial':
qset = Q(editorial__icontains = query_q)
elif query_s == 'genero':
qset = Q(genero__icontains = query_q)
elif query_s == 'palabra_clave':
qset = Q(palabra_clave__icontains = query_q)
resultados = Obra.objects.filter(qset)
else:
resultados = []
return render_to_response('busquedaObra.html',
{
'resultados':resultados,
'query_q':query_q,
'query_s':query_s
},
context_instance=RequestContext(request)
)
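# Example request handled by buscarObra (URL and values are hypothetical):
#   GET /buscarObra?q=Tolkien&s=autor
# maps to Obra.objects.filter(Q(autor__icontains='Tolkien')).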
def detalleObra(request, id_obra):
dato = get_object_or_404(Obra, pk=id_obra)
return render_to_response('obra.html',
{'obra':dato},
context_instance=RequestContext(request)
                               )
# ==== ksachs/invenio :: modules/bibauthorid/lib/bibauthorid_templates.py ====
## -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Bibauthorid HTML templates"""
# pylint: disable=W0105
# pylint: disable=C0301
# from cgi import escape
# from urllib import quote
#
import invenio.bibauthorid_config as bconfig
from invenio.config import CFG_SITE_LANG, CFG_ETCDIR
from invenio.config import CFG_SITE_URL, CFG_SITE_SECURE_URL, CFG_BASE_URL, CFG_INSPIRE_SITE
from invenio.config import CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL, CFG_WEBAUTHORPROFILE_CFG_HEPNAMES_EMAIL
from invenio.bibformat import format_record
from invenio.session import get_session
from invenio.search_engine_utils import get_fieldvalues
from invenio.bibauthorid_config import PERSONID_EXTERNAL_IDENTIFIER_MAP, CREATE_NEW_PERSON, CFG_BIBAUTHORID_ENABLED, \
BIBAUTHORID_CFG_SITE_NAME
from invenio.bibauthorid_webapi import get_person_redirect_link, get_canonical_id_from_person_id, \
get_person_names_from_id, get_person_info_by_pid
from invenio.bibauthorid_frontinterface import get_uid_of_author
from invenio.bibauthorid_frontinterface import get_bibrefrec_name_string
from invenio.bibauthorid_frontinterface import get_canonical_name_of_author
from invenio.messages import gettext_set_language, wash_language
from invenio.webuser import get_email
from invenio.htmlutils import escape_html
from jinja2 import Environment, FileSystemLoader, TemplateNotFound
from invenio.bibauthorid_webutils import group_format_number
from invenio.websearch_templates import tmpl_citesummary_get_link
from invenio.websearch_templates import tmpl_citesummary_get_link_for_rep_breakdown
# from invenio.textutils import encode_for_xml
class WebProfileMenu():
def get_menu_items(self):
return self.menu
def _set_is_owner(self, is_owner):
if isinstance(is_owner, bool):
self.owner = is_owner
def _set_is_admin(self, is_admin):
if isinstance(is_admin, bool):
self.is_admin = is_admin
def _set_canonical_name(self, canonical_name):
if isinstance(canonical_name, str):
self.canonical_name = canonical_name
def _configure_localisation(self, ln):
self.localise = gettext_set_language(ln)
def _set_active_menu_item(self, current_page):
for item in self.menu:
if item['page'] == current_page:
item['active'] = True
def _get_standard_menu_items(self):
personalise = ""
if self.owner:
personalise = "Your "
menu = [
{
'page': "profile",
'text': "%s" % self.localise("View %sProfile" % personalise),
"static": False,
"active": False,
"canonical_name": self.canonical_name,
"disabled": self.canonical_name is ""
},
{
'page': "manage_profile",
'text': "%s" % self.localise("Manage %sProfile" % personalise),
'static': False,
'active': False,
"canonical_name": self.canonical_name,
"disabled": self.canonical_name is ""
},
{
'page': "claim",
'text': "%s" % self.localise("Manage %sPublications" % personalise),
'static': False,
'active': False,
"canonical_name": self.canonical_name,
"disabled": self.canonical_name is ""
},
{
'page': "help",
'text': "%s" % self.localise("Help"),
'static': True,
'active': False,
}
]
return menu
def _get_admin_menu_items(self):
admin_menu_items = self._get_standard_menu_items()
open_tickets_item = {
'page': "claim/tickets_admin",
'text': "%s" % self.localise("Open Tickets"),
'static': True,
'active': False
}
admin_menu_items.append(open_tickets_item)
return list(admin_menu_items)
def _create_menu(self, current_page):
if self.is_admin:
self.menu = self._get_admin_menu_items()
else:
self.menu = self._get_standard_menu_items()
self._set_active_menu_item(current_page)
def __init__(self, canonical_name, current_page, ln, is_owner=False, is_admin=False):
self._configure_localisation(ln)
self._set_canonical_name(canonical_name)
self._set_is_owner(is_owner)
self._set_is_admin(is_admin)
self._create_menu(current_page)
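# Minimal usage sketch (canonical name and page are made-up values):
#
#   menu = WebProfileMenu("J.Doe.1", "profile", "en", is_owner=True)
#   items = menu.get_menu_items()  # dicts with 'page', 'text', 'active', ...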
class WebProfilePage():
TEMPLATES_DIR = "%s/bibauthorid/templates" % CFG_ETCDIR
loader = FileSystemLoader(TEMPLATES_DIR)
environment = Environment(loader=loader)
environment.filters['groupformat'] = group_format_number
def __init__(self, page, heading, no_cache=False):
self.css_dir = CFG_BASE_URL + "/css"
self.legacy_css_dir = CFG_BASE_URL + "/img"
self.img_dir = CFG_BASE_URL + "/img"
self.scripts_dir = CFG_BASE_URL + "/js"
self.url = CFG_BASE_URL + "/author"
self.scripts = [
"json3.min.js",
"jquery-ui.min.js",
"jquery.form.js",
"jquery.dataTables.min.js",
"jquery-lightbox/js/jquery.lightbox-0.5.js",
"jquery.omniwindow.js",
"spin.min.js",
"sly.min.js",
"parsley.js",
"bootstrap.min.js?g=c29c5bc",
"underscore-min.js",
"backbone.js",
"handlebars.js",
"author-handlebars-templates.js",
"bibauthorid.js?g=c29c5bc"
]
self.legacy_stylesheets = ["jquery-ui/themes/smoothness/jquery-ui.css",
"datatables_jquery-ui.css"]
self.stylesheets = [
"bootstrap.min.css?g=c29c5bc",
"bibauthorid.css?g=c29c5bc"
]
self.stylesheets = ["%s/%s" % (self.css_dir, item) for item in self.stylesheets]
self.stylesheets = self.stylesheets + \
["%s/%s" % (self.legacy_css_dir, item) for item in self.legacy_stylesheets]
self._initialise_class_variables()
self.no_cache = no_cache
self.heading = heading
self.page = page
self.bootstrap_data = None
def _initialise_class_variables(self):
self.menu = None
self.debug = None
def create_profile_menu(self, canonical_name, ln, is_owner=False, is_admin=False):
menu = WebProfileMenu(canonical_name, self.page, ln, is_owner, is_admin)
self.menu = menu.get_menu_items()
def add_profile_menu(self, menu):
self.menu = menu.get_menu_items()
def add_debug_info(self, debug):
self.debug = debug
def add_bootstrapped_data(self, data):
self.bootstrap_data = data
def get_head(self):
return WebProfilePage.environment.get_template("head.html").render({
'no_cache': self.no_cache,
'scripts': self.scripts,
'stylesheets': self.stylesheets,
'scripts_dir': self.scripts_dir
})
def get_body(self):
return WebProfilePage.environment.get_template("index.html").render({
'title': self.heading,
'menu': self.menu,
'url': self.url,
'debug': self.debug,
'bootstrap': self.bootstrap_data
})
@staticmethod
def _load_named_template(template):
environment = WebProfilePage.environment
if template is not "generic":
loaded_template = environment.get_template("%s.html" % str(template))
else:
loaded_template = environment.get_template("generic_wrapper.html")
return loaded_template
def _get_standard_author_page_parameters(self):
return {
'title': self.heading,
'menu': self.menu,
'url': self.url,
'debug': self.debug,
'bootstrap': self.bootstrap_data,
'search_form_url': "%s/author/search" % CFG_BASE_URL
}
def get_wrapped_body(self, template, content):
parameters = self._get_standard_author_page_parameters()
try:
loaded_template = self._load_named_template(template)
parameters.update(content)
except TemplateNotFound:
loaded_template = self._load_named_template("generic")
parameters.update({
'html': "Unable to load named template.<br>%s" % str(content)
})
return loaded_template.render(parameters)
@staticmethod
def render_template(template, content):
try:
loaded_template = WebProfilePage._load_named_template(template)
except TemplateNotFound:
return "Unable to load named template: %s.<br>%s" % (template, str(content))
return loaded_template.render(content)
@staticmethod
def render_citations_summary_content(citations, canonical_name):
        def _get_breakdown_categories_bounds(category):
"""
An example of a category string would be 'Famous papers (250-499')
This returns (250, 499) which are the lower and upper bound.
"""
bounds_str = category.split(')')[0].split('(')[1]
try:
return (int(bounds_str), 0)
except ValueError:
if '+' in bounds_str:
return (int(bounds_str.strip('+')), 1000000)
else:
return map(int, bounds_str.split('-'))
citeable_breakdown_queries = dict()
published_breakdown_queries = dict()
for category in citations['breakdown_categories']:
low, high = _get_breakdown_categories_bounds(category)
citeable_breakdown_queries[
category] = tmpl_citesummary_get_link_for_rep_breakdown(
canonical_name,
'author',
'collection:citeable',
'cited',
low,
high)
published_breakdown_queries[
category] = tmpl_citesummary_get_link_for_rep_breakdown(
canonical_name,
'author',
'collection:published',
'cited',
low,
high)
try:
result = WebProfilePage.environment.get_template("citations_summary.html").render({
'papers_num': citations['papers_num'],
'citeable': {'avg_cites': citations['data']['Citeable papers']['avg_cites'],
'num': len(citations['papers']['Citeable papers']),
'citations_num': citations['data']['Citeable papers']['total_cites'],
'h_index': citations['data']['Citeable papers']['h-index'],
'breakdown': citations['data']['Citeable papers']['breakdown'],
'breakdown_queries': citeable_breakdown_queries},
'published': {'avg_cites': citations['data']['Published only']['avg_cites'],
'num': len(citations['papers']['Published only']),
'citations_num': citations['data']['Published only']['total_cites'],
'h_index': citations['data']['Published only']['h-index'],
'breakdown': citations['data']['Published only']['breakdown'],
'breakdown_queries': published_breakdown_queries},
'breakdown_categories': citations['breakdown_categories'],
'hindex_fine_print_link': "%s/help/citation-metrics" % CFG_BASE_URL,
'citation_fine_print_link': "%s/help/citation-metrics" % CFG_BASE_URL,
'citeable_papers_link': tmpl_citesummary_get_link(canonical_name, 'author', 'collection:citeable'),
'selfcite_link': '%s/search?ln=en&p=author:%s&of=hcs2' % (CFG_BASE_URL, canonical_name),
'published_only_papers_link': tmpl_citesummary_get_link(canonical_name, 'author', 'collection:published'),
})
        except Exception:
result = "No citations data."
return result
@staticmethod
def render_publications_box_content(template_vars):
"""
Creates HTML Markup for Publications Box
@param **kwargs: A dictionary with at least the following keys:
internal_pubs
external_pubs
datasets
@return: HTML Markup
@rtype: str
"""
return WebProfilePage.environment.get_template("publications_box.html").render(template_vars)
def get_profile_page_body(self, last_computed, trial="default"):
if trial is not None and trial != "default":
file_ext = "_" + str(trial)
else:
file_ext = str()
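# e.g. trial="test" (hypothetical) selects "profile_page_test.html";
# trial=None or "default" falls back to "profile_page.html".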
result = str()
try:
template = WebProfilePage.environment.get_template("profile_page%s.html" % file_ext)
except TemplateNotFound:
template = WebProfilePage.environment.get_template("profile_page.html")
result = "<!-- Failed to load template for trial: %s -->" % str(trial)
# The menu should not be visible if BAI is disabled.
if not CFG_BIBAUTHORID_ENABLED:
self.menu = None
return template.render({
'title': self.heading,
'menu': self.menu,
'url': self.url,
'debug': self.debug,
'bootstrap': self.bootstrap_data,
'last_computed': last_computed,
'citation_fine_print_link': "%s/help/citation-metrics" % CFG_BASE_URL
}) + result
import xml.sax.saxutils
class Template(object):
"""Templating functions used by aid"""
def __init__(self, language=CFG_SITE_LANG):
"""Set defaults for all aid template output"""
self.language = language
self._ = gettext_set_language(wash_language(language))
def tmpl_person_detail_layout(self, content):
'''
writes HTML content into the person css container
@param content: HTML content
@type content: string
@return: HTML code
@rtype: string
'''
html = []
h = html.append
h('<div id="aid_person">')
h(content)
h('</div>')
return "\n".join(html)
def tmpl_merge_transaction_box(self, teaser_key, messages, show_close_btn=True):
'''
Creates a notification box based on the jQuery UI style
@param teaser_key: key to a dict which returns the teaser
@type teaser_key: string
@param messages: list of keys to a dict which return the message to display in the box
@type messages: list of strings
@param show_close_btn: display close button [x]
@type show_close_btn: boolean
@return: HTML code
@rtype: string
'''
transaction_teaser_dict = {'success': 'Success!',
'failure': 'Failure!'}
transaction_message_dict = {'confirm_success': '%s merge transaction%s successfully executed.',
'confirm_failure':
'%s merge transaction%s failed. This happened because there is at least one profile in the merging list that is either connected to a user or it has claimed papers.'
' Please edit the list accordingly.',
'confirm_operation': '%s merge transaction%s successfully ticketized.'}
teaser = self._(transaction_teaser_dict[teaser_key])
html = []
h = html.append
for key in transaction_message_dict:
    trans_no = messages.count(key)
if trans_no == 0:
continue
elif trans_no == 1:
args = [trans_no, '']
else:
args = [trans_no, 's']
color = ''
if teaser_key == 'failure':
color = 'background: #FFC2C2;'
message = self._(transaction_message_dict[key] % tuple(args))
h('<div id="aid_notification_' + key + '" class="ui-widget ui-alert">')
h(' <div style="%s margin-top: 20px; padding: 0pt 0.7em;" class="ui-state-highlight ui-corner-all">' %
(color))
h(' <p><span style="float: left; margin-right: 0.3em;" class="ui-icon ui-icon-info"></span>')
h(' <strong>%s</strong> %s' % (teaser, message))
if show_close_btn:
h(
' <span style="float:right; margin-right: 0.3em;"><a rel="nofollow" href="#" class="aid_close-notify" style="border-style: none;">X</a></span></p>')
h(' </div>')
h('</div>')
return "\n".join(html)
def tmpl_search_ticket_box(self, teaser_key, message_key, bibrefs, show_close_btn=False):
'''
Creates a box informing about a claim in progress for
the search.
@param teaser_key: key to a dict which returns the teaser
@type teaser_key: string
@param message_key: key to a dict which returns the message to display in the box
@type message_key: string
@param bibrefs: bibrefs which are about to be assigned
@type bibrefs: list of strings
@param show_close_btn: display close button [x]
@type show_close_btn: boolean
@return: HTML code
@rtype: string
'''
error_teaser_dict = {'person_search': 'Person search for assignment in progress!'}
error_message_dict = {'assign_papers': 'You are searching for a person to assign the following paper%s:'}
teaser = self._(error_teaser_dict[teaser_key])
arg = ''
if len(bibrefs) > 1:
arg = 's'
message = self._(error_message_dict[message_key] % (arg))
html = []
h = html.append
h('<div id="aid_notification_' + teaser_key + '" class="ui-widget ui-alert">')
h(' <div style="margin-top: 20px; padding: 0pt 0.7em;" class="ui-state-highlight ui-corner-all">')
h(' <p><span style="float: left; margin-right: 0.3em;" class="ui-icon ui-icon-info"></span>')
h(' <strong>%s</strong> %s ' % (teaser, message))
h("<ul>")
for paper in bibrefs:
if ',' in paper:
pbibrec = paper.split(',')[1]
else:
pbibrec = paper
h("<li>%s</li>"
% (format_record(int(pbibrec), "ha")))
h("</ul>")
h('<a rel="nofollow" id="checkout" href="%s/author/claim/action?cancel_search_ticket=True">' %
(CFG_SITE_URL,) + self._('Quit searching.') + '</a>')
if show_close_btn:
h(
' <span style="float:right; margin-right: 0.3em;"><a rel="nofollow" href="#" class="aid_close-notify">X</a></span></p>')
h(' </div>')
h('</div>')
h('<p> </p>')
return "\n".join(html)
def tmpl_merge_ticket_box(self, teaser_key, message_key, primary_cname):
message = self._('When you merge a set of profiles, all the information stored will be assigned to the primary profile. This includes papers, ids or citations.'
                 ' After merging, only the primary profile will remain in the system; all other profiles will be automatically deleted.<br>')
error_teaser_dict = {'person_search': message}
error_message_dict = {'merge_profiles': 'You are about to merge the following profiles:'}
teaser = self._(error_teaser_dict[teaser_key])
message = self._(error_message_dict[message_key])
html = []
h = html.append
h('<div id="aid_notification_' + teaser_key + '" class="ui-widget ui-alert">')
h(' <div style="margin-top: 20px; padding: 0pt 0.7em;" class="ui-state-highlight ui-corner-all">')
h(' <p><span style="float: left; margin-right: 0.3em;" class="ui-icon ui-icon-info"></span>')
h('  <strong>%s</strong> <br>%s ' % (teaser, message))
h("<table id=\"mergeList\" >\
    <tr>\
    <th></th>\
    <th></th>\
    <th></th>\
    <th></th>\
    </tr>")
h("<tr><td></td><td><a id=\"primaryProfile\" href='%s/author/profile/%s'target='_blank'>%s</a></td><td id=\"primaryProfileTd\">primary profile</td><td></td></tr>"
% (CFG_SITE_URL, primary_cname, primary_cname))
# for profile in profiles:
# h("<li><a href='%s'target='_blank' class=\"profile\" >%s</a><a class=\"setPrimaryProfile\">Set as primary</a> <a class=\"removeProfile\">Remove</a></li>"
# % (profile, profile))
h("</table>")
h('<div id="mergeListButtonWrapper">')
h('<form action="%s/author/claim/action" method="get"><input type="hidden" name="cancel_merging" value="True" /> <input type="hidden" name="primary_profile" value="%s" /> <input type="submit" id="cancelMergeButton" class="aid_btn_red" value="%s" /></form>' %
(CFG_SITE_URL, primary_cname, self._('Cancel merging')))
h('<form action="%s/author/claim/action" method="get"><input type="hidden" name="merge" value="True" /><input type="submit" id="mergeButton" class="aid_btn_green" value="%s" /></form>' %
(CFG_SITE_URL, self._('Merge profiles')))
h(' </div>')
h(' </div>')
h('</div>')
h('<p> </p>')
return "\n".join(html)
def tmpl_author_confirmed(self, bibref, pid, verbiage_dict={'alt_confirm': 'Confirmed.',
'confirm_text':
'This record assignment has been confirmed.',
'alt_forget': 'Forget decision!',
'forget_text': 'Forget assignment decision',
'alt_repeal': 'Repeal!',
'repeal_text': 'Repeal record assignment',
'to_other_text': 'Assign to another person',
'alt_to_other': 'To other person!'
},
show_reset_button=True):
'''
Generate the per-paper action links shown in the table for the
status "confirmed"
@param bibref: construct of unique ID for this author on this paper
@type bibref: string
@param pid: the Person ID
@type pid: int
@param verbiage_dict: language for the link descriptions
@type verbiage_dict: dict
'''
stri = ('<!--2!--><span id="aid_status_details"> '
'<img src="%(url)s/img/aid_check.png" alt="%(alt_confirm)s" />'
'%(confirm_text)s <br>')
if show_reset_button:
stri = stri + (
'<a rel="nofollow" id="aid_reset_gr" class="aid_grey op_action" href="%(url)s/author/claim/action?reset=True&selection=%(ref)s&pid=%(pid)s">'
'<img src="%(url)s/img/aid_reset_gray.png" alt="%(alt_forget)s" style="margin-left:22px;" />'
'%(forget_text)s</a><br>')
stri = stri + (
'<a rel="nofollow" id="aid_repeal" class="aid_grey op_action" href="%(url)s/author/claim/action?repeal=True&selection=%(ref)s&pid=%(pid)s">'
'<img src="%(url)s/img/aid_reject_gray.png" alt="%(alt_repeal)s" style="margin-left:22px;"/>'
'%(repeal_text)s</a><br>'
'<a rel="nofollow" id="aid_to_other" class="aid_grey op_action" href="%(url)s/author/claim/action?to_other_person=True&selection=%(ref)s">'
'<img src="%(url)s/img/aid_to_other_gray.png" alt="%(alt_to_other)s" style="margin-left:22px;"/>'
'%(to_other_text)s</a> </span>')
return (stri
% ({'url': CFG_SITE_URL, 'ref': bibref, 'pid': pid,
'alt_confirm': verbiage_dict['alt_confirm'],
'confirm_text': verbiage_dict['confirm_text'],
'alt_forget': verbiage_dict['alt_forget'],
'forget_text': verbiage_dict['forget_text'],
'alt_repeal': verbiage_dict['alt_repeal'],
'repeal_text': verbiage_dict['repeal_text'],
'to_other_text': verbiage_dict['to_other_text'],
'alt_to_other': verbiage_dict['alt_to_other']}))
def tmpl_author_repealed(self, bibref, pid, verbiage_dict={'alt_confirm': 'Confirm!',
'confirm_text': 'Confirm record assignment.',
'alt_forget': 'Forget decision!',
'forget_text': 'Forget assignment decision',
'alt_repeal': 'Rejected!',
'repeal_text': 'Repeal this record assignment.',
'to_other_text': 'Assign to another person',
'alt_to_other': 'To other person!'
}):
'''
Generate the per-paper action links shown in the table for the
status "repealed"
@param bibref: construct of unique ID for this author on this paper
@type bibref: string
@param pid: the Person ID
@type pid: int
@param verbiage_dict: language for the link descriptions
@type verbiage_dict: dict
'''
stri = ('<!---2!--><span id="aid_status_details"> '
'<img src="%(url)s/img/aid_reject.png" alt="%(alt_repeal)s" />'
'%(repeal_text)s <br>'
'<a rel="nofollow" id="aid_confirm" class="aid_grey op_action" href="%(url)s/author/claim/action?confirm=True&selection=%(ref)s&pid=%(pid)s">'
'<img src="%(url)s/img/aid_check_gray.png" alt="%(alt_confirm)s" style="margin-left: 22px;" />'
'%(confirm_text)s</a><br>'
'<a rel="nofollow" id="aid_to_other" class="aid_grey op_action" href="%(url)s/author/claim/action?to_other_person=True&selection=%(ref)s">'
'<img src="%(url)s/img/aid_to_other_gray.png" alt="%(alt_to_other)s" style="margin-left:22px;"/>'
'%(to_other_text)s</a> </span>')
return (stri
% ({'url': CFG_SITE_URL, 'ref': bibref, 'pid': pid,
'alt_confirm': verbiage_dict['alt_confirm'],
'confirm_text': verbiage_dict['confirm_text'],
'alt_forget': verbiage_dict['alt_forget'],
'forget_text': verbiage_dict['forget_text'],
'alt_repeal': verbiage_dict['alt_repeal'],
'repeal_text': verbiage_dict['repeal_text'],
'to_other_text': verbiage_dict['to_other_text'],
'alt_to_other': verbiage_dict['alt_to_other']}))
def tmpl_author_undecided(self, bibref, pid, verbiage_dict={'alt_confirm': 'Confirm!',
'confirm_text': 'Confirm record assignment.',
'alt_repeal': 'Rejected!',
'repeal_text': 'This record has been repealed.',
'to_other_text': 'Assign to another person',
'alt_to_other': 'To other person!'
}):
'''
Generate the per-paper action links shown in the table for the
status "no decision taken yet"
@param bibref: construct of unique ID for this author on this paper
@type bibref: string
@param pid: the Person ID
@type pid: int
@param verbiage_dict: language for the link descriptions
@type verbiage_dict: dict
'''
# batchprocess?mconfirm=True&bibrefs=['100:17,16']&pid=1
string = ('<!--0!--><span id="aid_status_details"> '
'<a rel="nofollow" id="aid_confirm" class="op_action" href="%(url)s/author/claim/action?confirm=True&selection=%(ref)s&pid=%(pid)s">'
'<img src="%(url)s/img/aid_check.png" alt="%(alt_confirm)s" />'
'%(confirm_text)s</a><br />'
'<a rel="nofollow" id="aid_repeal" class="op_action" href="%(url)s/author/claim/action?repeal=True&selection=%(ref)s&pid=%(pid)s">'
'<img src="%(url)s/img/aid_reject.png" alt="%(alt_repeal)s" />'
'%(repeal_text)s</a> <br />'
'<a rel="nofollow" id="aid_to_other" class="op_action" href="%(url)s/author/claim/action?to_other_person=True&selection=%(ref)s">'
'<img src="%(url)s/img/aid_to_other.png" alt="%(alt_to_other)s" />'
'%(to_other_text)s</a> </span>')
return (string
% ({'url': CFG_SITE_URL, 'ref': bibref, 'pid': pid,
'alt_confirm': verbiage_dict['alt_confirm'],
'confirm_text': verbiage_dict['confirm_text'],
'alt_repeal': verbiage_dict['alt_repeal'],
'repeal_text': verbiage_dict['repeal_text'],
'to_other_text': verbiage_dict['to_other_text'],
'alt_to_other': verbiage_dict['alt_to_other']}))
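# The three tmpl_author_* helpers above map onto the paper 'flag' values
# used by __tmpl_admin_records_table below: 2 -> confirmed,
# -2 -> repealed, anything else -> undecided.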
def __tmpl_admin_records_table(
self, form_id, person_id, bibrecids, verbiage_dict={
'no_doc_string': 'Sorry, there are currently no documents to be found in this category.',
'b_confirm': 'Confirm',
'b_repeal': 'Repeal',
'b_to_others':
'Assign to another person',
'b_forget': 'Forget decision'},
buttons_verbiage_dict={
'mass_buttons': {'no_doc_string': 'Sorry, there are currently no documents to be found in this category.',
'b_confirm':
'Confirm',
'b_repeal':
'Repeal',
'b_to_others':
'Assign to another person',
'b_forget': 'Forget decision'},
'record_undecided': {'alt_confirm': 'Confirm!',
'confirm_text':
'Confirm record assignment.',
'alt_repeal':
'Rejected!',
'repeal_text': 'This record has been repealed.'},
'record_confirmed': {'alt_confirm': 'Confirmed.',
'confirm_text':
'This record assignment has been confirmed.',
'alt_forget':
'Forget decision!',
'forget_text':
'Forget assignment decision',
'alt_repeal':
'Repeal!',
'repeal_text': 'Repeal record assignment'},
'record_repealed': {'alt_confirm': 'Confirm!',
'confirm_text':
'Confirm record assignment.',
'alt_forget':
'Forget decision!',
'forget_text':
'Forget assignment decision',
'alt_repeal':
'Rejected!',
'repeal_text': 'Repeal this record assignment.'}},
show_reset_button=True):
'''
Generate the big tables for the person overview page
@param form_id: name of the form
@type form_id: string
@param person_id: Person ID
@type person_id: int
@param bibrecids: List of records to display
@type bibrecids: list
@param verbiage_dict: language for the elements
@type verbiage_dict: dict
@param buttons_verbiage_dict: language for the buttons
@type buttons_verbiage_dict: dict
'''
no_papers_html = ['<div style="text-align:left;margin-top:1em;"><strong>']
no_papers_html.append('%s' % self._(verbiage_dict['no_doc_string']))
no_papers_html.append('</strong></div>')
if not bibrecids or not person_id:
return "\n".join(no_papers_html)
pp_html = []
h = pp_html.append
h('<form id="%s" action="/author/claim/action" method="post">'
% (form_id))
# +self._(' On all pages: '))
h('<div class="aid_reclist_selector">')
h('<a rel="nofollow" rel="group_1" href="#select_all">' + self._('Select All') + '</a> | ')
h('<a rel="nofollow" rel="group_1" href="#select_none">' + self._('Select None') + '</a> | ')
h('<a rel="nofollow" rel="group_1" href="#invert_selection">' + self._('Invert Selection') + '</a> | ')
h('<a rel="nofollow" id="toggle_claimed_rows" href="javascript:toggle_claimed_rows();" '
'alt="hide">' + self._('Hide successful claims') + '</a>')
h('</div>')
h('<div class="aid_reclist_buttons">')
h(('<img src="%s/img/aid_90low_right.png" alt="∟" />')
% (CFG_SITE_URL))
h('<input type="hidden" name="pid" value="%s" />' % (person_id))
h('<input type="submit" name="assign" value="%s" class="aid_btn_blue" />' % self._(verbiage_dict['b_confirm']))
h('<input type="submit" name="reject" value="%s" class="aid_btn_blue" />' % self._(verbiage_dict['b_repeal']))
h('<input type="submit" name="to_other_person" value="%s" class="aid_btn_blue" />' %
self._(verbiage_dict['b_to_others']))
# if show_reset_button:
# h('<input type="submit" name="reset" value="%s" class="aid_btn_blue" />' % verbiage_dict['b_forget'])
h(" </div>")
h('<table class="paperstable" cellpadding="3" width="100%">')
h("<thead>")
h(" <tr>")
h(' <th> </th>')
h(' <th>' + self._('Paper Short Info') + '</th>')
h(' <th>' + self._('Author Name') + '</th>')
h(' <th>' + self._('Affiliation') + '</th>')
h(' <th>' + self._('Date') + '</th>')
h(' <th>' + self._('Experiment') + '</th>')
h(' <th>' + self._('Actions') + '</th>')
h(' </tr>')
h('</thead>')
h('<tbody>')
for idx, paper in enumerate(bibrecids):
h(' <tr style="padding-top: 6px; padding-bottom: 6px;">')
h(' <td><input type="checkbox" name="selection" '
'value="%s" /> </td>' % (paper['bibref']))
rec_info = format_record(int(paper['recid']), "ha")
rec_info = str(idx + 1) + '. ' + rec_info
h(" <td>%s</td>" % (rec_info))
h(" <td>%s</td>" % (paper['authorname']))
aff = ""
if paper['authoraffiliation']:
aff = paper['authoraffiliation']
else:
aff = self._("Not assigned")
h(" <td>%s</td>" % (aff))
if paper['paperdate']:
    pdate = paper['paperdate']
else:
    pdate = 'N.A.'
h("  <td>%s</td>" % pdate)
if paper['paperexperiment']:
    pexperiment = paper['paperexperiment']
else:
    pexperiment = 'N.A.'
h("  <td>%s</td>" % pexperiment)
paper_status = self._("No status information found.")
if paper['flag'] == 2:
paper_status = self.tmpl_author_confirmed(paper['bibref'], person_id,
verbiage_dict=buttons_verbiage_dict['record_confirmed'],
show_reset_button=show_reset_button)
elif paper['flag'] == -2:
paper_status = self.tmpl_author_repealed(paper['bibref'], person_id,
verbiage_dict=buttons_verbiage_dict['record_repealed'])
else:
paper_status = self.tmpl_author_undecided(paper['bibref'], person_id,
verbiage_dict=buttons_verbiage_dict['record_undecided'])
h(' <td><div id="bibref%s" style="float:left"><!--%s!-->%s </div>'
% (paper['bibref'], paper['flag'], paper_status))
if 'rt_status' in paper and paper['rt_status']:
h('<img src="%s/img/aid_operator.png" title="%s" '
'alt="actions pending" style="float:right" '
'height="24" width="24" />'
% (CFG_SITE_URL, self._("Operator review of user actions pending")))
h(' </td>')
h(" </tr>")
h(" </tbody>")
h("</table>")
# +self._(' On all pages: '))
h('<div class="aid_reclist_selector">')
h('<a rel="nofollow" rel="group_1" href="#select_all">' + self._('Select All') + '</a> | ')
h('<a rel="nofollow" rel="group_1" href="#select_none">' + self._('Select None') + '</a> | ')
h('<a rel="nofollow" rel="group_1" href="#invert_selection">' + self._('Invert Selection') + '</a> | ')
h('<a rel="nofollow" id="toggle_claimed_rows" href="javascript:toggle_claimed_rows();" '
'alt="hide">' + self._('Hide successful claims') + '</a>')
h('</div>')
h('<div class="aid_reclist_buttons">')
h(('<img src="%s/img/aid_90low_right.png" alt="∟" />')
% (CFG_SITE_URL))
h('<input type="hidden" name="pid" value="%s" />' % (person_id))
h('<input type="submit" name="assign" value="%s" class="aid_btn_blue" />' % verbiage_dict['b_confirm'])
h('<input type="submit" name="reject" value="%s" class="aid_btn_blue" />' % verbiage_dict['b_repeal'])
h('<input type="submit" name="to_other_person" value="%s" class="aid_btn_blue" />' %
verbiage_dict['b_to_others'])
# if show_reset_button:
# h('<input type="submit" name="reset" value="%s" class="aid_btn_blue" />' % verbiage_dict['b_forget'])
h(" </div>")
h("</form>")
return "\n".join(pp_html)
def __tmpl_reviews_table(self, person_id, bibrecids, admin=False):
'''
Generate the table for potential reviews.
@param person_id: Person ID
@type person_id: int
@param bibrecids: List of records to display
@type bibrecids: list
@param admin: Show admin functions
@type admin: boolean
'''
no_papers_html = ['<div style="text-align:left;margin-top:1em;"><strong>']
no_papers_html.append(self._('Sorry, there are currently no records to be found in this category.'))
no_papers_html.append('</strong></div>')
if not bibrecids or not person_id:
return "\n".join(no_papers_html)
pp_html = []
h = pp_html.append
h('<form id="review" action="/author/claim/batchprocess" method="post">')
h('<table class="reviewstable" cellpadding="3" width="100%">')
h(' <thead>')
h(' <tr>')
h(' <th> </th>')
h(' <th>' + self._('Paper Short Info') + '</th>')
h(' <th>' + self._('Actions') + '</th>')
h(' </tr>')
h(' </thead>')
h(' <tbody>')
for paper in bibrecids:
h(' <tr>')
h(' <td><input type="checkbox" name="selected_bibrecs" '
'value="%s" /> </td>' % (paper))
rec_info = format_record(int(paper[0]), "ha")
if not admin:
rec_info = rec_info.replace("person/search?q=", "author/")
h(" <td>%s</td>" % (rec_info))
h(' <td><a rel="nofollow" href="%s/author/claim/batchprocess?selected_bibrecs=%s&mfind_bibref=claim">' % (CFG_SITE_URL, paper) +
self._('Review Transaction') + '</a></td>')
h(" </tr>")
h(" </tbody>")
h("</table>")
h('<div style="text-align:left;"> ' + self._('On all pages') + ': ')
h('<a rel="nofollow" rel="group_1" href="#select_all">' + self._('Select All') + '</a> | ')
h('<a rel="nofollow" rel="group_1" href="#select_none">' + self._('Select None') + '</a> | ')
h('<a rel="nofollow" rel="group_1" href="#invert_selection">' + self._('Invert Selection') + '</a>')
h('</div>')
h('<div style="vertical-align:middle;">')
h('∟ ' + self._('With selected do') + ': ')
h('<input type="hidden" name="pid" value="%s" />' % (person_id))
h('<input type="hidden" name="mfind_bibref" value="claim" />')
h('<input type="submit" name="submit" value="Review selected transactions" />')
h(" </div>")
h('</form>')
return "\n".join(pp_html)
def tmpl_admin_tabs(self, ln=CFG_SITE_LANG, person_id=-1,
rejected_papers=[],
rest_of_papers=[],
review_needed=[],
rt_tickets=[],
open_rt_tickets=[],
show_tabs=['records', 'repealed', 'review', 'comments', 'tickets', 'data'],
show_reset_button=True,
ticket_links=['delete', 'commit', 'del_entry', 'commit_entry'],
verbiage_dict={'confirmed': 'Records', 'repealed': 'Not this person\'s records',
'review': 'Records in need of review',
'tickets': 'Open Tickets', 'data': 'Data',
'confirmed_ns': 'Papers of this Person',
'repealed_ns': 'Papers _not_ of this Person',
'review_ns': 'Papers in need of review',
'tickets_ns': 'Tickets for this Person',
'data_ns': 'Additional Data for this Person'},
buttons_verbiage_dict={
'mass_buttons': {'no_doc_string': 'Sorry, there are currently no documents to be found in this category.',
'b_confirm': 'Confirm',
'b_repeal': 'Repeal',
'b_to_others': 'Assign to another person',
'b_forget': 'Forget decision'},
'record_undecided': {'alt_confirm': 'Confirm!',
'confirm_text': 'Confirm record assignment.',
'alt_repeal': 'Rejected!',
'repeal_text': 'This record has been repealed.'},
'record_confirmed': {'alt_confirm': 'Confirmed.',
'confirm_text':
'This record assignment has been confirmed.',
'alt_forget': 'Forget decision!',
'forget_text': 'Forget assignment decision',
'alt_repeal': 'Repeal!',
'repeal_text': 'Repeal record assignment'},
'record_repealed': {'alt_confirm': 'Confirm!',
'confirm_text': 'Confirm record assignment.',
'alt_forget': 'Forget decision!',
'forget_text': 'Forget assignment decision',
'alt_repeal': 'Rejected!',
'repeal_text': 'Repeal this record assignment.'}}):
'''
Generate the tabs for the person overview page
@param ln: the language to use
@type ln: string
@param person_id: Person ID
@type person_id: int
@param rejected_papers: list of repealed papers
@type rejected_papers: list
@param rest_of_papers: list of attributed of undecided papers
@type rest_of_papers: list
@param review_needed: list of papers that need a review (choose name)
@type review_needed:list
@param rt_tickets: list of tickets for this Person
@type rt_tickets: list
@param open_rt_tickets: list of open request tickets
@type open_rt_tickets: list
@param show_tabs: list of tabs to display
@type show_tabs: list of strings
@param ticket_links: list of links to display
@type ticket_links: list of strings
@param verbiage_dict: language for the elements
@type verbiage_dict: dict
@param buttons_verbiage_dict: language for the buttons
@type buttons_verbiage_dict: dict
'''
html = []
h = html.append
h('<div id="aid_tabbing">')
h(' <ul>')
if 'records' in show_tabs:
r = verbiage_dict['confirmed']
h(' <li><a rel="nofollow" href="#tabRecords"><span>%(r)s (%(l)s)</span></a></li>' %
({'r': r, 'l': len(rest_of_papers)}))
if 'repealed' in show_tabs:
r = verbiage_dict['repealed']
h(' <li><a rel="nofollow" href="#tabNotRecords"><span>%(r)s (%(l)s)</span></a></li>' %
({'r': r, 'l': len(rejected_papers)}))
if 'review' in show_tabs:
r = verbiage_dict['review']
h(' <li><a rel="nofollow" href="#tabReviewNeeded"><span>%(r)s (%(l)s)</span></a></li>' %
({'r': r, 'l': len(review_needed)}))
if 'tickets' in show_tabs:
r = verbiage_dict['tickets']
h(' <li><a rel="nofollow" href="#tabTickets"><span>%(r)s (%(l)s)</span></a></li>' %
({'r': r, 'l': len(open_rt_tickets)}))
if 'data' in show_tabs:
r = verbiage_dict['data']
h(' <li><a rel="nofollow" href="#tabData"><span>%s</span></a></li>' % r)
userid = get_uid_of_author(person_id)
if userid:
h('<img src="%s/img/webbasket_user.png" alt="%s" width="30" height="30" />' %
(CFG_SITE_URL, self._("The author has an internal ID!")))
h(' </ul>')
if 'records' in show_tabs:
h(' <div id="tabRecords">')
r = verbiage_dict['confirmed_ns']
h('<noscript><h5>%s</h5></noscript>' % r)
h(self.__tmpl_admin_records_table("massfunctions",
person_id, rest_of_papers,
verbiage_dict=buttons_verbiage_dict['mass_buttons'],
buttons_verbiage_dict=buttons_verbiage_dict,
show_reset_button=show_reset_button))
h(" </div>")
if 'repealed' in show_tabs:
h(' <div id="tabNotRecords">')
r = verbiage_dict['repealed_ns']
h('<noscript><h5>%s</h5></noscript>' % r)
h(self._('These records have been marked as not being from this person.'))
h('<br />' + self._('They will be considered again in the next run of the author ')
  + self._('disambiguation algorithm and might disappear from this listing.'))
h(self.__tmpl_admin_records_table("rmassfunctions",
person_id, rejected_papers,
verbiage_dict=buttons_verbiage_dict['mass_buttons'],
buttons_verbiage_dict=buttons_verbiage_dict,
show_reset_button=show_reset_button))
h(" </div>")
if 'review' in show_tabs:
h(' <div id="tabReviewNeeded">')
r = verbiage_dict['review_ns']
h('<noscript><h5>%s</h5></noscript>' % r)
h(self.__tmpl_reviews_table(person_id, review_needed, True))
h(' </div>')
if 'tickets' in show_tabs:
h(' <div id="tabTickets">')
r = verbiage_dict['tickets']
h('<noscript><h5>%s</h5></noscript>' % r)
r = verbiage_dict['tickets_ns']
h('<p>%s:</p>' % r)
if rt_tickets:
pass
# open_rt_tickets = [a for a in open_rt_tickets if a[1] == rt_tickets]
for t in open_rt_tickets:
name = self._('Not provided')
surname = self._('Not provided')
uidip = self._('Not available')
comments = self._('No comments')
email = self._('Not provided')
date = self._('Not Available')
actions = []
rtid = None
for info in t[0]:
if info[0] == 'firstname':
name = info[1]
elif info[0] == 'lastname':
surname = info[1]
elif info[0] == 'uid-ip':
uidip = info[1]
elif info[0] == 'comments':
comments = info[1]
elif info[0] == 'email':
email = info[1]
elif info[0] == 'date':
date = info[1]
elif info[0] in ['assign', 'reject']:
actions.append(info)
elif info[0] == 'rtid':
rtid = info[1]
if 'delete' in ticket_links:
h(('<strong>Ticket number: %(tnum)s </strong> <a rel="nofollow" id="cancel" href=%(url)s/author/claim/action?cancel_rt_ticket=True&selection=%(tnum)s&pid=%(pid)s>' + self._(' Delete this ticket') + ' </a>')
% ({'tnum': t[1], 'url': CFG_SITE_URL, 'pid': str(person_id)}))
if 'commit' in ticket_links:
h((' or <a rel="nofollow" id="commit" href=%(url)s/author/claim/action?commit_rt_ticket=True&selection=%(tnum)s&pid=%(pid)s>' + self._(' Commit this entire ticket') + ' </a> <br>')
% ({'tnum': t[1], 'url': CFG_SITE_URL, 'pid': str(person_id)}))
h('<dd>')
h('Open from: %s, %s <br>' % (surname, name))
h('Date: %s <br>' % date)
h('identified by: %s <br>' % uidip)
h('email: %s <br>' % email)
h('comments: %s <br>' % comments)
h('Suggested actions: <br>')
h('<dd>')
for a in actions:
bibref, bibrec = a[1].split(',')
pname = get_bibrefrec_name_string(bibref)
title = ""
try:
title = get_fieldvalues(int(bibrec), "245__a")[0]
except IndexError:
title = self._("No title available")
title = escape_html(title)
if 'commit_entry' in ticket_links:
h('<a rel="nofollow" id="action" href="%(url)s/author/claim/action?%(action)s=True&pid=%(pid)s&selection=%(bib)s&rt_id=%(rt)s">%(action)s - %(name)s on %(title)s </a>'
% ({'action': a[0], 'url': CFG_SITE_URL,
'pid': str(person_id), 'bib': a[1],
'name': pname, 'title': title, 'rt': t[1]}))
else:
h('%(action)s - %(name)s on %(title)s'
% ({'action': a[0], 'name': pname, 'title': title}))
if 'del_entry' in ticket_links:
h(' - <a rel="nofollow" id="action" href="%(url)s/author/claim/action?cancel_rt_ticket=True&pid=%(pid)s&selection=%(bib)s&rt_id=%(rt)s&rt_action=%(action)s"> Delete this entry </a>'
% ({'action': a[0], 'url': CFG_SITE_URL,
'pid': str(person_id), 'bib': a[1], 'rt': t[1]}))
h(' - <a rel="nofollow" id="show_paper" target="_blank" href="%(url)s/record/%(record)s"> View record </a><br>' %
({'url': CFG_SITE_URL, 'record': str(bibrec)}))
if rtid:
h('<a rel="nofollow" id="closert" href="%(url)s/author/claim/action?close_rt_ticket=True&rtid=%(rtid)s&pid=%(pid)s">Close this ticket in RT</a>'
% ({'url': CFG_SITE_URL, 'rtid': rtid,
'pid': str(person_id)}))
h('</dd>')
h('</dd><br>')
# h(str(open_rt_tickets))
h(" </div>")
if 'data' in show_tabs:
h(' <div id="tabData">')
r = verbiage_dict['data_ns']
h('<noscript><h5>%s</h5></noscript>' % r)
full_canonical_name = str(get_canonical_id_from_person_id(person_id))
if '.' in full_canonical_name:
    canonical_name = full_canonical_name[0:full_canonical_name.rindex('.')]
else:
canonical_name = str(person_id)
h('<div> <strong> Person id </strong> <br> %s <br>' % person_id)
h('<strong> <br> Canonical name setup </strong>')
h('<div style="margin-top: 15px;"> Current canonical name: %s' % full_canonical_name)
h('<form method="GET" action="%s/author/claim/action" rel="nofollow">' % CFG_SITE_URL)
h('<input type="hidden" name="set_canonical_name" value="True" />')
h('<input name="canonical_name" id="canonical_name" type="text" style="border:1px solid #333; width:500px;" value="%s" /> ' %
canonical_name)
h('<input type="hidden" name="pid" value="%s" />' % person_id)
h('<input type="submit" value="set canonical name" class="aid_btn_blue" />')
h(
    '<br>NOTE: If the canonical ID has no trailing number (e.g. J.Ellis), the first available number will be assigned. ')
h('If the canonical ID is complete (e.g. J.Ellis.1), that ID will be assigned to the current person ')
h('and if another person had that ID, they will lose it and get a new one. </form>')
h('</div>')
userid = get_uid_of_author(person_id)
h('<div> <br>')
h('<strong> Internal IDs </strong> <br>')
if userid:
email = get_email(int(userid))
h('UserID: INSPIRE user %s is associated with this profile with email: %s' % (str(userid), str(email)))
else:
h('UserID: There is no INSPIRE user associated to this profile!')
h('<br></div>')
h('</div> </div>')
h('</div>')
return "\n".join(html)
def tmpl_invenio_search_box(self):
'''
Generate a little search box for missing papers. Links to the main
Invenio search on the start page.
'''
html = []
h = html.append
h('<div style="margin-top: 15px;"> <strong>Search for missing papers:</strong> <form method="GET" action="%s/search">' %
CFG_SITE_URL)
h('<input name="p" id="p" type="text" style="border:1px solid #333; width:500px;" /> ')
h('<input type="submit" name="action_search" value="search" '
'class="aid_btn_blue" />')
h('</form> </div>')
return "\n".join(html)
def tmpl_choose_profile_search_new_person_generator(self, free_id):
def stub():
text = self._("Create new profile")
link = "%s/author/claim/action?associate_profile=True&pid=%s" % (CFG_SITE_URL, free_id)
return text, link
return stub
def tmpl_assigning_search_new_person_generator(self, bibrefs):
def stub():
text = self._("Create a new Person")
link = "%s/author/claim/action?confirm=True&pid=%s" % (CFG_SITE_URL, str(CREATE_NEW_PERSON))
for r in bibrefs:
link = link + '&selection=%s' % str(r)
return text, link
return stub
def tmpl_choose_profile_search_button_generator(self):
def stub(pid, search_param):
text = self._("This is my profile")
parameters = [('associate_profile', True), ('pid', str(pid)), ('search_param', search_param)]
link = "%s/author/claim/action" % (CFG_SITE_URL)
css_class = ""
to_disable = True
return text, link, parameters, css_class, to_disable
return stub
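# All *_search_button_generator factories return a stub with the same
# contract, unpacked later in tmpl_author_search:
#   text, link, parameters, css_class, to_disable = stub(pid, search_param)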
def tmpl_assigning_search_button_generator(self, bibrefs):
def stub(pid, search_param):
text = self._("Assign paper")
parameters = [('confirm', True), ('pid', str(pid)), ('search_param', search_param)]
for r in bibrefs:
parameters.append(('selection', str(r)))
link = "%s/author/claim/action" % (CFG_SITE_URL)
css_class = ""
to_disable = False
return text, link, parameters, css_class, to_disable
return stub
def merge_profiles_button_generator(self):
def stub(pid, search_param):
text = self._("Add to merge list")
parameters = []
link = ""
css_class = "addToMergeButton"
to_disable = False
return text, link, parameters, css_class, to_disable
return stub
def tmpl_choose_profile_search_bar(self):
def stub(search_param):
activated = True
parameters = [('search_param', search_param)]
link = "%s/author/choose_profile" % (CFG_SITE_URL, )
return activated, parameters, link
return stub
def tmpl_general_search_bar(self):
def stub(search_param):
activated = True
parameters = [('q', search_param)]
link = "%s/author/search" % (CFG_SITE_URL, )
return activated, parameters, link
return stub
def tmpl_merge_profiles_search_bar(self, primary_profile):
def stub(search_param):
activated = True
parameters = [('search_param', search_param), ('primary_profile', primary_profile)]
link = "%s/author/merge_profiles" % (CFG_SITE_URL, )
return activated, parameters, link
return stub
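# The three *_search_bar factories share one stub contract, consumed by
# tmpl_author_search when it renders the search form:
#   activated, parameters, link = stub(search_param)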
def tmpl_author_search(self, query, results, shown_element_functions):
'''
Generates the search for Person entities.
@param query: the query a user issued to the search
@type query: string
@param results: list of results
@type results: list
@param shown_element_functions: callbacks deciding which optional page
elements (search bar, action buttons, status column) are rendered
@type shown_element_functions: dict
'''
if not query:
query = ""
html = []
h = html.append
search_bar_activated = False
if 'show_search_bar' in shown_element_functions.keys():
search_bar_activated, parameters, link = shown_element_functions['show_search_bar'](query)
if search_bar_activated:
h(
'<div class="fg-toolbar ui-toolbar ui-widget-header ui-corner-tl ui-corner-tr ui-helper-clearfix" id="aid_search_bar">')
h('<form id="searchform" action="%s" method="GET">' % (link,))
h('Find author clusters by name, e.g. <i>Ellis, J</i>: <br>')
for param in parameters[1:]:
    h('<input type="hidden" name="%s" value="%s">' % (param[0], param[1]))
h('<input placeholder="Search for a name, e.g: Ellis, J" type="text" name=%s style="border:1px solid #333; width:500px;" '
'maxlength="250" value="%s" class="focus" />' % (parameters[0][0], parameters[0][1]))
h('<input type="submit" value="Search" />')
h('</form>')
if 'new_person_gen' in shown_element_functions.keys():
new_person_text, new_person_link = shown_element_functions['new_person_gen']()
h('<a rel="nofollow" href="%s" ><button type="button" id="new_person_link">%s' %
(new_person_link, new_person_text))
h('</button></a>')
h('</div>')
if not results and not query:
h('</div>')
return "\n".join(html)
if query and not results:
authemail = CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL
h(('<strong>' + self._("We do not have a publication list for '%s'." +
" Try using a less specific author name, or check" +
" back in a few days as attributions are updated " +
"frequently. Or you can send us feedback, at ") +
"<a rel='nofollow' href=\"mailto:%s\">%s</a>.</strong>") % (query, authemail, authemail))
h('</div>')
return "\n".join(html)
show_action_button = False
if 'button_gen' in shown_element_functions.keys():
show_action_button = True
show_status = False
if 'show_status' in shown_element_functions.keys():
show_status = True
pass_status = False
if 'pass_status' in shown_element_functions.keys():
pass_status = True
# base_color = 100
# row_color = 0
# html table
h('<table id="personsTable">')
h('<!-- Table header -->\
<thead>\
<tr>\
<th scope="col" id="Number" style="width:75px;">Number</th>\
<th scope="col" id="Identifier">Identifier</th>\
<th scope="col" id="Names">Names</th>\
<th scope="col" id="IDs">IDs</th>\
<th scope="col" id="Papers" style="width:350px">Papers</th>\
<th scope="col" id="Link">Link</th>')
if show_status:
h(' <th scope="col" id="Status" >Status</th>')
if show_action_button:
h(' <th scope="col" id="Action">Action</th>')
h(' </tr>\
</thead>\
<!-- Table body -->\
<tbody>')
for index, result in enumerate(results):
# if len(results) > base_color:
# row_color += 1
# else:
# row_color = base_color - (base_color - index *
# base_color / len(results)))
pid = result['pid']
canonical_id = result['canonical_id']
# person row
h('<tr id="pid' + str(pid) + '">')
h('<td>%s</td>' % (index + 1))
# for nindex, name in enumerate(names):
# color = row_color + nindex * 35
# color = min(color, base_color)
# h('<span style="color:rgb(%d,%d,%d);">%s; </span>'
# % (color, color, color, name[0]))
# Identifier
if canonical_id:
h('<td>%s</td>' % (canonical_id,))
else:
canonical_id = ''
h('<td>%s</td>' % ('No canonical id',))
# Names
h('<td class="emptyName' + str(pid) + '">')
# html.extend(self.tmpl_gen_names(names))
h('</td>')
# IDs
h('<td class="emptyIDs' + str(pid) + '" >') # style="text-align:left;padding-left:35px;"
# html.extend(self.tmpl_gen_ext_ids(external_ids))
h('</td>')
# Recent papers
h('<td>')
h(('<a rel="nofollow" href="#" id="aid_moreinfolink" class="mpid%s">'
'<img src="../img/aid_plus_16.png" '
'alt = "toggle additional information." '
'width="11" height="11"/> '
+ self._('Recent Papers') +
'</a>')
% (pid))
h('<div class="more-mpid%s" id="aid_moreinfo">' % (pid))
h('</div>')
h('</td>')
# Link
h('<td>')
h(('<span>'
'<em><a rel="nofollow" href="%s/author/profile/%s" id="aid_moreinfolink" target="_blank">'
+ self._('Go to Profile ') + '(%s)</a></em></span>')
% (CFG_SITE_URL, get_person_redirect_link(pid),
get_person_redirect_link(pid)))
h('</td>')
hidden_status = ""
if pass_status:
if result["status"]:
status = "Available"
else:
status = "Not available"
hidden_status = '<input type="hidden" name="profile_availability" value="%s"/>' % status
if show_status:
h('<td>%s</td>' % (status))
if show_action_button:
action_button_text, action_button_link, action_button_parameters, action_button_class, action_button_to_disable = shown_element_functions[
'button_gen'](pid, query) # class
# Action link
h('<td class="uncheckedProfile' + str(pid) + '" style="text-align:center; vertical-align:middle;">')
parameters_sublink = ''
if action_button_link:
parameters_sublink = '<input type="hidden" name="%s" value="%s" />' % (
action_button_parameters[0][0], str(action_button_parameters[0][1]))
for (param_type, param_value) in action_button_parameters[1:]:
parameters_sublink += '<input type="hidden" name="%s" value="%s" />' % (
param_type, str(param_value))
disabled = ""
if show_status:
if not result["status"] and action_button_to_disable:
disabled = "disabled"
h('<form action="%s" method="get">%s%s<input type="submit" name="%s" class="%s aid_btn_blue" value="%s" %s/></form>' %
(action_button_link, parameters_sublink, hidden_status, canonical_id, action_button_class, action_button_text, disabled)) # confirmlink check if canonical id
h('</td>')
h('</tr>')
h('</tbody>')
h('</table>')
return "\n".join(html)
def tmpl_gen_papers(self, papers):
"""
Generates the recent papers html code.
Returns a list of strings
"""
html = []
h = html.append
if papers:
h((self._('Showing the') + ' %d ' + self._('most recent documents:')) % len(papers))
h("<ul>")
for paper in papers:
h("<li>%s</li>"
% (format_record(int(paper[0]), "ha")))
h("</ul>")
else:
h("<p>" + self._('Sorry, there are no documents known for this person') + "</p>")
return html
def tmpl_gen_names(self, names):
"""
Generates the names html code.
Returns a list of strings
"""
html = []
h = html.append
delimiter = ";"
if names:
for i, name in enumerate(names):
if i == 0:
h('<span>%s</span>'
% (name[0],))
else:
h('<span">%s  %s</span>'
% (delimiter, name[0]))
else:
h('%s' % ('No names found',))
return html
def tmpl_gen_ext_ids(self, external_ids):
"""
Generates the external ids html code.
Returns a list of strings
"""
html = []
h = html.append
if external_ids:
h('<table id="externalIDsTable">')
for key, value in external_ids.iteritems():
h('<tr>')
h('<td style="margin-top:5px; width:1px; padding-right:2px;">%s:</td>' % key)
h('<td style="padding-left:5px;width:1px;">')
for i, item in enumerate(value):
if i == 0:
h('%s' % item)
else:
h('; %s' % item)
h('</td>')
h('</tr>')
h('</table>')
else:
h('%s' % ('No external ids found',))
return html
def tmpl_choose_profile_footer(self):
return ('<br>In case you don\'t find the correct match or your profile is already taken, please contact us here: <a rel="nofollow" href="mailto:%s">%s</a>'
% (CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL,
CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL))
def tmpl_probable_profile_suggestion(
self,
probable_profile_suggestion_info,
last_viewed_profile_suggestion_info,
search_param):
'''
Suggest the most likely profile for the user, based on the papers claimed in the external system (e.g. arXiv) they logged in through.
'''
html = []
h = html.append
last_viewed_profile_message = self._("The following profile is the one you were viewing before logging in: ")
# if the user has searched then his choice should be remembered in case the chosen profile is not available
param = ''
if search_param:
param = '&search_param=' + search_param
h('<ul>')
if probable_profile_suggestion_info:
probable_profile_message = self._("Out of %s paper(s) claimed to your arXiv account, %s match this profile: ") % \
    (probable_profile_suggestion_info['num_of_arXiv_papers'],
     probable_profile_suggestion_info['num_of_recids_intersection'])
h('<li>')
h('%s %s ' % (probable_profile_message, probable_profile_suggestion_info['name_string']))
h('<a href="%s/author/profile/%s" target="_blank"> %s </a>' % (CFG_SITE_URL, probable_profile_suggestion_info['canonical_id'],
probable_profile_suggestion_info['canonical_name_string']))
h('<a rel="nofollow" href="%s/author/claim/action?associate_profile=True&pid=%s%s" class="confirmlink"><button type="button">%s</a>' % (CFG_SITE_URL,
str(probable_profile_suggestion_info['pid']), param, 'This is my profile'))
h('</li>')
if last_viewed_profile_suggestion_info:
h('<li>')
h('%s %s ' % (last_viewed_profile_message, last_viewed_profile_suggestion_info['name_string']))
h('<a href="%s/author/profile/%s" target="_blank"> %s </a>' % (CFG_SITE_URL, last_viewed_profile_suggestion_info['canonical_id'],
last_viewed_profile_suggestion_info['canonical_name_string']))
h('<a rel="nofollow" href="%s/author/claim/action?associate_profile=True&pid=%s%s" class="confirmlink"><button type="button">%s</a>' % (CFG_SITE_URL,
str(last_viewed_profile_suggestion_info['pid']), param, 'This is my profile'))
h('</li>')
h("</ul>")
message = self._(
"If none of the options suggested above apply, you can look for other possible options from the list below:")
h('<p>%s</p>' % (message,))
h('<br>')
return "\n".join(html)
def tmpl_choose_profile(self, failed):
'''
SSO landing/choose_profile page.
'''
html = []
h = html.append
if failed:
h(
'<p><strong><font color="red">Unfortunately the profile you chose is no longer available.</font></strong></p>')
h(
'<p>We apologise for the inconvenience. Please select another one.<br>Keep in mind that you can create an empty profile and then claim all of your papers in it.</p>')
else:
h(
'<p><b>You have now successfully logged in via arXiv.org, please choose your profile among these suggestions: </b></p>')
return "\n".join(html)
def tmpl_tickets_admin(self, tickets=[]):
'''
Open tickets short overview for operators.
'''
html = []
h = html.append
if tickets:
h('List of open tickets: <br><br>')
for t in tickets:
h('<a rel="nofollow" href=%(cname)s#tabTickets> %(longname)s - (%(cname)s - PersonID: %(pid)s): %(num)s open tickets. </a><br>'
% ({'cname': str(t[1]), 'longname': str(t[0]), 'pid': str(t[2]), 'num': str(t[3])}))
else:
h('There are currently no open tickets.')
return "\n".join(html)
def tmpl_update_hep_name_headers(self):
"""
Headers used for the hepnames update form
"""
html = []
html.append(r"""<style type="text/css">
.form1
{
margin-left: auto;
margin-right: auto;
}
#tblGrid {
margin-left: 5%;
}
#tblGrid td {
padding-left: 60px;
}
.form2
{
margin-left: 15%;
margin-right: 30%;
}
.span_float_right
{
float:right;
}
.span_float_left
{
float:left;
}
</style>
<script type="text/javascript" src="/js/hepname_update.js"></script>
""")
return "\n".join(html)
def tmpl_update_hep_name(self, full_name, display_name, email,
status, research_field_list,
institution_list, phd_advisor_list,
experiment_list, web_page):
"""
Create form to update a hep name
"""
# Prepare parameters
try:
phd_advisor = phd_advisor_list[0]
except IndexError:
phd_advisor = ''
try:
phd_advisor2 = phd_advisor_list[1]
except IndexError:
phd_advisor2 = ''
is_active = is_retired = is_departed = is_deceased = ''
if status == 'ACTIVE':
    is_active = 'selected'
elif status == 'RETIRED':
    is_retired = 'selected'
elif status == 'DEPARTED':
    is_departed = 'selected'
elif status == 'DECEASED':
    is_deceased = 'selected'
research_field_html = """
<TD><INPUT TYPE=CHECKBOX VALUE=ACC-PHYS name=field>acc-phys</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=ASTRO-PH name=field>astro-ph</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=ATOM-PH name=field>atom-ph</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=CHAO-DYN name=field>chao-dyn</TD></TR>
<tr><TD><INPUT TYPE=CHECKBOX VALUE=CLIMATE name=field>climate</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=COMP name=field>comp</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=COND-MAT name=field>cond-mat</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=GENL-TH name=field>genl-th</TD></TR>
<tr><TD><INPUT TYPE=CHECKBOX VALUE=GR-QC name=field>gr-qc</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=HEP-EX name=field>hep-ex</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=HEP-LAT name=field>hep-lat</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=HEP-PH name=field>hep-ph</TD></TR>
<TR>
<TD><INPUT TYPE=CHECKBOX VALUE=HEP-TH name=field>hep-th</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=INSTR name=field>instr</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=LIBRARIAN name=field>librarian</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=MATH name=field>math</TD></TR>
<TR>
<TD><INPUT TYPE=CHECKBOX VALUE=MATH-PH name=field>math-ph</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=MED-PHYS name=field>med-phys</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=NLIN name=field>nlin</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=NUCL-EX name=field>nucl-ex</TD></TR>
<TR>
<TD><INPUT TYPE=CHECKBOX VALUE=NUCL-TH name=field>nucl-th</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=PHYSICS name=field>physics</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=PLASMA-PHYS name=field>plasma-phys</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=Q-BIO name=field>q-bio</TD></TR>
<TR>
<TD><INPUT TYPE=CHECKBOX VALUE=QUANT-PH name=field>quant-ph</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=SSRL name=field>ssrl</TD>
<TD><INPUT TYPE=CHECKBOX VALUE=OTHER name=field>other</TD>
"""
for research_field in research_field_list:
research_field_html = research_field_html.replace(
'VALUE=' + research_field,
'checked ' + 'VALUE=' + research_field)
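# The loop above pre-ticks checkboxes by plain string substitution:
# 'VALUE=HEP-PH' becomes 'checked VALUE=HEP-PH' for every entry in
# research_field_list. The same replace trick selects institution ranks
# and experiments further down.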
institutions_html = ""
institution_num = 1
for institution_entry in institution_list:
institution = """
<tr>
<td> </td>
<td class="cell_padding"><input name="aff.str" type="hidden">
<input type="text" name="inst%(institution_num)s" size="35" value =%(institution_name)s /></td>
<td class="cell_padding"><select name="rank%(institution_num)s">
<option selected value=''> </option>
<option value='SENIOR'>Senior(permanent)</option>
<option value='JUNIOR'>Junior(leads to Senior)</option>
<option value='STAFF'>Staff(non-research)</option>
<option value='VISITOR'>Visitor</option>
<option value='PD'>PostDoc</option>
<option value='PHD'>PhD</option>
<option value='MAS'>Masters</option>
<option value='UG'>Undergrad</option></select></td>
<TD class="cell_padding"><INPUT TYPE="TEXT" value=%(start_year)s name="sy%(institution_num)s" SIZE="4"/>
<INPUT TYPE="TEXT" value=%(end_year)s name="ey%(institution_num)s" SIZE="4"/></TD>
<TD class="cell_padding"> <INPUT TYPE=CHECKBOX VALUE='Y' name="current%(institution_num)s">
<input type="button" value="Delete row" class="formbutton" onclick="removeRow(this);" />
</td>
</tr>
""" % { 'institution_name': xml.sax.saxutils.quoteattr(institution_entry[0]),
'start_year': xml.sax.saxutils.quoteattr(institution_entry[2]),
'end_year': xml.sax.saxutils.quoteattr(institution_entry[3]),
'institution_num': institution_num
}
institution_num += 1
institution = institution.replace(
'value=' + '\'' + institution_entry[1] + '\'',
'selected ' + 'VALUE=' + institution_entry[1])
if institution_entry[4] == 'Current':
institution = institution.replace("VALUE='Y'", 'checked ' + "VALUE='Y'")
institutions_html += institution
institutions_html += "<script>occcnt = %s; </script>" % (institution_num - 1)
experiments_html = """
<select name=exp id=exp multiple=yes>
<option value=""> </option>
<option value=AMANDA>AMANDA</option>
<option value=AMS>AMS</option>
<option value=ANTARES>ANTARES</option>
<option value=AUGER>AUGER</option>
<option value=BAIKAL>BAIKAL</option>
<option value=BNL-E-0877>BNL-E-0877</option>
<option value=BNL-LEGS>BNL-LEGS</option>
<option value=BNL-RHIC-BRAHMS>BNL-RHIC-BRAHMS</option>
<option value=BNL-RHIC-PHENIX>BNL-RHIC-PHENIX</option>
<option value=BNL-RHIC-PHOBOS>BNL-RHIC-PHOBOS</option>
<option value=BNL-RHIC-STAR>BNL-RHIC-STAR</option>
<option value=CDMS>CDMS</option>
<option value=CERN-LEP-ALEPH>CERN-LEP-ALEPH</option>
<option value=CERN-LEP-DELPHI>CERN-LEP-DELPHI</option>
<option value=CERN-LEP-L3>CERN-LEP-L3</option>
<option value=CERN-LEP-OPAL>CERN-LEP-OPAL</option>
<option value=CERN-LHC-ALICE>CERN-LHC-ALICE</option>
<option value=CERN-LHC-ATLAS>CERN-LHC-ATLAS</option>
<option value=CERN-LHC-B>CERN-LHC-B</option>
<option value=CERN-LHC-CMS>CERN-LHC-CMS</option>
<option value=CERN-LHC-LHCB>CERN-LHC-LHCB</option>
<option value=CERN-NA-060>CERN-NA-060</option>
<option value=CERN-NA-061>CERN-NA-061</option>
<option value=CERN-NA-062>CERN-NA-062</option>
<option value=CERN-PS-214>CERN-PS-214 (HARP)</option>
<option value=CESR-CLEO>CESR-CLEO</option>
<option value=CESR-CLEO-C>CESR-CLEO-C</option>
<option value=CESR-CLEO-II>CESR-CLEO-II</option>
<option value=CHIMERA>CHIMERA</option>
<option value=COBRA>COBRA</option>
<option value=COSY-ANKE>COSY-ANKE</option>
<option value=CUORE>CUORE</option>
<option value=COUPP>COUPP</option>
<option value=DAYA-BAY>DAYA-BAY</option>
<option value=DESY-DORIS-ARGUS>DESY-DORIS-ARGUS</option>
<option value=DESY-HERA-B>DESY-HERA-B</option>
<option value=DESY-HERA-H1>DESY-HERA-H1</option>
<option value=DESY-HERA-HERMES>DESY-HERA-HERMES</option>
<option value=DESY-HERA-ZEUS>DESY-HERA-ZEUS</option>
<option value=DESY-PETRA-MARK-J>DESY-PETRA-MARK-J</option>
<option value=DESY-PETRA-PLUTO-2>DESY-PETRA-PLUTO-2</option>
<option value=DESY-PETRA-TASSO>DESY-PETRA-TASSO</option>
<option value=DOUBLE-CHOOZ>DOUBLE-CHOOZ</option>
<option value=DRIFT>DRIFT</option>
<option value=EXO>EXO</option>
<option value=FERMI-LAT>FERMI-LAT</option>
<option value=FNAL-E-0687>FNAL-E-0687</option>
<option value=FNAL-E-0690>FNAL-E-0690</option>
<option value=FNAL-E-0706>FNAL-E-0706</option>
<option value=FNAL-E-0740>FNAL-E-0740 (D0 Run I)</option>
<option value=FNAL-E-0741>FNAL-E-0741 (CDF Run I)</option>
<option value=FNAL-E-0799>FNAL-E-0799 (KTeV)</option>
<option value=FNAL-E-0815>FNAL-E-0815 (NuTeV)</option>
<option value=FNAL-E-0823>FNAL-E-0823 (D0 Run II)</option>
<option value=FNAL-E-0830>FNAL-E-0830 (CDF Run II)</option>
<option value=FNAL-E-0831>FNAL-E-0831 (FOCUS)</option>
<option value=FNAL-E-0832>FNAL-E-0832 (KTeV)</option>
<option value=FNAL-E-0872>FNAL-E-0872 (DONUT)</option>
<option value=FNAL-E-0875>FNAL-E-0875 (MINOS)</option>
<option value=FNAL-E-0886>FNAL-E-0886 (FNPL)</option>
<option value=FNAL-E-0892>FNAL-E-0892 (USCMS)</option>
<option value=FNAL-E-0898>FNAL-E-0898 (MiniBooNE)</option>
<option value=FNAL-E-0904>FNAL-E-0904 (MUCOOL)</option>
<option value=FNAL-E-0906>FNAL-E-0906 (NuSea)</option>
<option value=FNAL-E-0907>FNAL-E-0907 (MIPP)</option>
<option value=FNAL-E-0918>FNAL-E-0918 (BTeV)</option>
<option value=FNAL-E-0973>FNAL-E-0973 (Mu2e)</option>
<option value=FNAL-E-0937>FNAL-E-0937 (FINeSSE)</option>
<option value=FNAL-E-0938>FNAL-E-0938 (MINERvA)</option>
<option value=FNAL-E-0954>FNAL-E-0954 (SciBooNE)</option>
<option value=FNAL-E-0961>FNAL-E-0961 (COUPP)</option>
<option value=FNAL-E-0974>FNAL-E-0974</option>
<option value=FNAL-LC>FNAL-LC</option>
<option value=FNAL-P-0929>FNAL-P-0929 (NOvA)</option>
<option value=FNAL-T-0962>FNAL-T-0962 (ArgoNeuT)</option>
<option value=FRASCATI-DAFNE-KLOE>FRASCATI-DAFNE-KLOE</option>
<option value=FREJUS-NEMO-3>FREJUS-NEMO-3</option>
<option value=GERDA>GERDA</option>
<option value=GSI-HADES>GSI-HADES</option>
<option value=GSI-SIS-ALADIN>GSI-SIS-ALADIN</option>
<option value=HARP>HARP</option>
<option value=HESS>HESS</option>
<option value=ICECUBE>ICECUBE</option>
<option value=ILC>ILC</option>
<option value=JLAB-E-01-104>JLAB-E-01-104</option>
<option value=KAMLAND>KAMLAND</option>
<option value=KASCADE-GRANDE>KASCADE-GRANDE</option>
<option value=KATRIN>KATRIN</option>
<option value=KEK-BF-BELLE>KEK-BF-BELLE</option>
<option value=KEK-BF-BELLE-II>KEK-BF-BELLE-II</option>
<option value=KEK-T2K>KEK-T2K</option>
<option value=LBNE>LBNE</option>
<option value=LIGO>LIGO</option>
<option value=LISA>LISA</option>
<option value=LSST>LSST</option>
<option value=MAGIC>MAGIC</option>
<option value=MAJORANA>MAJORANA</option>
<option value=MICE>MICE</option>
<option value=PICASSO>PICASSO</option>
<option value=PLANCK>PLANCK</option>
<option value=SDSS>SDSS</option>
<option value=SIMPLE>SIMPLE</option>
<option value=SLAC-PEP2-BABAR>SLAC-PEP2-BABAR</option>
<option value=SNAP>SNAP</option>
<option value=SSCL-GEM>SSCL-GEM</option>
<option value=SUDBURY-SNO>SUDBURY-SNO</option>
<option value=SUDBURY-SNO+>SUDBURY-SNO+</option>
<option value=SUPER-KAMIOKANDE>SUPER-KAMIOKANDE</option>
<option value=VERITAS>VERITAS</option>
<option value=VIRGO>VIRGO</option>
<option value=WASA-COSY>WASA-COSY</option>
<option value=WMAP>WMAP</option>
<option value=XENON>XENON</option>
</select>
"""
for experiment in experiment_list:
experiments_html = experiments_html.replace('value=' + experiment, 'selected ' + 'value=' + experiment)
html = []
html.append("""<H4>Changes to Existing Records</H4>
<P>Send us your details (or someone else's). See our <a href="http://www.slac.stanford.edu/spires/hepnames/help/adding.shtml">help
for additions</A>.<BR>If something doesn't fit in the form, just put it in
the comments section.</P>
<FORM name="hepnames_addition"
onSubmit="return OnSubmitCheck();"
action=http://www.slac.stanford.edu/cgi-bin/form-mail.pl
method=post><INPUT type=hidden value=nowhere name=to id=tofield>
<INPUT type=hidden value="New HEPNames Posting" name=subject> <INPUT
type=hidden value=2bsupplied name=form_contact id=formcont> <INPUT
type=hidden value=/spires/hepnames/hepnames_msgupd.file name=email_msg_file>
<INPUT type=hidden value=/spires/hepnames/hepnames_resp_msg.file
name=response_msg_file><INPUT type=hidden value=0 name=debug>
<INPUT type=hidden value="1095498" name=key>
<INPUT type=hidden value="" name=field>
<INPUT type=hidden value="" name=current1>
<INPUT type=hidden value="" name=inst2><INPUT type=hidden value="" name=rank2>
<INPUT type=hidden value="" name=ey2><INPUT type=hidden value="" name=sy2>
<INPUT type=hidden value="" name=current2>
<INPUT type=hidden value="" name=inst3><INPUT type=hidden value="" name=rank3>
<INPUT type=hidden value="" name=ey3><INPUT type=hidden value="" name=sy3>
<INPUT type=hidden value="" name=current3>
<INPUT type=hidden value="" name=inst4><INPUT type=hidden value="" name=rank4>
<INPUT type=hidden value="" name=ey4><INPUT type=hidden value="" name=sy4>
<INPUT type=hidden value="" name=current4>
<INPUT type=hidden value="" name=inst5><INPUT type=hidden value="" name=rank5>
<INPUT type=hidden value="" name=ey5><INPUT type=hidden value="" name=sy5>
<INPUT type=hidden value="" name=current5>
<INPUT type=hidden value="" name=inst7><INPUT type=hidden value="" name=rank7>
<INPUT type=hidden value="" name=ey7><INPUT type=hidden value="" name=sy7>
<INPUT type=hidden value="" name=current7>
<INPUT type=hidden value="" name=inst6><INPUT type=hidden value="" name=rank6>
<INPUT type=hidden value="" name=ey6><INPUT type=hidden value="" name=sy6>
<INPUT type=hidden value="" name=current6>
<INPUT type=hidden value="" name=inst8><INPUT type=hidden value="" name=rank8>
<INPUT type=hidden value="" name=ey8><INPUT type=hidden value="" name=sy8>
<INPUT type=hidden value="" name=current8>
<INPUT type=hidden value="" name=inst9><INPUT type=hidden value="" name=rank9>
<INPUT type=hidden value="" name=ey9><INPUT type=hidden value="" name=sy9>
<INPUT type=hidden value="" name=current9>
<INPUT type=hidden value="" name=inst10><INPUT type=hidden value="" name=rank10>
<INPUT type=hidden value="" name=ey10><INPUT type=hidden value="" name=sy10>
<INPUT type=hidden value="" name=current10>
<INPUT type=hidden value="" name=inst11><INPUT type=hidden value="" name=rank11>
<INPUT type=hidden value="" name=ey11><INPUT type=hidden value="" name=sy11>
<INPUT type=hidden value="" name=current11>
<INPUT type=hidden value="" name=inst12><INPUT type=hidden value="" name=rank12>
<INPUT type=hidden value="" name=ey12><INPUT type=hidden value="" name=sy12>
<INPUT type=hidden value="" name=current12>
<INPUT type=hidden value="" name=inst13><INPUT type=hidden value="" name=rank13>
<INPUT type=hidden value="" name=ey13><INPUT type=hidden value="" name=sy13>
<INPUT type=hidden value="" name=current13>
<INPUT type=hidden value="" name=inst14><INPUT type=hidden value="" name=rank14>
<INPUT type=hidden value="" name=ey14><INPUT type=hidden value="" name=sy14>
<INPUT type=hidden value="" name=current14>
<INPUT type=hidden value="" name=inst15><INPUT type=hidden value="" name=rank15>
<INPUT type=hidden value="" name=ey15><INPUT type=hidden value="" name=sy15>
<INPUT type=hidden value="" name=current15>
<INPUT type=hidden value="" name=inst17><INPUT type=hidden value="" name=rank17>
<INPUT type=hidden value="" name=ey17><INPUT type=hidden value="" name=sy17>
<INPUT type=hidden value="" name=current17>
<INPUT type=hidden value="" name=inst16><INPUT type=hidden value="" name=rank16>
<INPUT type=hidden value="" name=ey16><INPUT type=hidden value="" name=sy16>
<INPUT type=hidden value="" name=current16>
<INPUT type=hidden value="" name=inst18><INPUT type=hidden value="" name=rank18>
<INPUT type=hidden value="" name=ey18><INPUT type=hidden value="" name=sy18>
<INPUT type=hidden value="" name=current18>
<INPUT type=hidden value="" name=inst19><INPUT type=hidden value="" name=rank19>
<INPUT type=hidden value="" name=ey19><INPUT type=hidden value="" name=sy19>
<INPUT type=hidden value="" name=current19>
<INPUT type=hidden value="" name=inst20><INPUT type=hidden value="" name=rank20>
<INPUT type=hidden value="" name=ey20><INPUT type=hidden value="" name=sy20>
<INPUT type=hidden value="" name=current20>
<INPUT type=hidden value="today" name=DV>
<TABLE class=form1>
<TBODY>
<TR>
<TD><STRONG>Full name</STRONG></TD>
<TD><INPUT SIZE=24 value=%(full_name)s name=authorname> <FONT SIZE=2>E.G.
Lampen, John Francis</FONT> </TD></TR>
<TR>
<TD><STRONG>Display Name</STRONG></TD>
<TD><INPUT SIZE=24 value=%(display_name)s name='dispname'> <FONT SIZE=2>E.G.
Lampén, John </FONT></TD></TR>
<TR>
<TD><STRONG> Your Email</STRONG></TD>
<TD><INPUT SIZE=24 value=%(email)s name='username' ID='username'><FONT SIZE=2>(<STRONG>REQ'D
</strong> but not displayed - contact only)</font> </TD></TR>
<TR>
<TD><STRONG>Email </STRONG>(Public)</TD>
<TD><INPUT SIZE=24 value=%(email_public)s name='email' id='email'>
<input type='button' value='Same as Above' class='formbutton' onclick='copyem();'/>
</TD></TR><tr><TD><STRONG>Status</STRONG></TD><TD>
<SELECT NAME=status>
<OPTION %(is_active)s value=ACTIVE>Active</OPTION>
<OPTION %(is_retired)s value=RETIRED>Retired</OPTION>
<OPTION %(is_departed)s value=DEPARTED>Departed</OPTION>
<OPTION %(is_deceased)s value=DECEASED>Deceased</OPTION>
</SELECT></TD></TR>
<tr><TD><STRONG>Field of research</STRONG></TD><td> <table><tbody><tr>
%(research_field_html)s
</TR></TBODY></TABLE></TD></TR>
<table id="tblGrid" >
<tr>
<td> </td>
<td class="cell_padding"><strong> Institution History</strong><br>
<FONT size=2>Please take this name from <A href="http://inspirehep.net/Institutions"
target=_TOP>Institutions</A><FONT color=red><SUP>*</SUP></FONT></TD>
<td class="cell_padding"><strong>Rank</td>
<td class="cell_padding"><strong>Start Year End Year</td>
<td class="cell_padding"><strong>Current</strong></td>
</tr>
%(institutions_html)s
</table>
<table><tr>
<a href="javascript:addRow();"> Click to add new Institution field row
<img src="/img/rightarrow.gif" ></a></tr></table>
<hr>
<table class="form2"><tbody><tr>
<TD><span class="span_float_right"><STRONG>Ph.D. Advisor</STRONG></span></TD>
<TD><span class="span_float_left"><INPUT SIZE=24 value=%(phd_advisor)s name=Advisor1> <FONT SIZE=2>E.G.
Beacom, John Francis</FONT> </span></TD></TR>
<tr><TD><span class="span_float_right"><STRONG>2nd Ph.D. Advisor</STRONG></span></TD>
<TD><span class="span_float_left"><INPUT SIZE=24 value=%(phd_advisor2)s name=Advisor2> <FONT SIZE=2>E.G.
Beacom, John Francis</FONT> </span></TD></TR>
<TD><span class="span_float_right"><STRONG>Experiments</STRONG></span>
<br /><span class="span_float_right"><FONT size=2>Hold the Control key to choose multiple current or past experiments <br> Experiments not listed can be added in the Comments field below </font></span></td>
<td><span class="span_float_left">
%(experiments_html)s
</span></td></tr>
<TR>
<TD><span class="span_float_right"><STRONG>Your web page</STRONG></span></TD>
<TD><span class="span_float_left"><INPUT SIZE=50 value=%(web)s name= URL></span></TD></TR>
<TR>
<TD><span class="span_float_right">Please send us your <STRONG>Comments</STRONG></span></td>
<TD><span class="span_float_left"><TEXTAREA NAME=Abstract ROWS=3 COLS=30></textarea><FONT SIZE=2>(not displayed)</FONT></span></TD></TR>
<tr><TD> <span class="span_float_right"><font size="1">SPAM Robots have been sending us submissions via this form, in order to prevent this we ask that you confirm that you are a real person by answering this question, which should be
easy for you, and hard for a SPAM robot. Cutting down on the extraneous submissions we get means that we can handle real requests faster.</font></span></td><td><span class="span_float_left">
<script type="text/javascript" src="https://www.slac.stanford.edu/spires/hepnames/spbeat.js">
</SCRIPT><br /><STRONG> How many people in image</STRONG> <SELECT NAME=beatspam ID=beatspam> <OPTION VALUE=""> </OPTION>
<option value="1"> one person</option>
<option value="2"> two people</option><option value="3"> three people</option>
<option value="4"> more than three</option></select></span></td></tr>
</TBODY></TABLE><INPUT type=submit class="formbutton" value="Send Request"><br /><FONT
color=red><SUP>*</SUP></FONT>Institution name should be in the form given
in the <A href="http://inspirehep.net/Institutions"
target=_TOP>INSTITUTIONS</A> database<BR>(e.g. Harvard U. * Paris U.,
VI-VII * Cambridge U., DAMTP * KEK, Tsukuba). </FORM>
""" % {'full_name': xml.sax.saxutils.quoteattr(full_name),
'display_name': xml.sax.saxutils.quoteattr(display_name),
'email': xml.sax.saxutils.quoteattr(email),
'email_public': xml.sax.saxutils.quoteattr(email),
'phd_advisor': xml.sax.saxutils.quoteattr(phd_advisor),
'phd_advisor2': xml.sax.saxutils.quoteattr(phd_advisor2),
'web': xml.sax.saxutils.quoteattr(web_page),
'is_active': is_active,
'is_retired': is_retired,
'is_departed': is_departed,
'is_deceased': is_deceased,
'research_field_html': research_field_html,
'institutions_html': institutions_html,
'experiments_html': experiments_html
})
return "\n".join(html)
# pylint: enable=C0301
def loading_html(self):
return '<img src=/img/ui-anim_basic_16x16.gif> Loading...'
def tmpl_print_searchresultbox(self, bid, header, body):
""" Print a nicely formatted box for search results. """
# first find total number of hits:
out = ('<table class="searchresultsbox" ><thead><tr><th class="searchresultsboxheader">'
+ header + '</th></tr></thead><tbody><tr><td id ="%s" class="searchresultsboxbody">' % bid
+ body + '</td></tr></tbody></table>')
return out
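# Illustrative usage sketch (hypothetical arguments, not part of the original):
# box = self.tmpl_print_searchresultbox('arxiv', '<strong>Head</strong>', 'Body HTML')
# returns a one-row table whose <td id="arxiv"> cell holds the body.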
def tmpl_arxiv_box(self, arxiv_data, ln, add_box=True, loading=True):
_ = gettext_set_language(ln)
html_head = _("""<span title="Login through arXiv is needed to verify this is your profile. When you log in your publication list will automatically update with all your arXiv publications.
You may also continue as a guest. In this case your input will be processed by our staff and will take longer to display."><strong> Login with your arXiv.org account </strong></span>""")
if arxiv_data['login']:
if arxiv_data['view_own_profile']:
html_arxiv = _("You have succesfully logged in via arXiv.<br> You can now manage your profile.<br>")
elif arxiv_data['user_has_pid']:
html_arxiv = _(
"You have successfully logged in via arXiv.<br><font color='red'>However, the profile you are viewing is not your profile.<br><br></font>")
own_profile_link = "%s/author/manage_profile/%s" % (CFG_SITE_URL, arxiv_data['user_pid'])
own_profile_text = _("Manage your profile")
html_arxiv += '<a rel="nofollow" href="%s" class="btn btn-default">%s</a>' % (
own_profile_link, own_profile_text)
else:
html_arxiv = _(
"You have successfully logged in, but <font color='red'>you are not associated with a person yet.<br>Please use the button below to choose your profile.<br></font>")
login_link = '%s/author/choose_profile' % CFG_SITE_URL
login_text = _("Choose your profile")
html_arxiv += '<br><a rel="nofollow" href="%s" class="btn btn-default">%s</a>' % (
login_link, login_text)
else:
html_arxiv = _("Please log in through arXiv to manage your profile.<br>")
login_link = "https://arxiv.org/inspire_login"
login_text = _("Login into INSPIRE through arXiv.org")
html_arxiv += '<br><a rel="nofollow" href="%s" class="btn btn-default">%s</a>' % (login_link, login_text)
if loading:
html_arxiv = self.loading_html()
if add_box:
arxiv_box = self.tmpl_print_searchresultbox('arxiv', html_head, html_arxiv)
return arxiv_box
else:
return html_arxiv
def tmpl_orcid_box(self, orcid_data, ln, orcid_info, add_box=True, loading=True):
_ = gettext_set_language(ln)
html_head = _(""" <span title="ORCiD (Open Researcher and Contributor ID) is a unique researcher identifier that distinguishes you from other researchers.
It holds a record of all your research activities. You can add your ORCiD to all your works to make sure they are associated with you. ">
<strong> Connect this profile to an ORCiD </strong></span>""")
html_orcid = ""
modal = ""
if orcid_data['orcids']:
html_orcid += _(
'This profile is already connected to the following ORCiD: <strong>%s</strong><br>' %
(",".join(['<a rel="nofollow" href="http://www.orcid.org/' + orcidid + '">' + orcidid + '</a>' for orcidid in orcid_data['orcids']]),))
if orcid_data['arxiv_login'] and orcid_data['own_profile']:
html_orcid += '<br><div class="btn-group"><a rel="nofollow" href="%s" class="btn btn-default ' % (
"%s/author/manage_profile/push_orcid_pubs" % CFG_SITE_SECURE_URL)
if orcid_info == 'running':
html_orcid += 'disabled'
html_orcid += '">%s</a>' % (
_("Push my claimed publications to ORCiD"))
html_orcid += '<button class="btn btn-primary btn-default '
if orcid_info == 'running':
html_orcid += 'disabled'
html_orcid += '" data-toggle="modal" data-target="#orcidPushHelp"><b>?</b></button></div>'
html_orcid += '<br><br><a rel="nofollow" href="%s" class="btn btn-default">%s</a>' % (
"%s/author/manage_profile/import_orcid_pubs" % CFG_SITE_SECURE_URL,
_("Import my publications from ORCiD"))
modal += '<div class="modal fade" id="orcidPushHelp"> \
<div class="modal-dialog"><div class="modal-content"> \
<div class="modal-header"> \
<h4 class="modal-title">%s</h4> \
</div> \
<div class="modal-body"> \
<p>%s</p></div> \
<div class="modal-footer"> \
<button type="button" class="btn btn-default" data-dismiss="modal">%s</button> \
<a rel="nofollow" href="%s" class="btn btn-primary">%s</a> \
</div></div></div></div>' % (
_("Pushing your claimed publication list to ORCiD"),
_("By pushing your publications list to ORCiD, \
we will send the details of all the papers and \
datasets you have claimed as yours in INSPIRE. \
Every time you perform this operation, only the \
new additions will be submitted to ORCiD."),
_("Go back"),
"%s/author/manage_profile/push_orcid_pubs" % CFG_SITE_SECURE_URL,
_("Push to ORCiD"))
else:
html_orcid += "This profile has not been connected to an ORCiD account yet. "
if orcid_data['arxiv_login'] and (orcid_data['own_profile'] or orcid_data['add_power']):
add_link = "%s/youraccount/oauth2?provider=%s" % (CFG_SITE_URL, 'orcid')
add_text = _("Connect an ORCiD to this profile")
html_orcid += '<br><br><a rel="nofollow" href="%s" class="btn btn-default">%s</a>' % (
add_link, add_text)
else:
suggest_text = _("Suggest an ORCiD for this profile:")
html_orcid += '<br><br> %s <br> <br>' % suggest_text
html_orcid += '<form class="form-inline"><div class="input-append"><input class="input-xlarge" id="suggested_orcid" type="text">'
html_orcid += ' <a id="orcid_suggestion" class="btn btn-default" href="#">'
html_orcid += '<span class="pid hidden">%s</span>%s</a></div></form>' % (
orcid_data['pid'], 'Submit Suggestion')
if loading:
html_orcid = self.loading_html()
if add_box:
orcid_box = self.tmpl_print_searchresultbox('orcid', html_head, html_orcid)
return orcid_box, modal
else:
return html_orcid, modal
def tmpl_claim_paper_box(self, claim_paper_data, ln, add_box=True, loading=True):
_ = gettext_set_language(ln)
html_head = _("""<span title="When you add more publications you make sure your publication list and citations appear correctly on your profile.
You can also assign publications to other authors. This will help %s provide more accurate publication and citation statistics. "><strong> Manage publications </strong></span>""" % BIBAUTHORID_CFG_SITE_NAME)
html_claim_paper = ("")
link = "%s/author/claim/%s?open_claim=True" % (CFG_SITE_URL, claim_paper_data['canonical_id'])
text = _("Manage publication list")
html_claim_paper += 'Assign publications to your %s profile to keep it up to date. </br></br> <a rel="nofollow" href="%s" class="btn btn-default">%s</a>' % (
BIBAUTHORID_CFG_SITE_NAME, link, text)
if loading:
html_claim_paper = self.loading_html()
if add_box:
claim_paper_box = self.tmpl_print_searchresultbox('claim_paper', html_head, html_claim_paper)
return claim_paper_box
else:
return html_claim_paper
def tmpl_orcid_message(self, orcid_info, ln):
_ = gettext_set_language(ln)
html = ''
if orcid_info == 'running':
html = ('<div class="alert alert-info" role="alert">%s</div>' % _('Request \
for pushing ORCID data is being processed. \
Your works will be available in ORCID database soon.'))
elif orcid_info == 'finished':
html = ('<div class="alert alert-success" role="alert">%s</div>' % _('Your \
request for pushing ORCID data was processed succesfully. \
Your works are available in ORCID database.'))
elif orcid_info == 'error':
html = ('<div class="alert alert-danger" role="alert">%s</div>' % _('An \
error occurred when INSPIRE was processing your ORCID data push \
request. Our developers were informed of the issue and \
will fix it.'))
elif orcid_info == 'wrong_account':
html = ('<div class="alert alert-danger" role="alert">%s</div>' % _('You \
authenticated correctly to ORCID, but you are using a different \
account than the one that is connected to your profile on INSPIRE. \
We will not allow you push your works to a different account. \
If you want to change your ORCID on your INSPIRE profile, \
please contact our staff.'))
return html
def tmpl_ext_ids_box(self, personid, int_ids_data, ext_ids_data, ln, add_box=True, loading=True):
_ = gettext_set_language(ln)
html_head = _("<strong> Person identifiers, internal and external </strong>")
html_ext_ids = 'This is personID: %s <br>' % personid
html_ext_ids += '<div> <strong> External ids: </strong><br>'
# if the user has permission to add/remove ids, i.e. if the profile belongs to the user or the user is an admin
if ext_ids_data['person_id'] == ext_ids_data['user_pid'] or ext_ids_data['ulevel'] == "admin":
add_text = _('add external id')
add_parameter = 'add_external_id'
remove_text = _('delete selected ids')
remove_parameter = 'delete_external_ids'
add_missing_text = _('Harvest missing external ids from claimed papers')
add_missing_parameter = 'add_missing_external_ids'
else:
add_text = _('suggest external id to add')
add_parameter = 'suggest_external_id_to_add'
remove_text = _('suggest selected ids to delete')
remove_parameter = 'suggest_external_ids_to_delete'
add_missing_text = _('suggest missing ids')
add_missing_parameter = 'suggest_missing_external_ids'
html_ext_ids += '<form method="GET" action="%s/author/claim/action" rel="nofollow">' % (CFG_SITE_URL)
html_ext_ids += '<input type="hidden" name="%s" value="True">' % (add_missing_parameter,)
html_ext_ids += '<input type="hidden" name="pid" value="%s">' % ext_ids_data['person_id']
html_ext_ids += '<br> <input type="submit" class="btn btn-default" value="%s"> </form>' % (add_missing_text,)
if 'ext_ids' in ext_ids_data and ext_ids_data['ext_ids']:
html_ext_ids += '<form method="GET" action="%s/author/claim/action" rel="nofollow">' % (CFG_SITE_URL)
html_ext_ids += ' <input type="hidden" name="%s" value="True">' % (remove_parameter,)
html_ext_ids += ' <input type="hidden" name="pid" value="%s">' % ext_ids_data['person_id']
for key in ext_ids_data['ext_ids']:
try:
# use a local name that does not shadow the built-in sys module
system_name = [
system for system in PERSONID_EXTERNAL_IDENTIFIER_MAP if PERSONID_EXTERNAL_IDENTIFIER_MAP[system] == key][0]
except IndexError:
system_name = ''
for id_value in ext_ids_data['ext_ids'][key]:
html_ext_ids += '<br> <input type="checkbox" name="existing_ext_ids" value="%s||%s"> <strong> %s: </strong> %s' % (
key, id_value, system_name, id_value)
html_ext_ids += ' <br> <br> <input type="submit" class="btn btn-default" value="%s"> <br> </form>' % (
remove_text,)
else:
html_ext_ids += 'UserID: There are no external ids associated with this profile!'
html_ext_ids += '<br> <br>'
html_ext_ids += '<form method="GET" action="%s/author/claim/action" rel="nofollow">' % (CFG_SITE_URL)
html_ext_ids += ' <input type="hidden" name="%s" value="True">' % (add_parameter,)
html_ext_ids += ' <input type="hidden" name="pid" value="%s">' % ext_ids_data['person_id']
html_ext_ids += ' <select name="ext_system">'
html_ext_ids += ' <option value="" selected>-- ' + self._('Choose system') + ' --</option>'
for el in PERSONID_EXTERNAL_IDENTIFIER_MAP:
html_ext_ids += ' <option value="%s"> %s </option>' % (PERSONID_EXTERNAL_IDENTIFIER_MAP[el], el)
html_ext_ids += ' </select>'
html_ext_ids += ' <input type="text" name="ext_id" id="ext_id" style="border:1px solid #333; width:350px;">'
html_ext_ids += ' <input type="submit" class="btn btn-default" value="%s" >' % (add_text,)
# html_ext_ids += '<br>NOTE: please note that if you add an external id it
# will replace the previous one (if any).')
html_ext_ids += '<br> </form> </div>'
html_ext_ids += '<br> <div> <strong> Inspire user ID: </strong> <br>'
html_ext_ids += "Current user id: %s <br>" % repr(int_ids_data['uid'])
html_ext_ids += "Previous user ids: %s <br> " % repr(int_ids_data['old_uids'])
html_ext_ids += '<br>'
html_ext_ids += '<form method="GET" action="%s/author/claim/action" rel="nofollow">' % (CFG_SITE_URL)
html_ext_ids += ' <input type="text" name="uid" id="uid" style="border:1px solid #333; width:350px;">'
html_ext_ids += ' <input type="hidden" name="%s" value="True">' % ('set_uid',)
html_ext_ids += ' <input type="hidden" name="pid" value="%s">' % ext_ids_data['person_id']
html_ext_ids += ' <input type="submit" class="btn btn-default" value="%s"> </form>' % (
'Set (steal!) user id',)
html_ext_ids += '</div>'
if loading:
html_ext_ids += self.loading_html()
if add_box:
ext_ids_box = self.tmpl_print_searchresultbox('external_ids', html_head, html_ext_ids)
return ext_ids_box
else:
return html_ext_ids
# for ajax requests add_box and loading are false
def tmpl_autoclaim_box(self, autoclaim_data, ln, add_box=True, loading=True):
_ = gettext_set_language(ln)
html_head = None
if autoclaim_data['hidden']:
return None
html_head = _("""<span title="You don’t need to add all your publications one by one.
This list contains all your publications that were automatically assigned to your INSPIRE profile through arXiv and ORCiD. "><strong> Automatically assigned publications </strong> </span>""")
if loading:
if autoclaim_data['num_of_claims'] == 0:
html_autoclaim = ''
else:
html_autoclaim = _("<span id=\"autoClaimMessage\">Please wait as we are assigning %s papers from external systems to your"
" Inspire profile</span></br>" % (str(autoclaim_data["num_of_claims"])))
html_autoclaim += self.loading_html()
else:
html_autoclaim = ''
if "unsuccessful_recids" in autoclaim_data.keys() and autoclaim_data["unsuccessful_recids"]:
message = ''
if autoclaim_data["num_of_unsuccessful_recids"] > 1:
message = _(
"The following %s publications need your review before they can be assigned to your profile:" %
(str(autoclaim_data["num_of_unsuccessful_recids"]),))
else:
message = _(
"The following publications need your review before they can be assigned to your profile:")
html_autoclaim += "<br><span id=\"autoClaimUnSuccessMessage\">%s</span></br>" % (message,)
html_autoclaim += '<div style="border:2px;height:100px;overflow:scroll;overflow-y:auto;overflow-x:auto;">'
html_autoclaim += '<br><strong>Publication title</strong> <ol type="1"> <br>'
for rec in autoclaim_data['unsuccessful_recids']:
html_autoclaim += '<li> <a href="%s/record/%s"> <b> ' % (
CFG_SITE_URL,
rec) + autoclaim_data['recids_to_external_ids'][rec] + '</b></a></li>\n'
html_autoclaim += '</ol><br>\n</div>'
link = "%s/author/ticket/review_autoclaim" % CFG_SITE_URL
text = _("Review assigning")
html_autoclaim += '<br><span class=\"bsw\"><a rel="nofollow" href="%s" class="btn btn-default">%s</a></span><br><br>' % (
link, text)
if "successful_recids" in autoclaim_data.keys() and autoclaim_data["successful_recids"]:
message = _('The following publications have been successfully assigned to your profile:\n')
html_autoclaim += "<span id=\"autoClaimSuccessMessage\">%s</span><br>" % (message,)
html_autoclaim += '<div style="border:2px;height:300px;overflow:scroll;overflow-y:auto;overflow-x:auto;">'
html_autoclaim += '<br><strong>Publication title</strong> <ol type="1" style="padding-left:20px"> <br>'
for rec in autoclaim_data['successful_recids']:
html_autoclaim += '<li> <a href="%s/record/%s"> <b> ' % (
CFG_SITE_URL,
rec) + autoclaim_data['recids_to_external_ids'][rec] + '</b></a></li>\n'
html_autoclaim += '</ol><br>\n</div>'
if not html_autoclaim:
html_autoclaim = 'There are no publications to be automatically assigned'
if add_box:
autoclaim_box = self.tmpl_print_searchresultbox('autoclaim', html_head, html_autoclaim)
return autoclaim_box
else:
return '<div id="autoclaim"> %s </div>' % html_autoclaim
def tmpl_support_box(self, support_data, ln, add_box=True, loading=True):
_ = gettext_set_language(ln)
help_link = "%s/author/manage_profile/contact-us" % (CFG_SITE_URL)
help_text = _("Contact Form")
html_head = _("<strong> Contact </strong>")
html_support = _(
"Please contact our user support if you need help or want to suggest new ideas. We will get back to you.<br>")
html_support += '<br><a rel="nofollow" href="%s" class="btn btn-default contactTrigger">%s</a>' % (help_link, help_text)
if loading:
html_support = self.loading_html()
if add_box:
support_box = self.tmpl_print_searchresultbox('support', html_head, html_support)
return support_box
else:
return html_support
def tmpl_merge_box(self, merge_data, ln, add_box=True, loading=True):
_ = gettext_set_language(ln)
html_head = _("""<span title="It sometimes happens that somebody's publications are scattered among two or more profiles for various reasons
(different spelling, change of name, multiple people with the same name). You can merge a set of profiles together.
This will assign all the information (including publications, IDs and citations) to the profile you choose as a primary profile.
After the merging only the primary profile will exist in the system and all others will be automatically deleted. "><strong> Merge profiles </strong></span>""")
html_merge = _(
"If your or somebody else's publications in %s exist in multiple profiles, you can fix that here.<br>" %
BIBAUTHORID_CFG_SITE_NAME)
merge_link = "%s/author/merge_profiles?search_param=%s&primary_profile=%s" % (
CFG_SITE_URL, merge_data['search_param'], merge_data['canonical_id'])
merge_text = _("Merge profiles")
html_merge += '<br><a rel="nofollow" href="%s" class="btn btn-default">%s</a>' % (merge_link, merge_text)
if loading:
html_merge = self.loading_html()
if add_box:
merge_box = self.tmpl_print_searchresultbox('merge', html_head, html_merge)
return merge_box
else:
return html_merge
def tmpl_hepnames_box(self, hepnames_data, ln, add_box=True, loading=True):
_ = gettext_set_language(ln)
if not loading:
try:
heprec = str(hepnames_data['heprecord'][0])
except (TypeError, KeyError, IndexError):
heprec = ''
if hepnames_data['HaveHep']:
contents = hepnames_data['heprecord']
else:
contents = ''
if not hepnames_data['HaveChoices']:
contents += ("There is no HepNames record associated with this profile. "
"<a href='http://slac.stanford.edu/spires/hepnames/additions.shtml'> Create a new one! </a> <br>"
"The new HepNames record will be visible and associated <br> to this author "
"after manual revision, usually within a few days.")
else:
#<a href="mailto:[email protected]?subject=title&body=something">Mail Me</a>
contents += ("There is no unique HepNames record associated "
"with this profile. <br> Please tell us if you think it is one of "
"the following, or <a href='http://slac.stanford.edu/spires/hepnames/additions.shtml'> Create a new one! </a> <br>"
"<br><br> Possible choices are: ")
# mailbody = ("Hello! Please connect the author profile %s "
# "with the HepNames record %s. Best regards" % (hepnames_data['cid'], '%s'))
# mailstr = '<form method="GET" action="%s/author/manage_profile/connect_author_with_hepname" rel="nofollow">' \
# '<input type="hidden" name="cname" value="%s">' \
# '<input type="hidden" name="hepname" value="%s">' \
# '<input type="submit" class="btn btn-default" value="%s"> </form>' % (CFG_SITE_URL, hepnames_data['cid'], '%s', 'This is the right one!',)
# mailstr = ('''<class="choose_hepname" cname="%s" hepname_rec=%s> This is the right one! </class="choose_hepname">''' % (hepnames_data['cid'], '%s'))
# mailstr = ('''<a href='mailto:%s?subject=HepNames record match: %s %s&body=%s'>'''
# '''This is the right one!</a>''' % ('%s', hepnames_data['cid'], heprec, '%s'))
mailstr = (
'''<a id="hepname_connection" class="btn btn-default" href="#"><span class="cname hidden">%s</span><span class="hepname hidden">%s</span>%s</a>''' %
(hepnames_data['cid'], '%s', 'This is the right one!'))
choices = ['<tr><td>' + x[0] + '</td><td> </td><td align="right">' + mailstr % x[1] + '</td></tr>'
for x in hepnames_data['HepChoices']]
contents += '<table>' + ' '.join(choices) + '</table>'
else:
contents = self.loading_html()
if not add_box:
return contents
else:
return self.tmpl_print_searchresultbox('hepdata', '<strong> HepNames data </strong>', contents)
def tmpl_open_table(self, width_pcnt=False, cell_padding=False, height_pcnt=False):
options = []
if height_pcnt:
options.append('height=%s' % height_pcnt)
if width_pcnt:
options.append('width=%s' % width_pcnt)
else:
options.append('width=100%')
if cell_padding:
options.append('cellpadding=%s' % cell_padding)
else:
options.append('cellpadding=0')
return '<table border=0 %s >' % ' '.join(options)
def tmpl_close_table(self):
return "</table>"
def tmpl_open_row(self):
return "<tr>"
def tmpl_close_row(self):
return "</tr>"
def tmpl_open_col(self):
return "<td valign='top'>"
def tmpl_close_col(self):
return "</td>"
def _grid(self, rows, cols, table_width=False, cell_padding=False):
tmpl = self
def cont(*boxes):
out = []
h = out.append
idx = 0
h(tmpl.tmpl_open_table(width_pcnt=table_width, cell_padding=cell_padding))
for _ in range(rows):
h(tmpl.tmpl_open_row())
for _ in range(cols):
h(tmpl.tmpl_open_col())
h(boxes[idx])
idx += 1
h(tmpl.tmpl_close_col())
h(tmpl.tmpl_close_row())
h(tmpl.tmpl_close_table())
return '\n'.join(out)
return cont
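# Illustrative usage sketch (hypothetical boxes, not part of the original):
# the returned closure fills the table row by row, left to right, e.g.
#   two_columns = self._grid(1, 2, table_width='100%')
#   html = two_columns(arxiv_box, orcid_box)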
verbiage_dict = {'guest': {'confirmed': 'Papers',
'repealed': 'Papers removed from this profile',
'review': 'Papers in need of review',
'tickets': 'Open Tickets', 'data': 'Data',
'confirmed_ns': 'Papers of this Person',
'repealed_ns': 'Papers _not_ of this Person',
'review_ns': 'Papers in need of review',
'tickets_ns': 'Tickets for this Person',
'data_ns': 'Additional Data for this Person'},
'user': {'owner': {'confirmed': 'Your papers',
'repealed': 'Not your papers',
'review': 'Papers in need of review',
'tickets': 'Your tickets', 'data': 'Data',
'confirmed_ns': 'Your papers',
'repealed_ns': 'Not your papers',
'review_ns': 'Papers in need of review',
'tickets_ns': 'Your tickets',
'data_ns': 'Additional Data for this Person'},
'not_owner': {'confirmed': 'Papers',
'repealed': 'Papers removed from this profile',
'review': 'Papers in need of review',
'tickets': 'Your tickets', 'data': 'Data',
'confirmed_ns': 'Papers of this Person',
'repealed_ns': 'Papers _not_ of this Person',
'review_ns': 'Papers in need of review',
'tickets_ns': 'Tickets you created about this person',
'data_ns': 'Additional Data for this Person'}},
'admin': {'confirmed': 'Papers',
'repealed': 'Papers removed from this profile',
'review': 'Papers in need of review',
'tickets': 'Tickets', 'data': 'Data',
'confirmed_ns': 'Papers of this Person',
'repealed_ns': 'Papers _not_ of this Person',
'review_ns': 'Papers in need of review',
'tickets_ns': 'Request Tickets',
'data_ns': 'Additional Data for this Person'}}
buttons_verbiage_dict = {
'guest': {'mass_buttons': {'no_doc_string': 'Sorry, there are currently no documents to be found in this category.',
'b_confirm': 'Yes, those papers are by this person.',
'b_repeal': 'No, those papers are not by this person',
'b_to_others': 'Assign to another person',
'b_forget': 'Forget decision'},
'record_undecided': {'alt_confirm': 'Confirm!',
'confirm_text': 'Yes, this paper is by this person.',
'alt_repeal': 'Rejected!',
'repeal_text': 'No, this paper is <i>not</i> by this person',
'to_other_text': 'Assign to another person',
'alt_to_other': 'To other person!'},
'record_confirmed': {'alt_confirm': 'Confirmed.',
'confirm_text': 'Marked as this person\'s paper',
'alt_forget': 'Forget decision!',
'forget_text': 'Forget decision.',
'alt_repeal': 'Repeal!',
'repeal_text': 'But it\'s <i>not</i> this person\'s paper.',
'to_other_text': 'Assign to another person',
'alt_to_other': 'To other person!'},
'record_repealed': {'alt_confirm': 'Confirm!',
'confirm_text': 'But it <i>is</i> this person\'s paper.',
'alt_forget': 'Forget decision!',
'forget_text': 'Forget decision.',
'alt_repeal': 'Repealed',
'repeal_text': 'Marked as not this person\'s paper',
'to_other_text': 'Assign to another person',
'alt_to_other': 'To other person!'}},
'user': {'owner': {'mass_buttons': {'no_doc_string': 'Sorry, there are currently no documents to be found in this category.',
'b_confirm': 'These are mine!',
'b_repeal': 'These are not mine!',
'b_to_others': 'It\'s not mine, but I know whose it is!',
'b_forget': 'Forget decision'},
'record_undecided': {'alt_confirm': 'Mine!',
'confirm_text': 'This is my paper!',
'alt_repeal': 'Not mine!',
'repeal_text': 'This is not my paper!',
'to_other_text': 'Assign to another person',
'alt_to_other': 'To other person!'},
'record_confirmed': {'alt_confirm': 'Not Mine.',
'confirm_text': 'Marked as my paper!',
'alt_forget': 'Forget decision!',
'forget_text': 'Forget assignment decision',
'alt_repeal': 'Not Mine!',
'repeal_text': 'But this is not mine!',
'to_other_text': 'Assign to another person',
'alt_to_other': 'To other person!'},
'record_repealed': {'alt_confirm': 'Mine!',
'confirm_text': 'But this is my paper!',
'alt_forget': 'Forget decision!',
'forget_text': 'Forget decision!',
'alt_repeal': 'Not Mine!',
'repeal_text': 'Marked as not your paper.',
'to_other_text': 'Assign to another person',
'alt_to_other': 'To other person!'}},
'not_owner': {'mass_buttons': {'no_doc_string': 'Sorry, there are currently no documents to be found in this category.',
'b_confirm': 'Yes, those papers are by this person.',
'b_repeal': 'No, those papers are not by this person',
'b_to_others': 'Assign to another person',
'b_forget': 'Forget decision'},
'record_undecided': {'alt_confirm': 'Confirm!',
'confirm_text':
'Yes, this paper is by this person.',
'alt_repeal': 'Rejected!',
'repeal_text':
'No, this paper is <i>not</i> by this person',
'to_other_text': 'Assign to another person',
'alt_to_other': 'To other person!'},
'record_confirmed': {'alt_confirm': 'Confirmed.',
'confirm_text': 'Marked as this person\'s paper',
'alt_forget': 'Forget decision!',
'forget_text': 'Forget decision.',
'alt_repeal': 'Repeal!',
'repeal_text':
'But it\'s <i>not</i> this person\'s paper.',
'to_other_text': 'Assign to another person',
'alt_to_other': 'To other person!'},
'record_repealed': {'alt_confirm': 'Confirm!',
'confirm_text':
'But it <i>is</i> this person\'s paper.',
'alt_forget': 'Forget decision!',
'forget_text': 'Forget decision.',
'alt_repeal': 'Repealed',
'repeal_text': 'Marked as not this person\'s paper',
'to_other_text': 'Assign to another person',
'alt_to_other': 'To other person!'}}},
'admin': {'mass_buttons': {'no_doc_string': 'Sorry, there are currently no documents to be found in this category.',
'b_confirm': 'Yes, those papers are by this person.',
'b_repeal': 'No, those papers are not by this person',
'b_to_others': 'Assign to another person',
'b_forget': 'Forget decision'},
'record_undecided': {'alt_confirm': 'Confirm!',
'confirm_text': 'Yes, this paper is by this person.',
'alt_repeal': 'Rejected!',
'repeal_text': 'No, this paper is <i>not</i> by this person',
'to_other_text': 'Assign to another person',
'alt_to_other': 'To other person!'},
'record_confirmed': {'alt_confirm': 'Confirmed.',
'confirm_text': 'Marked as this person\'s paper',
'alt_forget': 'Forget decision!',
'forget_text': 'Forget decision.',
'alt_repeal': 'Repeal!',
'repeal_text': 'But it\'s <i>not</i> this person\'s paper.',
'to_other_text': 'Assign to another person',
'alt_to_other': 'To other person!'},
'record_repealed': {'alt_confirm': 'Confirm!',
'confirm_text': 'But it <i>is</i> this person\'s paper.',
'alt_forget': 'Forget decision!',
'forget_text': 'Forget decision.',
'alt_repeal': 'Repealed',
'repeal_text': 'Marked as not this person\'s paper',
'to_other_text': 'Assign to another person',
'alt_to_other': 'To other person!'}}}
| gpl-2.0 | 261,724,183,691,193,570 | 52.27154 | 267 | 0.493982 | false | 4.021654 | false | false | false |
iw3hxn/LibrERP | mrp_bom_history/wizard/bom_add_note.py | 1 | 1132 | from mail.mail_message import truncate_text
from openerp.osv import orm, fields
from tools.translate import _
class bom_add_note(orm.TransientModel):
"""Adds a new note to the case."""
_name = 'bom.add.note'
_description = "Add Internal Note"
_columns = {
'body': fields.text('Note Body', required=True),
}
def action_add(self, cr, uid, ids, context=None):
if context is None:
context = {}
if not context.get('active_model'):
raise orm.except_orm(_('Error'), _('Cannot add note!'))
model = context.get('active_model')
case_pool = self.pool.get(model)
for obj in self.browse(cr, uid, ids, context=context):
case_list = case_pool.browse(cr, uid, context['active_ids'],
context=context)
case = case_list[0]
case_pool.message_append(cr, uid, [case], truncate_text(obj.body),
body_text=obj.body)
return {'type': 'ir.actions.act_window_close'}
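# Illustrative sketch (hypothetical model and ids, not part of the original):
# the wizard expects the caller to pass the target records via the context, e.g.
# self.pool.get('bom.add.note').action_add(cr, uid, [wizard_id],
#     context={'active_model': 'mrp.bom', 'active_ids': [bom_id]})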
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -8,780,630,179,386,807,000 | 32.294118 | 78 | 0.576855 | false | 3.811448 | false | false | false |
glennguy/plugin.video.9now | resources/lib/menu.py | 1 | 4419 | import xbmcgui
import xbmcplugin
import comm
import config
import sys
import urlparse
import urllib
from aussieaddonscommon import utils
_url = sys.argv[0]
_handle = int(sys.argv[1])
def list_categories():
"""
Make initial list
"""
try:
listing = []
categories = config.CATEGORIES
for category in categories:
li = xbmcgui.ListItem(category)
url_string = '{0}?action=listcategories&category={1}'
url = url_string.format(_url, category)
is_folder = True
listing.append((url, li, is_folder))
genres = comm.list_genres()
for g in genres:
li = xbmcgui.ListItem(g.title, iconImage=g.thumb,
thumbnailImage=g.thumb)
li.setArt({'fanart': g.fanart})
url_string = '{0}?action=listcategories&category=genre&genre={1}'
url = url_string.format(_url, g.title)
is_folder = True
listing.append((url, li, is_folder))
li = xbmcgui.ListItem('Settings')
is_folder = True  # set explicitly instead of relying on the loop variable above
listing.append(('{0}?action=settings'.format(_url), li, is_folder))
xbmcplugin.addDirectoryItems(_handle, listing, len(listing))
xbmcplugin.endOfDirectory(_handle)
except Exception:
utils.handle_error('Unable to list categories')
def make_episodes_list(url):
""" Make list of episode Listitems for Kodi"""
try:
params = dict(urlparse.parse_qsl(url))
episodes = comm.list_episodes(params)
listing = []
for e in episodes:
li = xbmcgui.ListItem(e.title, iconImage=e.thumb,
thumbnailImage=e.thumb)
li.setArt({'fanart': e.fanart})
url = '{0}?action=listepisodes{1}'.format(_url, e.make_kodi_url())
is_folder = False
li.setProperty('IsPlayable', 'true')
if e.drm is True:
li.setProperty('inputstreamaddon', 'inputstream.adaptive')
li.setInfo('video', {'plot': e.desc,
'plotoutline': e.desc,
'duration': e.duration,
'date': e.get_airdate()})
listing.append((url, li, is_folder))
xbmcplugin.addDirectoryItems(_handle, listing, len(listing))
xbmcplugin.endOfDirectory(_handle)
except Exception:
utils.handle_error('Unable to list episodes')
def make_live_list(url):
""" Make list of channel Listitems for Kodi"""
try:
params = dict(urlparse.parse_qsl(url))
channels = comm.list_live(params)
listing = []
for c in channels:
li = xbmcgui.ListItem(c.title, iconImage=c.thumb,
thumbnailImage=c.thumb)
li.setArt({'fanart': c.fanart})
url = '{0}?action=listchannels{1}'.format(_url, c.make_kodi_url())
is_folder = False
li.setProperty('IsPlayable', 'true')
li.setInfo('video', {'plot': c.desc,
'plotoutline': c.episode_name})
listing.append((url, li, is_folder))
xbmcplugin.addDirectoryItems(_handle, listing, len(listing))
xbmcplugin.endOfDirectory(_handle)
except Exception:
utils.handle_error('Unable to list channels')
def make_series_list(url):
""" Make list of series Listitems for Kodi"""
try:
params = dict(urlparse.parse_qsl(url))
series_list = comm.list_series()
filtered = []
if 'genre' in params:
for s in series_list:
if s.genre == urllib.unquote_plus(params['genre']):
filtered.append(s)
else:
filtered = series_list
listing = []
for s in filtered:
li = xbmcgui.ListItem(s.title, iconImage=s.thumb,
thumbnailImage=s.thumb)
li.setArt({'fanart': s.fanart})
url = '{0}?action=listseries{1}'.format(_url, s.make_kodi_url())
is_folder = True
listing.append((url, li, is_folder))
xbmcplugin.addSortMethod(
_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
xbmcplugin.addDirectoryItems(_handle, listing, len(listing))
xbmcplugin.endOfDirectory(_handle)
except Exception:
utils.handle_error('Unable to list series')
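# Illustrative sketch (not part of the original module): these builders are
# typically dispatched from the add-on entry point by parsing the plugin URL,
# roughly:
# params = dict(urlparse.parse_qsl(sys.argv[2][1:]))
# if params.get('action') == 'listcategories':
#     make_series_list(sys.argv[2][1:])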
| gpl-3.0 | -3,547,577,061,269,654,000 | 34.926829 | 78 | 0.560308 | false | 4.02459 | false | false | false |
vrde/pandora | pandora/migrations/0001_initial.py | 1 | 2086 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Item'
db.create_table(u'pandora_item', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('filename', self.gf('django.db.models.fields.CharField')(unique=True, max_length=1024)),
('dt', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=1024)),
('description', self.gf('django.db.models.fields.TextField')(null=True)),
('mimetype', self.gf('django.db.models.fields.CharField')(max_length=255)),
('ip_address', self.gf('django.db.models.fields.IPAddressField')(max_length=15)),
('size', self.gf('django.db.models.fields.BigIntegerField')()),
))
db.send_create_signal(u'pandora', ['Item'])
def backwards(self, orm):
# Deleting model 'Item'
db.delete_table(u'pandora_item')
models = {
u'pandora.item': {
'Meta': {'ordering': "['-dt']", 'object_name': 'Item'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'dt': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1024'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'mimetype': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'size': ('django.db.models.fields.BigIntegerField', [], {})
}
}
complete_apps = ['pandora'] | mit | 2,874,409,250,318,841,300 | 46.431818 | 108 | 0.584372 | false | 3.627826 | false | false | false |
acil-bwh/SlicerCIP | Scripted/attic/CIP_GetImage/CIP_GetImage.py | 1 | 28162 | """ACIL_GetImage is a module developed for the internal use of the Applied Chest Imaging Laboratory to download
cases stored in MAD server via ssh.
It works both in Unix/Mac/Windows, and it uses an internal SSH key created specifically for this purpose, so it
doesn't need that the user has an authorized SSH key installed.
First version: Jorge Onieva (ACIL, [email protected]). Sept 2014"""
import os, sys
from __main__ import vtk, qt, ctk, slicer
from collections import OrderedDict
import subprocess
# Add the CIP common library to the path if it has not been loaded yet
try:
from CIP.logic.SlicerUtil import SlicerUtil
except Exception as ex:
currentpath = os.path.dirname(os.path.realpath(__file__))
# We assume that CIP_Common is in the development structure
path = os.path.normpath(currentpath + '/../../Scripted/CIP_Common')
if not os.path.exists(path):
# We assume that CIP is a subfolder (Slicer behaviour)
path = os.path.normpath(currentpath + '/CIP')
sys.path.append(path)
print(("The following path was manually added to the PythonPath in CIP_GetImage: " + path))
from CIP.logic.SlicerUtil import SlicerUtil
from CIP.logic import Util
import CIP.ui as CIPUI
class CIP_GetImage:
"""Load cases from a SSH server or other device"""
def __init__(self, parent):
"""Constructor for main class"""
self.parent = parent
#ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "CIP GetImage"
self.parent.categories = ["Chest Imaging Platform.Modules"]
self.parent.dependencies = []
self.parent.contributors = ["Jorge Onieva", "Applied Chest Imaging Laboratory", "Brigham and Women's Hospital"]
self.parent.helpText = "This is an internal module to load images from MAD repository via SSH"
self.parent.acknowledgementText = SlicerUtil.ACIL_AcknowledgementText
class CIP_GetImageWidget:
"""Visual object"""
# Study ids. Convention: Descriptive text (key) / Name of the folder in the server
studyIds = OrderedDict()
studyIds["Study 1"] = "Study1"
studyIds["Study 2"] = "Study2"
studyIds["Other"] = "Other"
# Image types. You can add as many entries as different volume types you have
# Convention:
# Descriptive text (key)
# Files extension (example: "processed").
imageTypes = OrderedDict()
imageTypes["CT"] = "" # Default. No extension
imageTypes["CT Processed"] = "processed" # "processed" extension appended to the file name
# Label maps types. Idem
# Convention:
# Descriptive text (key)
# Checked by default
# Files extension (example: case_partialLungLabelMap.nrrd)
labelMapTypes = OrderedDict()
labelMapTypes["Partial Lung"] = (False, "_partialLungLabelMap")
labelMapTypes["Body Composition"] = (False, "_bodyComposition")
labelMapTypes["Body Composition (interactive)"] = (False, "_interactiveBodyComposition")
def __init__(self, parent = None):
"""Widget constructor (existing module)"""
if not parent:
self.parent = slicer.qMRMLWidget()
self.parent.setLayout(qt.QVBoxLayout())
self.parent.setMRMLScene(slicer.mrmlScene)
else:
self.parent = parent
self.layout = self.parent.layout()
if not parent:
self.setup()
self.parent.show()
def setup(self):
"""Init the widget """
self.modulePath = SlicerUtil.getModuleFolder("CIP_GetImage")
self.resourcesPath = os.path.join(self.modulePath, "CIP_GetImage_Resources")
self.StudyId = ""
self.logic = CIP_GetImageLogic(self.modulePath)
# Widget to load cases faster
self.loadSaveDatabuttonsWidget = CIPUI.LoadSaveDataWidget(parentWidget=self.parent)
self.loadSaveDatabuttonsWidget.setup(moduleName="CIP_GetImage")
#
# Obligatory parameters area
#
parametersCollapsibleButton = ctk.ctkCollapsibleButton()
parametersCollapsibleButton.text = "Image data"
self.layout.addWidget(parametersCollapsibleButton)
parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)
# Study radio buttons
label = qt.QLabel()
label.text = "Select the study:"
parametersFormLayout.addRow(label)
self.rbgStudy=qt.QButtonGroup()
for key in self.studyIds:
rbStudyid = qt.QRadioButton(key)
self.rbgStudy.addButton(rbStudyid)
parametersFormLayout.addWidget(rbStudyid)
self.txtOtherStudy = qt.QLineEdit()
self.txtOtherStudy.hide()
parametersFormLayout.addWidget(self.txtOtherStudy)
# Case id
self.txtCaseId = qt.QLineEdit()
parametersFormLayout.addRow("Case ID ", self.txtCaseId)
# Image types
label = qt.QLabel()
label.text = "Select the images that you want to load:"
parametersFormLayout.addRow(label)
self.cbsImageTypes = []
for key in self.imageTypes:
check = qt.QCheckBox()
check.checked = True
check.setText(key)
parametersFormLayout.addWidget(check)
self.cbsImageTypes.append(check)
# Label maps
label = qt.QLabel()
label.text = "Select the label maps that you want to load:"
parametersFormLayout.addRow(label)
# Labelmap types checkboxes
self.cbsLabelMapTypes = []
for key in self.labelMapTypes:
check = qt.QCheckBox()
check.setText(key)
check.checked = self.labelMapTypes[key][0]
parametersFormLayout.addWidget(check)
self.cbsLabelMapTypes.append(check)
# Load image Button
self.downloadButton = qt.QPushButton("Download")
self.downloadButton.toolTip = "Load the image"
#self.downloadButton.enabled = False
self.downloadButton.setStyleSheet("background-color: green; font-weight:bold; color:white" )
parametersFormLayout.addRow(self.downloadButton)
self.downloadButton.connect('clicked (bool)', self.onDownloadButton)
# Information message
self.lblDownloading = qt.QLabel()
self.lblDownloading.text = "Downloading images. Please wait..."
self.lblDownloading.hide()
parametersFormLayout.addRow(self.lblDownloading)
#
# Optional Parameters
#
optionalParametersCollapsibleButton = ctk.ctkCollapsibleButton()
optionalParametersCollapsibleButton.text = "Optional parameters"
self.layout.addWidget(optionalParametersCollapsibleButton)
optionalParametersFormLayout = qt.QFormLayout(optionalParametersCollapsibleButton)
# Local storage (Slicer temporary path)
self.localStoragePath = "{0}/CIP".format(slicer.app.temporaryPath)
if not os.path.exists(self.localStoragePath):
os.makedirs(self.localStoragePath)
# Make sure that everybody has write permissions (sometimes there are problems because of umask)
os.chmod(self.localStoragePath, 0o777)
self.storagePathButton = ctk.ctkDirectoryButton()
self.storagePathButton.directory = self.localStoragePath
optionalParametersFormLayout.addRow("Local directory: ", self.storagePathButton)
# Connection type (SSH, "normal")
label = qt.QLabel()
label.text = "Connection type:"
optionalParametersFormLayout.addRow(label)
self.rbgConnectionType=qt.QButtonGroup()
self.rbSSH = qt.QRadioButton("SSH (secure connection)")
self.rbSSH.setChecked(True)
self.rbgConnectionType.addButton(self.rbSSH)
optionalParametersFormLayout.addWidget(self.rbSSH)
self.rbCP = qt.QRadioButton("Common")
self.rbgConnectionType.addButton(self.rbCP)
optionalParametersFormLayout.addWidget(self.rbCP)
# SSH Server login
self.txtServer = qt.QLineEdit()
s = SlicerUtil.settingGetOrSetDefault("CIP_GetImage", "server", "This is your ssh user and server. Example: [email protected]")
self.txtServer.text = s # This is your ssh user and server. Example: [email protected]"
optionalParametersFormLayout.addRow("Server:", self.txtServer)
# Server root path
self.txtServerpath = qt.QLineEdit()
s = SlicerUtil.settingGetOrSetDefault("CIP_GetImage", "serverRootPath", "This is your root path to search for files. Ex: /Cases/Processed")
self.txtServerpath.text = s # This is your root path to search for files. Ex: /Cases/Processed
optionalParametersFormLayout.addRow("Server root path:", self.txtServerpath)
# SSH Private key
self.txtPrivateKeySSH = qt.QLineEdit()
s = SlicerUtil.settingGetOrSetDefault("CIP_GetImage", "sshKey", "")
self.txtPrivateKeySSH.text = s # this is the full path to your ssh key if you need it. Be aware of Unix/Windows compatibility (hint: use os.path.join)
# Please notice that you won't need a SSH key if your computer already has one locally installed"
optionalParametersFormLayout.addRow("SSH private key (leave blank for computer's default): ", self.txtPrivateKeySSH)
# Cache mode
self.cbCacheMode = qt.QCheckBox("Cache mode activated")
self.cbCacheMode.setChecked(True) # Cache mode is activated by default
optionalParametersFormLayout.addRow("", self.cbCacheMode)
# Clean cache Button
self.cleanCacheButton = qt.QPushButton("Clean cache")
self.cleanCacheButton.toolTip = "Remove all the local cached files"
optionalParametersFormLayout.addRow(self.cleanCacheButton)
optionalParametersCollapsibleButton.collapsed = True
if SlicerUtil.IsDevelopment:
# reload button
self.reloadButton = qt.QPushButton("Reload (just development)")
self.reloadButton.toolTip = "Reload this module (for development purposes)."
self.reloadButton.name = "Reload"
self.layout.addWidget(self.reloadButton)
self.reloadButton.connect('clicked()', self.onReload)
# Add vertical spacer
self.layout.addStretch(1)
# Connections
self.rbgStudy.connect("buttonClicked (QAbstractButton*)", self.onRbStudyClicked)
self.txtOtherStudy.connect("textEdited (QString)", self.onTxtOtherStudyEdited)
self.rbgConnectionType.connect("buttonClicked (QAbstractButton*)", self.onRbgConnectionType)
self.storagePathButton.connect("directorySelected(QString)", self.onTmpDirChanged)
self.cleanCacheButton.connect('clicked (bool)', self.onCleanCacheButtonClicked)
def saveSettings(self):
"""Save the current values in settings to reuse it in future sessions"""
SlicerUtil.setSetting("CIP_GetImage", "sshKey", self.txtPrivateKeySSH.text)
SlicerUtil.setSetting("CIP_GetImage", "server", self.txtServer.text)
SlicerUtil.setSetting("CIP_GetImage", "serverRootPath", self.txtServerpath.text)
def cleanup(self):
self.saveSettings()
#
# Events handling
#
def onDownloadButton(self):
"""Click in download button"""
# Check if there is a Study and Case introduced
self.CaseId = self.txtCaseId.text.strip()
if self.CaseId and self.StudyId:
self.lblDownloading.show()
slicer.app.processEvents()
# Get the selected image types and label maps
imageTypes = [self.imageTypes[cb.text] for cb in [check for check in self.cbsImageTypes if check.isChecked()]]
labelMapExtensions = [self.labelMapTypes[cb.text] for cb in [check for check in self.cbsLabelMapTypes if check.isChecked()]]
result = self.logic.loadCase(self.txtServer.text, self.txtServerpath.text, self.StudyId, self.txtCaseId.text, imageTypes, labelMapExtensions, self.localStoragePath, self.cbCacheMode.checkState(), self.rbSSH.isChecked(), self.txtPrivateKeySSH.text)
self.lblDownloading.hide()
if (result == Util.ERROR):
self.msgBox = qt.QMessageBox(qt.QMessageBox.Warning, 'Error', "There was an error when downloading some of the images of this case. It is possible that some of the selected images were not available on the server. Please review the log console for more details.\nSuggested actions:\n-Empty cache\n-Restart Slicer")
self.msgBox.show()
else:
# Show info message
self.msgBox = qt.QMessageBox(qt.QMessageBox.Information, 'Attention', "Please make sure that you have selected a study and a case")
self.msgBox.show()
def onRbStudyClicked(self, button):
"""Study radio buttons clicked (any of them)"""
self.StudyId = self.studyIds[button.text]
self.txtOtherStudy.visible = (button.text == "Other")
if (self.txtOtherStudy.visible):
self.StudyId = self.txtOtherStudy.text.strip()
#self.checkDownloadButtonEnabled()
def onRbgConnectionType(self, button):
self.txtServer.enabled = self.txtPrivateKeySSH.enabled = self.rbSSH.isChecked()
#self.txtPrivateKeySSH.enabled = self.rbSSH.checked
def onTxtOtherStudyEdited(self, text):
"""Any letter typed in "Other study" text box """
self.StudyId = text
#self.checkDownloadButtonEnabled()
def onCleanCacheButtonClicked(self):
"""Clean cache button clicked. Remove all the files in the current local storage path directory"""
import shutil
# Remove directory
shutil.rmtree(self.localStoragePath, ignore_errors=True)
# Recreate it (this is a safe method for symbolic links)
os.makedirs(self.localStoragePath)
# Make sure that everybody has write permissions (sometimes there are problems because of umask)
os.chmod(self.localStoragePath, 0o777)
def onTmpDirChanged(self, d):
print(("Temp dir changed. New dir: " + d))
self.localStoragePath = d
def onReload(self, moduleName="CIP_GetImage"):
"""Reload the module. Just for development purposes. This is a combination of the old and new style in modules writing"""
try:
slicer.util.reloadScriptedModule(moduleName)
except:
#Generic reload method for any scripted module.
#ModuleWizard will substitute correct default moduleName.
import imp, sys
widgetName = moduleName + "Widget"
# reload the source code
# - set source file path
# - load the module to the global space
filePath = eval('slicer.modules.%s.path' % moduleName.lower())
p = os.path.dirname(filePath)
if p not in sys.path:
sys.path.insert(0,p)
fp = open(filePath, "r")
globals()[moduleName] = imp.load_module(
moduleName, fp, filePath, ('.py', 'r', imp.PY_SOURCE))
fp.close()
# rebuild the widget
# - find and hide the existing widget
# - create a new widget in the existing parent
# parent = slicer.util.findChildren(name='%s Reload' % moduleName)[0].parent()
parent = self.parent
for child in parent.children():
try:
child.hide()
except AttributeError:
pass
globals()[widgetName.lower()] = eval(
'globals()["%s"].%s(parent)' % (moduleName, widgetName))
globals()[widgetName.lower()].setup()
#
# CIP_GetImageLogic
# This class makes all the operations not related with the user interface (download and handle volumes, etc.)
#
class CIP_GetImageLogic:
def __init__(self, modulePath):
"""Constructor. Adapt the module full path to windows convention when necessary"""
#ScriptedLoadableModuleLogic.__init__(self)
self.modulePath = modulePath
def loadCase(self, server, serverPath, studyId, caseId, imageTypesExtensions, labelMapExtensions, localStoragePath, cacheOn, sshMode, privateKeySSH):
"""Load all the asked images for a case: main images and label maps.
Arguments:
- server -- User and name of the host. Default: [email protected]
- serverPath -- Root path for all the cases. Default: /mad/store-replicated/clients/copd/Processed
- studyId -- Code of the study. Ex: COPDGene
- caseId -- Case id (NOT patient! It will be extracted from here). Example: 12257B_INSP_STD_UIA_COPD
- imageTypesExtensions -- Extensions of the images that must be appended before 'nrrd' in the filename. Default is blank
- labelMapExtensions -- Extensions that must be appended to the file name to find the labelmap. Ex: _partialLungLabelMap
- localStoragePath -- Local folder where all the images will be downloaded
- cacheOn -- When True, the images are not downloaded if they already exist in local
- privateKeySSH -- Full path to the file that contains the private key used to connect with SSH to the server
Returns OK or ERROR
"""
try:
# Extract Patient Id
patientId = caseId.split('_')[0]
for ext in imageTypesExtensions:
locPath = self.downloadNrrdFile(server, serverPath, studyId, patientId, caseId, ext, localStoragePath, cacheOn, sshMode, privateKeySSH)
if (SlicerUtil.IsDevelopment): print("Loading volume stored in " + locPath)
slicer.util.loadVolume(locPath)
for ext in labelMapExtensions:
locPath = self.downloadNrrdFile(server, serverPath, studyId, patientId, caseId, ext[1], localStoragePath, cacheOn, sshMode, privateKeySSH)
if (SlicerUtil.IsDevelopment): print("Loading label map stored in " + locPath)
(code, vtkLabelmapVolumeNode) = slicer.util.loadLabelVolume(locPath, {}, returnNode=True) # Braces are needed for Windows compatibility... No comments...
return Util.OK
except Exception as exception:
print(exception)
return Util.ERROR
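# Illustrative usage sketch (values taken from the docstring defaults above;
# not part of the original module):
# logic = CIP_GetImageLogic(modulePath)
# result = logic.loadCase("[email protected]",
#     "/mad/store-replicated/clients/copd/Processed", "COPDGene",
#     "12257B_INSP_STD_UIA_COPD", imageTypesExtensions=[""],
#     labelMapExtensions=[(False, "_partialLungLabelMap")],  # (checked, suffix) tuples
#     localStoragePath="/tmp/CIP", cacheOn=True, sshMode=True, privateKeySSH=None)
# result is Util.OK or Util.ERROR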
def mustSplit(self, labelMapStructure):
return labelMapStructure[3] is not None
def downloadNrrdFile(self, server, serverPath, studyId, patientId, caseId, ext, localStoragePath, cacheOn, sshMode=True, privateKeySSH=None):
"""Download Header and Raw data in a Nrrd file.
Returns the full local path for the nhdr file (header)
"""
localFile = "{0}/{1}{2}.nhdr".format(localStoragePath, caseId, ext)
# If cache mode is not activated or the file does not exist locally, proceed to download
if (not cacheOn or not os.path.isfile(localFile)):
error = False
try:
if os.path.isfile(localFile):
# Delete file previously to avoid confirmation messages
print("Remove cached files: " + localFile)
try:
            os.remove(localFile)
            os.remove("{0}/{1}{2}.raw.gz".format(localStoragePath, caseId, ext))
except:
print("Error when deleting local files ({0})".format(localFile))
# Make sure that the ssh key has not too many permissions if it is used (otherwise scp will return an error)
if privateKeySSH:
os.chmod(privateKeySSH, 0o600)
# Download header
if (os.sys.platform == "win32"):
localStoragePath = localStoragePath.replace('/', '\\') + '\\'
if sshMode:
if privateKeySSH:
privateKeyCommand = "-privatekey={0}".format(privateKeySSH)
else:
privateKeyCommand = ""
params = [("%s\\CIP_GetImage_Resources\\WinSCP.com" % self.modulePath) ,"/command", 'open {0} {1}'.format(server, privateKeyCommand), \
'get {0}/{1}/{2}/{3}/{3}{4}.nhdr {5}'.format(serverPath, studyId, patientId, caseId, ext, localStoragePath), "exit"]
else:
params = ['copy',"{0}\\{1}\\{2}\\{3}\\{3}{4}.nhdr".format(serverPath, studyId, patientId, caseId, ext), localStoragePath]
else:
# Unix
if sshMode:
keyCommand = ("-i %s " % privateKeySSH) if privateKeySSH else "" # Set a command if privateKeySsh has any value (non empty)
params = ['scp',"{0}{1}:{2}/{3}/{4}/{5}/{5}{6}.nhdr".format(keyCommand, server, serverPath, studyId, patientId, caseId, ext), localStoragePath]
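            # Note: keyCommand is embedded in the same argv element as the remote path, so plain
            # argv-based subprocess calls will not split it; the shell-based
            # executeDowloadCommand_Backup below is the fallback that copes with this.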
else:
params = ['cp',"{0}/{1}/{2}/{3}/{3}{4}.nhdr".format(serverPath, studyId, patientId, caseId, ext), localStoragePath]
fullStrCommand = " ".join(params)
(result, output, error) = self.executeDownloadCommand(params)
if (result == Util.ERROR):
print("Error when executing download command. Params:")
print(params)
if (error == None):
error = "Unnknown error"
raise Exception(error)
# Download raw data (just update a parameter)
if (os.sys.platform == "win32"):
if sshMode: paramToModify = 3
else: paramToModify = 1
else:
# Unix
paramToModify = 1
# Replace the name of the parameter
params[paramToModify] = params[paramToModify].replace(".nhdr", ".raw.gz")
        # Download the raw data
(result, output, error) = self.executeDownloadCommand(params)
if (result == Util.ERROR):
print ("Error when executing download command. Params:")
print (params)
if (error == None):
error = "Unnknown error"
raise Exception(error)
        # If everything goes well, check the path of the Nrrd file to verify that the files have been correctly downloaded
missingFiles = ""
if not os.path.isfile(localFile):
missingFiles = missingFiles + localFile + ";"
if not os.path.isfile(localFile.replace(".nhdr", ".raw.gz")):
missingFiles = missingFiles + localFile.replace(".nhdr", ".raw.gz") + ";"
if missingFiles:
raise Exception("The download command did not return any error message, but the following files have not been downloaded: " + missingFiles)
except Exception as ex:
        # There was an error in the preferred method. If we are in a Unix system, we will try the backup method
if os.sys.platform != "win32":
print(("There was an error when downloading some of the files: " + error))
print("Trying alternative method...")
self.executeDowloadCommand_Backup(fullStrCommand)
          # If everything goes well, check the path of the Nrrd file to verify that the files have been correctly downloaded
missingFiles = ""
if not os.path.isfile(localFile): missingFiles = missingFiles + localFile + ";"
if not os.path.isfile(localFile.replace(".nhdr", ".raw.gz")): missingFiles = missingFiles + localFile.replace(".nhdr", ".raw.gz") + ";"
if missingFiles:
raise Exception("After a second attempt, the following files have not been downloaded: " + missingFiles)
print("Apparently it worked!")
else:
raise ex
else:
print("File {0} already cached".format(localFile))
# Return path to the Nrrd header file
return localFile
def executeDownloadCommand(self, params):
"""Execute a command to download fisically the file. It will be different depending on the current platform.
In Unix, we will use the "scp" command.
In Windows, we will use WinSCP tool (attached to the module in "Resources" folder)
It returns a tuple: OK/ERROR, StandardOutput, ErrorMessage"""
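    # Illustrative params (hypothetical paths), as assembled by downloadNrrdFile above:
    #   Unix:    ['scp', '[email protected]:/mad/.../COPDGene/12257B/12257B_INSP_STD_UIA_COPD.nhdr', '/tmp/CIP_cases']
    #   Windows: ['<modulePath>\\CIP_GetImage_Resources\\WinSCP.com', '/command', 'open ...', 'get ...', 'exit']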
if SlicerUtil.IsDevelopment:
print ("Attempt to download with these params:")
print (params)
try:
out = err = None
if (os.sys.platform == "win32"):
# Hide console window
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
proc = subprocess.Popen(params, stdout=subprocess.PIPE, stderr=subprocess.PIPE, startupinfo=startupinfo)
print ("Launch process")
# Launch the process
(out, err) = proc.communicate()
print("End of process")
else:
# Preferred method.
proc = subprocess.Popen(params, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Launch the process
(out, err) = proc.communicate()
if SlicerUtil.IsDevelopment:
print("Out: " + out)
print("Err:" + err)
if err:
print("Error returned by system process: " + err)
except Exception as ex:
print("FATAL ERROR IN COPY PROCESS:")
print(ex)
# Fatal error
return (Util.ERROR, out, err)
    # In Unix, if something went wrong, stderr will sometimes contain a value
if err:
return (Util.ERROR, out, err) # ERROR!
## Everything ok
return (Util.OK, out, err)
def executeDowloadCommand_Backup(self, command):
"""Backup function that will be used when the preferred method fails"""
subprocess.check_call(command, shell=True)
subprocess.check_call(command.replace(".nhdr", ".raw.gz"), shell=True)
| bsd-3-clause | -6,395,616,928,657,452,000 | 47.471601 | 331 | 0.586642 | false | 4.448973 | false | false | false |
mvtuong/mysite | v1/blog/models.py | 1 | 2011 | from django.db import models
from django.db.models import permalink
# Create your models here.
class Blog(models.Model):
title = models.CharField(max_length=200)
slug = models.SlugField(unique=True)
content = models.TextField(default='', blank=True)
description = models.TextField(default='', blank=True)
date = models.DateField(db_index=True, auto_now_add=True)
topic = models.ForeignKey('blog.Topic')
tag = models.ManyToManyField('blog.Tag', blank=True)
images = models.ManyToManyField('blog.BlogImage', blank=True)
files = models.ManyToManyField('blog.BlogFile', blank=True)
hiden = models.BooleanField(default=False)
featured = models.BooleanField(default=False)
    def __str__(self):
        return self.title
@permalink
def get_absolute_url(self):
if (self.topic.name == "Project"):
return ('view_project_post', None, { 'slug': self.slug })
else:
return ('view_blog_post', None, { 'slug': self.slug })
class BlogImage(models.Model):
image = models.ImageField(upload_to="static/user_upload/images/")
    def __str__(self):
        return self.image.url
class BlogFile(models.Model):
file = models.FileField(upload_to="static/user_upload/files/")
    def __str__(self):
        return self.file.url
class Topic(models.Model):
name = models.CharField(max_length=100, db_index=True)
slug = models.SlugField(db_index=True)
description = models.TextField(max_length=500, blank=True)
    def __str__(self):
        return self.name
@permalink
def get_absolute_url(self):
return ('view_blog_topic', None, { 'slug': self.slug })
class Tag(models.Model):
name = models.CharField(max_length=100, db_index=True)
slug = models.SlugField(db_index=True)
description = models.TextField(max_length=500, blank=True)
    def __str__(self):
        return self.name
@permalink
def get_absolute_url(self):
return ('view_blog_tag', None, { 'slug': self.slug }) | apache-2.0 | 2,280,699,659,320,023,600 | 31.451613 | 69 | 0.661363 | false | 3.485269 | false | false | false |
MarilyGunnersLab/MCCE | mcce_stable/bin/pdbdict2tpl.py | 1 | 1624 | #!/usr/bin/python2
import sys
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Usage:'
print ' ', sys.argv[0], 'hicup_pdbdict_file'
sys.exit(0)
fin = open(sys.argv[1]).readlines()
for i_line in range(0, len(fin)):
if (fin[i_line][:7] == 'RESIDUE'):
resname = fin[i_line][10:13]
print '#ONNECT conf atom orbital ires conn ires conn ires conn ires conn ires conn'
print '#ONNECT |-----|----|---------|----|----|----|----|----|----|----|----|----|----|'
for i_line in range(0, len(fin)):
#01234567890123456789012345678901234567890
#RESIDUE RET 49
#CONECT C2 4 C1 C3 1H2 2H2
#ONNECT conf atom orbital ires conn ires conn ires conn ires conn
#ONNECT |-----|----|---------|----|----|----|----|----|----|----|----|----|----|
#CONNECT RSB+1 C19 sp3 0 C9 0 1H19 0 2H19 0 3H19
if (fin[i_line][:6] == 'CONECT'):
atomname = fin[i_line][11:15]
n_connect = int(fin[i_line][18:20])
if (n_connect == 1): orbital = ' s'
elif (n_connect == 2): orbital = 'sp3'
elif (n_connect == 3): orbital = 'sp2'
elif (n_connect == 4): orbital = 'sp3'
else: orbital = 'UNK'
connect = []
for i_connect in range(0, n_connect):
connect.append(fin[i_line][20+i_connect*5:25+i_connect*5])
print 'CONNECT ','%s01' %resname,atomname,' ',orbital, ' 0 %s' * n_connect %tuple(connect)
| mit | -6,502,614,572,368,778,000 | 38.609756 | 108 | 0.454433 | false | 3.064151 | false | false | false |
Balandat/cont_no_regret | old_code/testing.py | 1 | 3136 | '''
Created on Feb 24, 2015
@author: balandat
'''
import numpy as np
from scipy.integrate import quad
import matplotlib.pyplot as plt
from ContNoRegret.Domains import S
from ContNoRegret.Distributions import Uniform
from ContNoRegret.utils import create_random_Sigmas
from ContNoRegret.LossFunctions import GaussianLossFunction
from scipy.stats import expon
from scipy.interpolate import SmoothBivariateSpline, LSQBivariateSpline
# def compute_constants(gamma):
# c = (gamma-1)**(-1)
# a2 = gamma*(1+gamma)/2
# a1 = gamma - 2*c*a2
# a0 = 1 - c*a1 - c**2*a2
# return c, np.array([a0, a1, a2])
#
# def phi(u, gamma):
# c,a = compute_constants(gamma)
# return ( (u<c)*(gamma/(gamma-1)-np.minimum(u,c))**(-gamma) +
# (u>=c)*(a[0]+a[1]*np.maximum(u,c)+a[2]*np.maximum(u,c)**2) )
#
# def phi_prime(u, gamma):
# c,a = compute_constants(gamma)
# return (u<c)*gamma*(gamma/(gamma-1)-np.minimum(u,c))**(-(1+gamma)) + (u>=c)*(a[1]+2*a[2]*np.maximum(u,c))
#
# def phi_double_prime(u, gamma):
# c,a = compute_constants(gamma)
# return (u<c)*gamma*(1+gamma)*(gamma/(gamma-1)-np.minimum(u,c))**(-(2+gamma)) + (u>=c)*2*a[2]
#
# def phi_inv(u, gamma):
# c,a = compute_constants(gamma)
# b = phi(c, gamma)
# return ( (u<b)*(gamma/(gamma-1)-np.minimum(u,b)**(-1/gamma)) +
# (u>=b)*(-a[1]/2/a[2]+np.sqrt(a[1]**2/4/a[2]**2 - (a[0]-np.maximum(u,b))/a[2])) )
#
# def phi_inv_prime(u, gamma):
# return 1/phi_prime(phi_inv(u, gamma))
#
#
# # Plot some functions
# gammas = [1.25, 1.5, 1.75, 2, 3]
# u = np.linspace(-1.5,5,10000)
# v = np.linspace(0.001,10,10000)
# f,axs = plt.subplots(3,1)
# axs[0].plot(u, np.exp(u-1))
# axs[1].plot(u, np.exp(u-1))
# axs[2].plot(u, np.exp(u-1))
# for gamma in gammas:
# axs[0].plot(u, phi(u,gamma))
# axs[1].plot(u, phi_prime(u,gamma))
# axs[2].plot(u, phi_double_prime(u,gamma))
# plt.show()
# for gamma in gammas:
# # gamma = 1.5
# ctilde = gamma/(gamma-1)
# a2 = 0.5*gamma*(1+gamma)/((ctilde-1)**(2+gamma))
# a1 = gamma/((ctilde-1)**(1+gamma)) - 2*a2
# a0 = 1/((ctilde-1)**gamma) - a1 - a2
#
# def phi(u):
# return (u<1)*(ctilde-np.minimum(u,1))**(-gamma) + (u>=1)*(a0+a1*np.maximum(u,1)+a2*np.maximum(u,1)**2)
#
# def phiprime(u):
# return (u<1)*gamma*(ctilde-np.minimum(u,1))**(-(1+gamma)) + (u>=1)*(a1+2*a2*np.maximum(u,1))
#
# def phiinv(u):
# return (u<1)*(ctilde-np.minimum(u,1)**(-1/gamma)) + (u>=1)*(-a1/2/a2+np.sqrt(a1**2/4/a2**2 - (a0-np.maximum(u,1))/a2))
#
# def phiinvprime(u):
# return 1/phiprime(phiinv(u))
# # return (u<1)/gamma*u**(-1+1/gamma) + (u>=1)*(a1**2-4*a2*(a0-np.maximum(u,1)))**(-1/2)
#
#
# # fig2, (ax2, ax3) = plt.subplots(2, 1)
# # fig3, ax4 = plt.subplots(1)
#
# ax1.plot(u, phi(u))#, u, np.exp(u-1))
# # v = np.linspace(0.001, 5, 10000)
# # ax2.plot(v, phiinv(v), v, 1+np.log(v))
# # ax3.plot(v, phiinvprime(v), v, 1/v)
# # ax4.plot(v, phiinvprime(v)-1/(3*v))
# # print(np.min(phiinvprime(v)-1/(3+v))
# plt.show()
| mit | -3,819,883,864,521,728,500 | 31.677083 | 132 | 0.552296 | false | 2.200702 | false | false | false |
maxprais/psoriassist | psoriassist/models.py | 1 | 4403 | from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
class AppUser(models.Model):
user = models.OneToOneField(User)
age = models.IntegerField(null=True)
birthday = models.DateField(null=True)
profile_picture = models.ImageField(null=True)
last_doctor_appointment = models.DateField(null=True)
bio = models.CharField(max_length=2000, null=True)
isMentor = models.BooleanField()
mentoree = models.ManyToManyField('AppUser')
doctor = models.ManyToManyField('Doctor')
def __str__(self):
return self.user.username
class Message(models.Model):
user = models.ForeignKey(AppUser, related_name='initialiseConvo')
other_user = models.ForeignKey(AppUser, related_name='answerConvo')
content = models.TextField()
message_date = models.DateTimeField()
delivered = models.BooleanField(default=False)
def __str__(self):
return self.message_date
class PASIScore(models.Model):
user = models.ForeignKey(AppUser)
score = models.CharField(max_length=100)
def __str__(self):
return self.user.user.username
class LesionSection(models.Model):
section_name = models.CharField(max_length=100)
PASI = models.ForeignKey(PASIScore)
def __str__(self):
return self.section_name
class Lesion(models.Model):
user = models.ForeignKey(AppUser)
name = models.CharField(null=True, max_length=500)
image = models.CharField(max_length=2000, blank=True)
lesion_location = models.ForeignKey(LesionSection, null=True)
date_taken = models.DateTimeField(null=True)
thickness = models.IntegerField(null=True)
redness = models.IntegerField(null=True)
scale = models.IntegerField(null=True)
def __str__(self):
return "%s- %s %s" % (self.user.user.username, self.name, self.date_taken)
class MentalState(models.Model):
user = models.ForeignKey(AppUser)
stress = models.IntegerField()
anxiety = models.IntegerField()
mood = models.IntegerField()
date_taken = models.DateTimeField()
def __str__(self):
return "%s- %s" % (self.user.user.username, self.date_taken)
class Medication(models.Model):
user = models.ForeignKey(AppUser)
name = models.CharField(max_length=800)
prescribed_by = models.ForeignKey('Doctor')
date_prescribed = models.DateField()
expiration_date = models.DateField()
dosage = models.CharField(max_length=2000)
other_info = models.TextField(max_length=2000)
isCurrent = models.BooleanField()
def __str__(self):
return "%s- %s" % (self.user.user.username, self.name)
class Rating(models.Model):
user = models.ManyToManyField(AppUser)
medication = models.ForeignKey(Medication)
effectiveness = models.SmallIntegerField(default=0)
quality_of_life = models.SmallIntegerField(default=0)
adherence = models.SmallIntegerField(default=0)
def __str__(self):
return "%s" % self.medication.name
class Doctor(models.Model):
name = models.CharField(max_length=100)
work_address = models.CharField(max_length=500)
profile_picture = models.ImageField(null=True)
distance_from_user = models.CharField(max_length=300)
def __str__(self):
return self.name
class Appointment(models.Model):
user = models.ManyToManyField(AppUser)
doctor = models.ManyToManyField(Doctor)
date = models.DateTimeField()
location = models.CharField( max_length=800)
type_of_appointment = models.CharField(max_length=100)
reason_for_appointment = models.TextField(max_length=2000)
duration = models.TimeField()
    def __str__(self):
        # user and doctor are ManyToManyFields; show the first related objects
        u, d = self.user.first(), self.doctor.first()
        return "%s %s %s" % (u.user.username if u else "?", d.name if d else "?", self.date)
class ComputerConversation(models.Model):
user = models.ForeignKey(AppUser)
date_sent = models.DateTimeField(auto_now_add=True)
index = models.IntegerField(default=0)
def __str__(self):
return self.user.user.username
#
# def message_time(self):
# date_sent = datetime.now()
# return date_sent
# computer_message = models.TextField(max_length=2000, null=True)
# user_message = models.CharField(max_length=2000, null=True)
| mit | 8,355,907,875,308,252,000 | 30.858209 | 82 | 0.67454 | false | 3.712479 | false | false | false |
rec/grit | grit/command/Version.py | 1 | 1631 | from __future__ import absolute_import, division, print_function, unicode_literals
import os
import re
import semver
VERSION = re.compile(r'\d+\.\d+\.\d+(?:-\w\d*)?')  # prerelease suffix is optional
from grit.Args import ARGS
from grit import ChangeLog
from grit import CommandList
from grit import File
from grit import Git
from grit import GitRoot
from grit import Project
HELP = """
grit v[ersion] [<version-number>]
Without an argument, prints the current project version number.
With an argument, replaces the original version number with the argument.
"""
SAFE = True
def get_version():
files = Project.settings('version')['files']
for f in files:
old_version = File.search(f, VERSION)
if old_version:
return old_version
raise Exception('ERROR: no version number found.')
def version_commit(version_number=None, success=None, failure=None):
root = GitRoot.root()
files = Project.settings('version')['files']
old_version = get_version()
if version_number == old_version:
raise Exception('Version number is already %s' % old_version)
if not version_number:
version_number = semver.increment_string(old_version)
if not CommandList.confirm('update version %s to %s' %
(old_version, version_number)):
return
for f in files:
File.subn(os.path.join(root, f), VERSION, version_number)
if success or failure:
ChangeLog.add_status_line(version_number, success, failure)
Git.git('commit', '-am', 'Set version to %s' % version_number)
def version(version_number=None):
version_commit(version_number)
| artistic-2.0 | -4,114,299,311,459,008,000 | 29.203704 | 82 | 0.676272 | false | 3.775463 | false | false | false |
lukedeo/cross-domain | data_analysis.py | 1 | 7237 | """
Functions to get insight into the data
"""
import sys
import pickle
#
# Categories anaylisis of all the amazon data
#
# Number of products: 2498330
# Multilabel elements: 888864
# Percentage of products with a given category
# ============================================
# Collectibles: 0.000273
# Music: 0.024316
# VideoGames: 0.019782
# Electronics: 0.079275
# Beauty: 0.020367
# Automotive: 0.057635
# Movies & TV: 0.000462
# no_category: 0.016674
# Baby: 0.017930
# Books: 0.408854
# Kitchen: 0.083820
# Everything Else: 0.000734
# Grocery: 0.018467
# MobileApps: 0.000008
# Software: 0.004045
# KindleStore: 0.275891
# SportingGoods: 0.090299
# OfficeProducts: 0.032052
# ArtsAndCrafts: 0.017305
# Magazines: 0.009083
# Appliances: 0.007523
# Toys: 0.029429
# LawnAndGarden: 0.026913
# Tools: 0.051303
# MusicalInstruments: 0.022971
# HealthPersonalCare: 0.047808
def categories_distribution(partitions_to_use, total_num_partitions):
"""
Gives information about the frequency of categories or number of elements with more than one category
Gets the data from a list of partitions of data
"""
FILE_NAME_TEMPLATE = "data/amazon-data-%s-of-%s.pkl"
multilabel_count = 0
cat_freq = {'no_category': 0}
num_products = 0
count = 1
for i in partitions_to_use:
sys.stdout.write('Analyzing package %d out of %d \r' % (count, len(partitions_to_use)))
sys.stdout.flush()
file_to_open = FILE_NAME_TEMPLATE % (i, total_num_partitions)
[products, prod_reviews] = pickle.load(open(file_to_open))
for review in prod_reviews:
labels = review['labels']
# Count categories, and number of products with more than one label
if len(labels) == 0:
cat_freq['no_category'] += 1
else:
if len(labels) > 1:
multilabel_count += 1
for cat in labels:
if cat in cat_freq:
cat_freq[cat] += 1
else:
cat_freq[cat] = 1
num_products += 1
# Just in case we need to get the data afterwards
# if len(review['labels']) != 0:
# reviews.append(review['text'])
# labels.append(review['labels'][0])
count += 1
#Normalize data
for cat in cat_freq:
cat_freq[cat] = 1.0 * cat_freq[cat] / num_products
# Show data
sys.stdout.write("\nNumber of products: %d" % num_products)
sys.stdout.write("\nMultilabel elements: %d \n" % multilabel_count)
sys.stdout.write("Percentage of products with a given category\n")
sys.stdout.write("============================================\n")
for cat in cat_freq:
sys.stdout.write("%s: %f\n" % (cat, cat_freq[cat]))
sys.stdout.write("")
return cat_freq
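# Illustrative usage (hypothetical partition layout on disk):
#   cat_freq = categories_distribution(range(1, 5), 20)  # analyze packages 1-4 out of 20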
def build_social_data(twitter=True, ebay=True):
"""
Builds the twitter data and gets the labels of each item. Allows retrieving from
    different sources.
    Returns the data in the format [social_items, labels]
"""
    TWITTER_FILE = 'data/twitter.pkl'
    EBAY_FILE = 'data/ebay.pkl'
    # Holds the social items (tweets, ebay reviews...)
    social_items = []
    # Holds the labels for each social item
    # NOTE: For the moment we take the first label we have in the product!
    labels = []
    multilabel_count = 0
    cat_freq = {'no_category': 0}
    num_products = 0
    count = 0
    if twitter:
        tweets = pickle.load(open(TWITTER_FILE))
        for tweet in tweets:
            item_labels = tweet['labels']
            # Count categories, and number of items with more than one label
            if len(item_labels) == 0:
                cat_freq['no_category'] += 1
            else:
                if len(item_labels) > 1:
                    multilabel_count += 1
                for cat in item_labels:
                    if cat in cat_freq:
                        cat_freq[cat] += 1
                    else:
                        cat_freq[cat] = 1
                # Assumes each pickled item carries its raw text under 'text',
                # mirroring the amazon review records used above
                social_items.append(tweet['text'])
                labels.append(item_labels[0])
                count += 1
            num_products += 1
    if ebay:
        products = pickle.load(open(EBAY_FILE))
        for product in products:
            item_labels = product['labels']
            # Count categories, and number of items with more than one label
            if len(item_labels) == 0:
                cat_freq['no_category'] += 1
            else:
                if len(item_labels) > 1:
                    multilabel_count += 1
                for cat in item_labels:
                    if cat in cat_freq:
                        cat_freq[cat] += 1
                    else:
                        cat_freq[cat] = 1
                social_items.append(product['text'])
                labels.append(item_labels[0])
                count += 1
            num_products += 1
    # Normalize data
    for cat in cat_freq:
        cat_freq[cat] = 1.0 * cat_freq[cat] / num_products
    # Show data
    sys.stdout.write("\nNumber of products: %d" % num_products)
    sys.stdout.write("\nMultilabel elements: %d \n" % multilabel_count)
    sys.stdout.write("Percentage of products with a given category\n")
    sys.stdout.write("============================================\n")
    for cat in cat_freq:
        sys.stdout.write("%s: %f\n" % (cat, cat_freq[cat]))
    sys.stdout.write('%d elements loaded\n' % count)
    return [social_items, labels]
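# Illustrative usage (assumes the pickles exist under data/):
#   social_items, labels = build_social_data(twitter=True, ebay=False)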
def categories_distribution(labels):
"""
Gives information about the frequency of categories or number of elements with more than one category
Gets the data from a list of labels
"""
    multilabel_count = 0
    cat_freq = {'no_category': 0}
    num_products = 0
    # Each element of `labels` is expected to be the list of labels of one item
    # (the same 'labels' field used throughout this module)
    for item_labels in labels:
        # Count categories, and number of items with more than one label
        if len(item_labels) == 0:
            cat_freq['no_category'] += 1
        else:
            if len(item_labels) > 1:
                multilabel_count += 1
            for cat in item_labels:
                if cat in cat_freq:
                    cat_freq[cat] += 1
                else:
                    cat_freq[cat] = 1
        num_products += 1
    # Normalize data
    for cat in cat_freq:
        cat_freq[cat] = 1.0 * cat_freq[cat] / num_products
    # Show data
    sys.stdout.write("\nNumber of products: %d" % num_products)
    sys.stdout.write("\nMultilabel elements: %d \n" % multilabel_count)
    sys.stdout.write("Percentage of products with a given category\n")
    sys.stdout.write("============================================\n")
    for cat in cat_freq:
        sys.stdout.write("%s: %f\n" % (cat, cat_freq[cat]))
    sys.stdout.write("")
| gpl-2.0 | 1,212,668,472,962,764,800 | 28.538776 | 105 | 0.551057 | false | 3.713186 | false | false | false |
ntamas/python-selecta | selecta/renderers.py | 1 | 2075 | """Renderers convert model objects into a visual representation that
can be shown on the UI."""
class Renderer(object):
def attach_to_terminal(self, terminal):
"""Attaches the renderer to the given terminal."""
pass
def render(self, obj, selected=False):
"""Renders the given object into a string that can be printed to
the terminal.
Args:
obj (object): the object to render
selected (bool): whether the object should have a "selected"
appearance
Returns:
str: the string representation of the object, suitable for printing
to the terminal
"""
raise NotImplementedError
class MatchRenderer(Renderer):
"""Converts a ``selecta.matches.Match`` object into a textual
representation that can be printed on the console."""
def attach_to_terminal(self, terminal):
escape_braces = lambda s: s.replace("{", "{{").replace("}", "}}")
self._unselected_templates = {
"match_start": terminal.render("${BG_YELLOW}${FG_BLACK}"),
"match_end": terminal.render("${NORMAL}"),
"start": terminal.render("${NORMAL}"),
"end": terminal.render("${CLEAR_EOL}${NORMAL}")
}
self._selected_templates = {
"match_start": terminal.render("${BG_YELLOW}"),
"match_end": terminal.render("${BG_WHITE}"),
"start": terminal.render("${NORMAL}${BG_WHITE}${FG_BLACK}"),
"end": terminal.render("${CLEAR_EOL}${NORMAL}")
}
def render(self, match, selected=False):
match.canonicalize()
result = list(match.matched_string)
templates = self._selected_templates if selected \
else self._unselected_templates
for start, end in reversed(match.substrings):
result[end:end] = templates["match_end"]
result[start:start] = templates["match_start"]
result[0:0] = templates["start"]
result.extend(templates["end"])
return "".join(result)
| mit | -8,372,224,419,947,170,000 | 36.053571 | 79 | 0.58988 | false | 4.313929 | false | false | false |
krishauser/Klampt | Python/klampt/control/blocks/trajectory_tracking.py | 1 | 2744 | from ..controller import ControllerBlock,RobotControllerIO
from klampt.model import trajectory
class TrajectoryPositionController(ControllerBlock):
"""A (robot) controller that takes in a trajectory and outputs the position
along the trajectory. If type is a 2-tuple, this will also output the
derivative of the trajectory"""
def __init__(self,traj,type=('qcmd','dqcmd')):
self.traj = traj
self.outputType = type
self.startTime = None
def inputNames(self):
return ['t']
def outputNames(self):
if isinstance(self.outputType,(tuple,list)):
return self.outputType
else:
return [self.outputType]
def advance(self,**inputs):
t = inputs['t']
if self.startTime == None:
self.startTime = t
t = t - self.startTime
if isinstance(self.outputType,(tuple,list)):
assert len(self.outputType)==2
return {self.outputType[0]:self.traj.eval(t),
self.outputType[1]:self.traj.deriv(t)}
else:
return {self.outputType:self.traj.eval(t)}
def getState(self):
return {'startTime':self.startTime}
def setState(self,state):
self.startTime = state['startTime']
def signal(self,type,**inputs):
if type=='reset':
self.startTime = None
class TrajectoryWithFeedforwardTorqueController(ControllerBlock):
"""A controller that takes in a joint trajectory and a feedforward torque
trajectory."""
def __init__(self,traj,torquetraj):
self.traj = traj
self.torquetraj = torquetraj
self.startTime = None
def inputNames(self):
return ['t']
def outputNames(self):
return ['qcmd','dqcmd','torquecmd']
def advance(self,**inputs):
api = RobotControllerIO(inputs)
t = api.time()
if self.startTime == None:
self.startTime = t
t = t - self.startTime
return api.makeFeedforwardPIDCommand(self.traj.eval(t),self.traj.deriv(t),self.torquetraj.eval(t))
def getState(self):
return {'startTime':self.startTime}
def setState(self,state):
self.startTime = state['startTime']
def signal(self,type,**inputs):
if type=='reset':
self.startTime = None
def make(robot,file="mypath.path",ff_torque_file=None):
if robot == None:
l = trajectory.Trajectory()
else:
l = trajectory.RobotTrajectory(robot)
l.load(file)
if ff_torque_file is not None:
tcmd = trajectory.Trajectory()
tcmd.load(ff_torque_file)
return TrajectoryWithFeedforwardTorqueController(l,ff_torque_file)
return TrajectoryPositionController(l)
| bsd-3-clause | 4,214,445,663,503,191,600 | 30.906977 | 106 | 0.626093 | false | 3.982583 | false | false | false |
ColmFitz369/docker-repo | python-flask/my_application/app.py | 1 | 1686 | from flask import Flask
from flask import request
import os, sys
app = Flask(__name__)
@app.route('/')
def index():
return 'Index Page'
@app.route('/hello')
def hello():
<<<<<<< HEAD
return 'Hello World'
@app.route('/user/<username>')
def show_user_profile(username):
# show the user profile for that user
return 'User %s' % username
@app.route('/post/<int:post_id>')
def show_post(post_id):
# show the post with the given id, the id is an integer
return 'Post %d' % post_id
=======
return "Hello World!"
@app.route("/")
def address():
return "Dublin!"
@app.route("/")
def message():
return "How are you?"
>>>>>>> e3545d81ac7e72b259f0cbf6387101363a955bbe
@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
f = request.files['file']
f.save('./uploads/'+f.filename)
return '',201
@app.route('/chkUploads')
def chk_uploads():
path = "./uploads/"
dirs = os.listdir( path )
f_str=""
for name in dirs:
f_str +=(str)(name)+"\n"
return f_str
@app.route('/eu1')
def run_eu1():
i=0
total=0
while i < 1000: #stop when we reach multiple bigger than 1000
if(i%5==0 or i%3==0): #ie if multiple of 5 or 3
total+=i #add multiple to cumulative tally
i+=1 #next number (will be used only if a valid multiple)
result=" "+(str)(total)+"\n"
return result
@app.route('/eu2')
def run_eu2():
pre,fib,tally=0,1,0 #initialize variables, pre is last term fib is current
MAX=4000000 #4million is maximum value of a term
while fib <= MAX:
if(fib%2): tally+=fib #add to tally is fib term is even
pre,fib=fib,pre+fib #get new values for pre and fib
result=" "+(str)(tally)+"\n"
return result
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True)
| mit | -7,064,585,944,802,551,000 | 24.545455 | 74 | 0.666074 | false | 2.84317 | false | false | false |
Zerknechterer/pyload | module/plugins/internal/XFSAccount.py | 1 | 5969 | # -*- coding: utf-8 -*-
import re
import time
import urlparse
from module.plugins.internal.Account import Account
from module.plugins.internal.SimpleHoster import parseHtmlForm, set_cookies
class XFSAccount(Account):
__name__ = "XFSAccount"
__type__ = "account"
__version__ = "0.38"
__description__ = """XFileSharing account plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg" , "[email protected]"),
("Walter Purcaro", "[email protected]" )]
HOSTER_DOMAIN = None
HOSTER_URL = None
LOGIN_URL = None
COOKIES = True
PREMIUM_PATTERN = r'\(Premium only\)'
VALID_UNTIL_PATTERN = r'Premium.[Aa]ccount expire:.*?(\d{1,2} [\w^_]+ \d{4})'
TRAFFIC_LEFT_PATTERN = r'Traffic available today:.*?<b>\s*(?P<S>[\d.,]+|[Uu]nlimited)\s*(?:(?P<U>[\w^_]+)\s*)?</b>'
TRAFFIC_LEFT_UNIT = "MB" #: used only if no group <U> was found
LEECH_TRAFFIC_PATTERN = r'Leech Traffic left:<b>.*?(?P<S>[\d.,]+|[Uu]nlimited)\s*(?:(?P<U>[\w^_]+)\s*)?</b>'
LEECH_TRAFFIC_UNIT = "MB" #: used only if no group <U> was found
LOGIN_FAIL_PATTERN = r'Incorrect Login or Password|account was banned|Error<'
def __init__(self, manager, accounts): #@TODO: remove in 0.4.10
self.init()
return super(XFSAccount, self).__init__(manager, accounts)
def init(self):
if not self.HOSTER_DOMAIN:
self.logError(_("Missing HOSTER_DOMAIN"))
self.COOKIES = False
else:
if not self.HOSTER_URL:
self.HOSTER_URL = "http://www.%s/" % self.HOSTER_DOMAIN
if isinstance(self.COOKIES, list):
self.COOKIES.insert((self.HOSTER_DOMAIN, "lang", "english"))
set_cookies(req.cj, self.COOKIES)
def loadAccountInfo(self, user, req):
validuntil = None
trafficleft = None
leechtraffic = None
premium = None
if not self.HOSTER_URL: #@TODO: Remove in 0.4.10
return {'validuntil' : validuntil,
'trafficleft' : trafficleft,
'leechtraffic': leechtraffic,
'premium' : premium}
html = req.load(self.HOSTER_URL, get={'op': "my_account"}, decode=True)
premium = True if re.search(self.PREMIUM_PATTERN, html) else False
m = re.search(self.VALID_UNTIL_PATTERN, html)
if m:
expiredate = m.group(1).strip()
self.logDebug("Expire date: " + expiredate)
try:
validuntil = time.mktime(time.strptime(expiredate, "%d %B %Y"))
except Exception, e:
self.logError(e)
else:
self.logDebug("Valid until: %s" % validuntil)
if validuntil > time.mktime(time.gmtime()):
premium = True
trafficleft = -1
else:
premium = False
validuntil = None #: registered account type (not premium)
else:
self.logDebug("VALID_UNTIL_PATTERN not found")
m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
if m:
try:
traffic = m.groupdict()
size = traffic['S']
if "nlimited" in size:
trafficleft = -1
if validuntil is None:
validuntil = -1
else:
if 'U' in traffic:
unit = traffic['U']
elif isinstance(self.TRAFFIC_LEFT_UNIT, basestring):
unit = self.TRAFFIC_LEFT_UNIT
else:
unit = ""
trafficleft = self.parseTraffic(size + unit)
except Exception, e:
self.logError(e)
else:
self.logDebug("TRAFFIC_LEFT_PATTERN not found")
leech = [m.groupdict() for m in re.finditer(self.LEECH_TRAFFIC_PATTERN, html)]
if leech:
leechtraffic = 0
try:
for traffic in leech:
size = traffic['S']
if "nlimited" in size:
leechtraffic = -1
if validuntil is None:
validuntil = -1
break
else:
if 'U' in traffic:
unit = traffic['U']
elif isinstance(self.LEECH_TRAFFIC_UNIT, basestring):
unit = self.LEECH_TRAFFIC_UNIT
else:
unit = ""
leechtraffic += self.parseTraffic(size + unit)
except Exception, e:
self.logError(e)
else:
self.logDebug("LEECH_TRAFFIC_PATTERN not found")
return {'validuntil' : validuntil,
'trafficleft' : trafficleft,
'leechtraffic': leechtraffic,
'premium' : premium}
def login(self, user, data, req):
if not self.HOSTER_URL: #@TODO: Remove in 0.4.10
raise Exception(_("Missing HOSTER_DOMAIN"))
if not self.LOGIN_URL:
self.LOGIN_URL = urlparse.urljoin(self.HOSTER_URL, "login.html")
html = req.load(self.LOGIN_URL, decode=True)
action, inputs = parseHtmlForm('name="FL"', html)
if not inputs:
inputs = {'op' : "login",
'redirect': self.HOSTER_URL}
inputs.update({'login' : user,
'password': data['password']})
if action:
url = urlparse.urljoin("http://", action)
else:
url = self.HOSTER_URL
html = req.load(url, post=inputs, decode=True)
if re.search(self.LOGIN_FAIL_PATTERN, html):
self.wrongPassword()
| gpl-3.0 | 6,233,733,894,626,523,000 | 31.977901 | 119 | 0.495895 | false | 3.924392 | false | false | false |
arizvisa/syringe | template/local/pif.py | 1 | 13321 | """
Ripped from https://www.smsoft.ru/en/pifdoc.htm
"""
import ptypes
from ptypes import *
class Heading(pstruct.type):
def __Next_section_offset(self):
return dyn.pointer(Section, pint.uint16_t)
def __Section_data_offset(self):
def _object_(_, self=self):
length = self['Length'].li
return SectionData.withdefault(length.int(), length=length.int())
return dyn.pointer(_object_, pint.uint16_t)
_fields_ = [
(dyn.clone(pstr.string, length=0x10), 'Name'),
(__Next_section_offset, 'NextOffset'),
(__Section_data_offset, 'Offset'),
(pint.uint16_t, 'Length'),
]
class SectionData(ptype.definition):
cache = {}
class default(pstr.string):
pass
class Section(pstruct.type):
def __data(self):
res = self['heading'].li
length = res['Length']
return SectionData.withdefault(length.int(), length=length.int())
def __padding_section(self):
res = self['heading'].li
if res['NextOffset'].int() < 0xffff:
length, fields = res['NextOffset'].int() - self.getoffset(), ['heading', 'data']
return dyn.block(max(0, length - sum(self[fld].li.size() for fld in fields)))
return dyn.block(0)
_fields_ = [
(Heading, 'heading'),
(__data, 'data'),
(__padding_section, 'padding(data)'),
]
class MaximumRequired(pstruct.type):
_fields_ = [
(pint.uint16_t, 'maximum'),
(pint.uint16_t, 'required'),
]
def summary(self):
return "required={:#x} maximum={:#x}".format(self['required'].int(), self['maximum'].int())
@SectionData.define
class BasicSection(pstruct.type):
type = 0x171
@pbinary.littleendian
class _Flags(pbinary.flags):
_fields_ = [
(1, 'COM2'),
(1, 'COM1'),
(1, 'Reserved'),
(1, 'Close on exit'),
(1, 'No screen exchange'),
(1, 'Prevent switch'),
(1, 'Graphics mode'),
(1, 'Direct memory'),
]
@pbinary.littleendian
class _Program_flags(pbinary.flags):
_fields_ = [
(1, 'Unused'),
(1, 'Has parameters'),
(1, 'Exchange interrupt vectors'),
(5, 'Reserved'),
(1, 'Direct screen'),
(1, 'Stop in background mode'),
(1, 'Use coprocessor'),
(1, 'Direct keyboard'),
(4, 'Unknown'),
]
_fields_ = [
(pint.uint8_t, 'Reserved'),
(pint.uint8_t, 'Checksum'),
(dyn.clone(pstr.string, length=30), 'Window title'),
(MaximumRequired, 'Reserved memory'),
(dyn.clone(pstr.string, length=63), 'Path'),
(_Flags, 'Flags'),
(pint.uint8_t, 'Drive index'),
(dyn.clone(pstr.string, length=64), 'Directory'),
(dyn.clone(pstr.string, length=64), 'Parameters'),
(pint.uint8_t, 'Video mode'),
(pint.uint8_t, 'Text video pages quantity'),
(pint.uint8_t, 'First used interrupt'),
(pint.uint8_t, 'Last used interrupt'),
(pint.uint8_t, 'Rows'),
(pint.uint8_t, 'Columns'),
(pint.uint8_t, 'X position'),
(pint.uint8_t, 'Y position'),
(pint.uint16_t, 'Number of last video page'),
(dyn.clone(pstr.string, length=64), 'Shared program path'),
(dyn.clone(pstr.string, length=64), 'Shared program data'),
(_Program_flags, 'Program flags'),
]
@SectionData.define
class Windows386Section(pstruct.type):
type = 0x68
@pbinary.littleendian
class _Bit_mask1(pbinary.flags):
_fields_ = [
(3, 'Unused'),
(1, 'No MS-DOS transition warning'),
(1, 'Unused'),
(1, 'No MS-DOS automatic transition'),
(1, 'Unused'),
(1, 'Prevent Windows detection'),
(1, 'MS-DOS mode'),
(1, 'Unused'),
(1, 'Maximized window'),
(1, 'Minimized window'),
(1, 'Memory protection'),
(1, 'Lock application memory'),
(1, 'Fast paste'),
(1, 'XMS memory locked'),
(1, 'EMS memory locked'),
(1, 'Use shortcut key'),
(1, 'Do not use HMA'),
(1, 'Detect idle time'),
(1, 'No Ctrl+Esc'),
(1, 'No PrtSc'),
(1, 'No Alt+PrtSc'),
(1, 'No Alt+Enter'),
(1, 'No Alt+Space'),
(1, 'No Alt+Esc'),
(1, 'No Alt+Tab'),
(1, 'Unused'),
(1, 'Full-screen mode'),
(1, 'Exclusive run mode'),
(1, 'Background continuation'),
(1, 'Permit exit'),
]
@pbinary.littleendian
class _Bit_mask2(pbinary.flags):
_fields_ = [
(8, 'Unused'),
(1, 'Retain video memory'),
(1, 'Memory: High graphics'),
(1, 'Memory: Low graphics'),
(1, 'Memory: Text graphics'),
(1, 'Ports: High graphics'),
(1, 'Ports: Low graphics'),
(1, 'Ports: Text graphics'),
(1, 'Video ROM emulation'),
]
@pbinary.littleendian
class _Shortcut_modifier(pbinary.flags):
_fields_ = [
(12, 'Unused'),
(1, 'Alt'),
(1, 'Ctrl'),
(2, 'Shift'),
]
_fields_ = [
(MaximumRequired, 'Conventional memory'),
(pint.uint16_t, 'Active priority'),
(pint.uint16_t, 'Background priority'),
(MaximumRequired, 'EMS memory'),
(MaximumRequired, 'XMS memory'),
(_Bit_mask1, 'Bit mask 1'),
(_Bit_mask2, 'Bit mask 2'),
(pint.uint16_t, 'Unknown_16'),
(pint.uint16_t, 'Shortcut key scan code'),
(_Shortcut_modifier, 'Shortcut key modifier'),
(pint.uint16_t, 'Use shortcut key'),
(pint.uint16_t, 'Extended shortcut key'),
(pint.uint16_t, 'Unknown_20'),
(pint.uint16_t, 'Unknown_22'),
(pint.uint32_t, 'Unknown_24'),
(dyn.clone(pstr.string, length=64), 'Parameters'),
]
@SectionData.define
class Windows286Section(pstruct.type):
type = 0x6
@pbinary.littleendian
class _Flags(pbinary.flags):
_fields_ = [
(1, 'COM4'),
(1, 'COM3'),
(8, 'Unused'),
(1, 'No screen retain'),
(1, 'No Ctrl+Esc'),
(1, 'No PrtSc'),
(1, 'No Alt+PrtSc'),
(1, 'No Alt+Esc'),
(1, 'No Alt+Tab'),
]
_fields_ = [
(MaximumRequired, 'XMS memory'),
(_Flags, 'Flags'),
]
@SectionData.define
class WindowsVMM40Section(pstruct.type):
type = 0x1ac
class _Dimensions(pstruct.type):
_fields_ = [
(pint.uint16_t, 'horizontal size'),
(pint.uint16_t, 'vertical size'),
]
@pbinary.littleendian
class _Bit_mask1(pbinary.flags):
_fields_ = [
(10, 'Unknown'),
(1, 'No screensaver'),
(1, 'No exit warning'),
(2, 'Unused'),
(1, 'Continue in background'),
(1, 'Reserved'),
]
@pbinary.littleendian
class _Bit_mask2(pbinary.flags):
_fields_ = [
(7, 'Unknown'),
(1, 'Full-screen mode'),
(1, 'No dynamic video memory'),
(6, 'Unused'),
(1, 'Video-ROM emulation'),
]
@pbinary.littleendian
class _Bit_mask3(pbinary.flags):
_fields_ = [
(4, 'Unknown'),
(1, 'No Ctrl+Esc'),
(1, 'No PrtSc'),
(1, 'No Alt+PrtSc'),
(1, 'No Alt+Enter'),
(1, 'No Alt+Space'),
(1, 'No Alt+Esc'),
(1, 'No Alt+Tab'),
(4, 'Unused'),
(1, 'Fast paste'),
]
@pbinary.littleendian
class _Mouse_flags(pbinary.flags):
_fields_ = [
(14, 'Unused'),
(1, 'Exclusive'),
(1, 'No selection'),
]
@pbinary.littleendian
class _Font_flags(pbinary.flags):
_fields_ = [
(4, 'Unused'),
(1, 'Current TrueType'),
(1, 'Current Raster'),
(5, 'Unknown'),
(1, 'Automatic size'),
(1, 'Use TrueType'),
(1, 'Use Raster'),
(2, 'Reserved'),
]
@pbinary.littleendian
class _Bit_mask4(pbinary.flags):
_fields_ = [
(14, 'Unused'),
(1, 'Show toolbar'),
(1, 'Unknown'),
]
@pbinary.littleendian
class _Last_maximized_flags(pbinary.flags):
_fields_ = [
(14, 'Unknown'),
(1, 'Last maximized'),
(1, 'Reserved'),
]
class _Last_window_state(pint.enum, pint.uint16_t):
_values_ = [
('Normal', 1),
('Minimized', 2),
('Maximized', 3),
]
class _Border_position(pstruct.type):
_fields_ = [
(pint.uint16_t, 'left'),
(pint.uint16_t, 'top'),
(pint.uint16_t, 'right'),
(pint.uint16_t, 'bottom'),
]
_fields_ = [
(dyn.block(88), 'Unknown_0'),
(dyn.clone(pstr.string, length=80), 'Icon filename'),
(pint.uint16_t, 'Icon number'),
(_Bit_mask1, 'Bit mask 1'),
(dyn.block(10), 'Unknown_ac'),
(pint.uint16_t, 'Priority'),
(_Bit_mask2, 'Bit mask 2'),
(dyn.block(8), 'Unknown_ba'),
(pint.uint16_t, 'Number of lines'),
(_Bit_mask3, 'Bit mask 3'),
(pint.uint16_t, 'Unknown_c6'),
(pint.uint16_t, 'Unknown_c8'),
(pint.uint16_t, 'Unknown_ca'),
(pint.uint16_t, 'Unknown_cc'),
(pint.uint16_t, 'Unknown_ce'),
(pint.uint16_t, 'Unknown_d0'),
(pint.uint16_t, 'Unknown_c2'),
(pint.uint16_t, 'Unknown_c4'),
(_Mouse_flags, 'Mouse flags'),
(dyn.block(6), 'Unknown_d8'),
(_Font_flags, 'Font flags'),
(pint.uint16_t, 'Unknown_e0'),
(_Dimensions, 'Raster font size'),
(_Dimensions, 'Current font size'),
(dyn.clone(pstr.string, length=32), 'Raster font name'),
(dyn.clone(pstr.string, length=32), 'TrueType font name'),
(pint.uint16_t, 'Unknown_12a'),
(_Bit_mask4, 'Bit mask 4'),
(pint.uint16_t, 'No restore settings'),
(_Dimensions, 'Screen symbol size'),
(_Dimensions, 'Client area size'),
(_Dimensions, 'Window size'),
(pint.uint16_t, 'Unknown_13c'),
(_Last_maximized_flags, 'Last maximized'),
(_Last_window_state, 'Last start'),
(_Border_position, 'Maximized border position'),
(_Border_position, 'Normal border position'),
(pint.uint32_t, 'Unknown_152'),
(dyn.clone(pstr.string, length=80), 'BAT file name'),
(pint.uint16_t, 'Environment size'),
(pint.uint16_t, 'DPMI memory volume'),
(pint.uint16_t, 'Unknown_1aa'),
]
@SectionData.define
class WindowsNT31Section(pstruct.type):
type = 0x8c
_fields_ = [
(pint.uint16_t, 'Hardware timer emulation'),
(dyn.block(10), 'Unknown_2'),
(dyn.clone(pstr.string, length=64), 'CONFIG.NT filename'),
(dyn.clone(pstr.string, length=64), 'AUTOEXEC.NT filename'),
]
@SectionData.define
class WindowsNT40Section(pstruct.type):
type = 0x68c
_fields_ = [
(pint.uint32_t, 'Unknown_0'),
(dyn.clone(pstr.wstring, length=128), 'Unicode parameters'),
(dyn.clone(pstr.string, length=128), 'Ascii parameters'),
(dyn.block(240), 'Unknown_184'),
(dyn.clone(pstr.wstring, length=80), 'Unicode PIF filename'),
(dyn.clone(pstr.string, length=80), 'Ascii PIF filename'),
(dyn.clone(pstr.wstring, length=30), 'Unicode window title'),
(dyn.clone(pstr.string, length=30), 'Ascii window title'),
(dyn.clone(pstr.wstring, length=80), 'Unicode icon filename'),
(dyn.clone(pstr.string, length=80), 'Ascii icon filename'),
(dyn.clone(pstr.wstring, length=64), 'Unicode working directory'),
(dyn.clone(pstr.string, length=64), 'Ascii working directory'),
(dyn.block(286), 'Unknown_56e'),
]
class Sections(parray.terminated):
_object_ = Section
def isTerminator(self, item):
res = item['heading']
return res['NextOffset'].int() == 0xffff
class File(pstruct.type):
_fields_ = [
(BasicSection, 'basicSection'),
(Heading, 'basicHeading'),
(Sections, 'sections'),
]
def enumerate(self):
item = self['basicHeading']
yield item['Name'].str(), item['Offset'].d.li
while item['NextOffset'].int() < 0xffff:
res = item['NextOffset'].d.li
item = res['heading']
yield item['Name'].str(), item['Offset'].d.li
return
def iterate(self):
for _, item in self.enumerate():
yield item
return
if __name__ == '__main__':
import ptypes, local.pif as PIF
ptypes.setsource(ptypes.prov.file('/home/user/work/syringe/template/samples/_default.pif','rb'))
z = PIF.File()
z=z.l
for name, item in z.enumerate():
print(name)
print(item)
for item in z.iterate():
print(item)
| bsd-2-clause | 2,997,885,994,163,634,000 | 30.196721 | 100 | 0.507995 | false | 3.411268 | false | false | false |
musicbrainz/picard | picard/ui/tagsfromfilenames.py | 1 | 6502 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006-2007 Lukáš Lalinský
# Copyright (C) 2009, 2014, 2019-2020 Philipp Wolfer
# Copyright (C) 2012-2013 Michael Wiencek
# Copyright (C) 2014, 2017 Sophist-UK
# Copyright (C) 2016-2017 Sambhav Kothari
# Copyright (C) 2017 Ville Skyttä
# Copyright (C) 2018 Laurent Monin
# Copyright (C) 2018 Vishal Choudhary
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from collections import OrderedDict
import os.path
import re
from PyQt5 import QtWidgets
from picard import config
from picard.script.parser import normalize_tagname
from picard.util.tags import display_tag_name
from picard.ui import PicardDialog
from picard.ui.ui_tagsfromfilenames import Ui_TagsFromFileNamesDialog
from picard.ui.util import StandardButton
class TagMatchExpression:
_numeric_tags = ('tracknumber', 'totaltracks', 'discnumber', 'totaldiscs')
def __init__(self, expression, replace_underscores=False):
self.replace_underscores = replace_underscores
self._tag_re = re.compile(r"(%\w+%)")
self._parse(expression)
def _parse(self, expression):
self._group_map = OrderedDict()
format_re = ['(?:^|/)']
for i, part in enumerate(self._tag_re.split(expression)):
if part.startswith('%') and part.endswith('%'):
name = part[1:-1]
group = '%s_%i' % (name, i)
tag = normalize_tagname(name)
self._group_map[group] = tag
if tag in self._numeric_tags:
format_re.append(r'(?P<' + group + r'>\d+)')
elif tag == 'date':
format_re.append(r'(?P<' + group + r'>\d+(?:-\d+(?:-\d+)?)?)')
else:
format_re.append(r'(?P<' + group + r'>[^/]*?)')
else:
format_re.append(re.escape(part))
# Optional extension
format_re.append(r'(?:\.\w+)?$')
self._format_re = re.compile("".join(format_re))
@property
def matched_tags(self):
# Return unique values, but preserve order
return list(OrderedDict.fromkeys(self._group_map.values()))
def match_file(self, filename):
match = self._format_re.search(filename.replace('\\', '/'))
if match:
result = {}
for group, tag in self._group_map.items():
value = match.group(group).strip()
if tag in self._numeric_tags:
value = value.lstrip("0")
if self.replace_underscores:
value = value.replace('_', ' ')
all_values = result.get(tag, [])
all_values.append(value)
result[tag] = all_values
return result
else:
return {}
class TagsFromFileNamesDialog(PicardDialog):
autorestore = False
options = [
config.TextOption("persist", "tags_from_filenames_format", ""),
]
def __init__(self, files, parent=None):
super().__init__(parent)
self.ui = Ui_TagsFromFileNamesDialog()
self.ui.setupUi(self)
self.restore_geometry()
items = [
"%artist%/%album%/%title%",
"%artist%/%album%/%tracknumber% %title%",
"%artist%/%album%/%tracknumber% - %title%",
"%artist%/%album% - %tracknumber% - %title%",
"%artist% - %album%/%title%",
"%artist% - %album%/%tracknumber% %title%",
"%artist% - %album%/%tracknumber% - %title%",
]
tff_format = config.persist["tags_from_filenames_format"]
if tff_format not in items:
selected_index = 0
if tff_format:
items.insert(0, tff_format)
else:
selected_index = items.index(tff_format)
self.ui.format.addItems(items)
self.ui.format.setCurrentIndex(selected_index)
self.ui.buttonbox.addButton(StandardButton(StandardButton.OK), QtWidgets.QDialogButtonBox.AcceptRole)
self.ui.buttonbox.addButton(StandardButton(StandardButton.CANCEL), QtWidgets.QDialogButtonBox.RejectRole)
self.ui.buttonbox.accepted.connect(self.accept)
self.ui.buttonbox.rejected.connect(self.reject)
self.ui.preview.clicked.connect(self.preview)
self.ui.files.setHeaderLabels([_("File Name")])
self.files = files
self.items = []
for file in files:
item = QtWidgets.QTreeWidgetItem(self.ui.files)
item.setText(0, os.path.basename(file.filename))
self.items.append(item)
def preview(self):
expression = TagMatchExpression(self.ui.format.currentText(), self.ui.replace_underscores.isChecked())
columns = expression.matched_tags
headers = [_("File Name")] + list(map(display_tag_name, columns))
self.ui.files.setColumnCount(len(headers))
self.ui.files.setHeaderLabels(headers)
for item, file in zip(self.items, self.files):
matches = expression.match_file(file.filename)
for i, column in enumerate(columns):
values = matches.get(column, [])
item.setText(i + 1, '; '.join(values))
self.ui.files.header().resizeSections(QtWidgets.QHeaderView.ResizeToContents)
self.ui.files.header().setStretchLastSection(True)
def accept(self):
expression = TagMatchExpression(self.ui.format.currentText(), self.ui.replace_underscores.isChecked())
for file in self.files:
metadata = expression.match_file(file.filename)
for name, values in metadata.items():
file.metadata[name] = values
file.update()
config.persist["tags_from_filenames_format"] = self.ui.format.currentText()
super().accept()
| gpl-2.0 | 2,771,010,965,814,264,000 | 39.111111 | 113 | 0.612188 | false | 3.867857 | false | false | false |
merc-devel/merc | merc/features/rfc1459/privmsg.py | 1 | 2517 | from merc import channel
from merc import errors
from merc import feature
from merc import message
from merc import mode
class PrivmsgFeature(feature.Feature):
NAME = __name__
install = PrivmsgFeature.install
MAX_TARGETS = 4
class _Privmsg(message.Command):
MIN_ARITY = 2
FORCE_TRAILING = True
def __init__(self, targets, text, *args):
self.targets = targets.split(",")
self.text = text
def as_command_params(self):
return [",".join(self.targets), self.text]
def compute_targets(self, app, user, target_name):
if channel.Channel.is_channel_name(target_name):
chan = app.channels.get(target_name)
if DisallowingExternalMessages(chan).get():
try:
chan.check_has_user(user)
except errors.NoSuchNick:
raise errors.CannotSendToChan(chan.name)
app.run_hooks("channel.message.check", user, chan)
if Moderated(chan).get():
chan.check_is_voiced(user)
app.run_hooks("channel.message", user, chan, self.text)
return (app.users.get_by_uid(uid) for uid in chan.users
if uid != user.uid)
else:
target = app.users.get(target_name)
app.run_hooks("user.message", user, target, self.text)
return [target]
def get_real_target_name(self, app, target_name):
if channel.Channel.is_channel_name(target_name):
return app.channels.get(target_name).name
else:
return app.users.get(target_name).nickname
@message.Command.requires_registration
def handle_for(self, app, user, prefix):
for target_name in self.targets[:MAX_TARGETS]:
real_target_name = self.get_real_target_name(app, target_name)
for target in self.compute_targets(app, user, target_name):
target.send(user.prefix, self.__class__(real_target_name, self.text))
@PrivmsgFeature.register_user_command
class Privmsg(_Privmsg):
NAME = "PRIVMSG"
@PrivmsgFeature.register_user_command
class Notice(_Privmsg):
NAME = "NOTICE"
@PrivmsgFeature.register_channel_mode
class DisallowingExternalMessages(mode.FlagMode, mode.ChanModeMixin):
CHAR = "n"
DEFAULT = True
@PrivmsgFeature.register_channel_mode
class Moderated(mode.FlagMode, mode.ChanModeMixin):
CHAR = "m"
@PrivmsgFeature.hook("server.notify")
def send_server_notice(app, user, text):
user.send_reply(Notice("*", text))
@PrivmsgFeature.hook("server.targmax.modify")
def modify_targmax(app, targmax):
targmax["PRIVMSG"] = MAX_TARGETS
targmax["NOTICE"] = MAX_TARGETS
| mit | -7,296,159,504,572,538,000 | 25.21875 | 77 | 0.684148 | false | 3.329365 | false | false | false |
denizs/torchUp | torchup/agents/DoubleDQN.py | 1 | 6576 | import torch
import torch.optim as optim
import torch.nn as nn
from torchup.agents.DQN import DQNAgent
from torchup.utils.utils import Transition
from torchup.base.models import Variable
class DoubleDQNAgent(DQNAgent):
'''
The DoubleDQNAgent is an implemenation of the Deep Reinforcement Agent
outlined in 'Deep Reinforcement Learning with Double Q-Learning'
by Hasselt et al.
Within the scope of this paper, it was shown that the DQN algorithm introduced
by Mnih et al. suffers from continuous overestimation of the state values,
which can lead to learning poorer policies.
As an approach to tackle this overestimation, the Double-Q Learning algorithm,
initially described by van Hasssel et al. was introduced.
Similarly to the DQN, this algorithm leverages to seperate neural networks
to estimate and update the state-action values. The main idea is to reduce
overestimation by dividing the max operation into a step of action selection
and action evaluation:
The online network is utilised for the action selection, while the target networks
estimates the resulting Q-value for this state:
DQN:
Q = r + gamma * Q_target(s', a)
DoubleDQN:
Q = r+ gamma * Q_target(s', argmax_a Q_online(s',a))
The experiments conducted priorly to the paper indicate that DoubleDQN significantly
reduces overestimation, resulting in outperforming the original DQN algorithm within
the Atari 2600 domain.
'''
def __init__(self, *args, **kwargs):
super(DoubleDQNAgent, self).__init__(target_model_update_freq=30000,
*args,
**kwargs)
# was kommt hier noch hin? eig nikkes oder :S
def optimize(self):
'''
The `optimize` method of the `DoubleDQNAgent` performs the batch updates
described in van Hasselt et al.'s paper 'Deep Reinforcement with Double Q-Learning'
1. Sample a random minibatch of transitions from the agent's replay memory
2. Set our FIXED targets `y_j` to:
2.1 `r_j` for terminal states
2.2 `r_j + GAMMA * Q_target(phi_j+1, argmax Q(s',a))` for non-terminal states
3. Compute our excepted Q_values by a full forward run of of states
4. Perform a gradient descent step on (y_j - Q(phi_j, a_j, theta))^2 with RMSprop
'''
self.n_backprop += 1 # increment the number of performed updates
# let's sample from our experience memory.
# The `sample_batch_size` is set during `__init__`, giving us any-time access.
s0_batch, a_batch, s1_batch, r_batch, t_batch = self.memory.sample(self.sample_batch_size)
if len(s0_batch) is 0:
self.data.loss = 0
self.logger.add_step(self.data)
return
r_batch_v = Variable(torch.cat(r_batch, dim=0))
s0_batch_v = Variable(torch.from_numpy(s0_batch))
s1_batch_v = Variable(torch.from_numpy(s1_batch), volatile=True)
a_batch_v = Variable(torch.cat(a_batch, dim=0))
# Before we start building our targets, it's time to get some expactations
# out of our neural net, which we then use to calculate the loss.
# As our model always returns one state-action value for each possible action,
# we need to 'select' the state-action value which corresponds to the action
# we have taken. Luckily, torch provides a method called `gather`, which allows
# us to do just this.
predicted_qs_v = self.model(s0_batch_v).gather(1, a_batch_v)
self.data.q_value = predicted_qs_v.data.mean()
# Now let's start building our targets:
# First, we need to divide our batch into two catergories:
# * Transitions, that lead to a terminal state
# * Transitions, that lead to a non-terminal state
# As described priorly, we set our targets `y_j` to `r_j` for terminal transitions
# and to `r_j + Q'(s_t+1, argmax Q(s_t+1)` for non-terminal transitions.
# We need to compute both, the expectations of the non-terminal `next_state`s
# and the expectation of all starting states (`state`).
# Also, we need to keep the order of the transitions consistent in order to
# perform the gradient update appropriately. In order to ahieve this,
# we create a bit mask which holds the position of the terminal
# states indicated by a 1. This will allow us to easily update our
# terminal state targets by selecting the respective targets via
# `target[terminal] = ...`:
terminal_mask = torch.ByteTensor(t_batch).type(self.ldt)
# First, let's obtain the actions that we should take according to our
# online model, which is represented by the argmax of the expexted Q_values
# of our non-terminal next states. We obtain these by calling `.max(1)`
# on our variable and selecting the second column. Remember the returns of
# `.max(1)`? Two `torch.autograd.Variable`s or `torch.Tensor`s, the first
# representing the actual values, the latter the indeces (which we want to obtain).
# Also, note that we reassign the data to a new variable, detaching it
# from the computational graph
next_a_v = self.model(s1_batch_v).max(1)[1]
next_a_v = Variable(next_a_v.data)
# Now let's evaluate our policy with respect to our target model's parameters:
next_qs_v = self.target_model(s1_batch_v)
next_qs_v = Variable(next_qs_v.data)
next_max_qs_v = next_qs_v.gather(1, next_a_v)
# Apply terminal mask:
next_max_qs_v[terminal_mask] = 0
# Now let's build our targets:
if next_qs_v.volatile:
next_qs_v.volatile = False
# Perform the update:
        # r_j + gamma * Q'(s_t+1, argmax_a Q(s_t+1, a)):
targets_v = (next_max_qs_v * self.gamma) + r_batch_v # <-- fixed Q-target!
# Compute the loss:
loss = self.loss_function(predicted_qs_v, targets_v)
self.data.loss = loss.data[0]
self.logger.add_step(self.data)
# Optimize
self.optimizer.zero_grad()
loss.backward()
for param in self.model.parameters():
param.grad.data.clamp_(-1, 1)
self.optimizer.step()
# Let's check, if we need to update our target network
        if self.n_backprop % self.target_model_update_freq == 0:
self.target_model.load_state_dict(self.model.state_dict())
| bsd-2-clause | -2,724,416,356,105,284,600 | 48.074627 | 98 | 0.648723 | false | 3.706877 | false | false | false |
otsaloma/gaupol | gaupol/attrdict.py | 1 | 2782 | # -*- coding: utf-8 -*-
# Copyright (C) 2006 Osmo Salomaa
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Observable dictionary with attribute access to keys."""
import aeidon
__all__ = ("AttributeDictionary",)
class AttributeDictionary(aeidon.Observable):
"""
Observable dictionary with attribute access to keys.
:class:`AttributeDictionary` is initialized from a root dictionary,
which is kept in sync with attribute values. This allows convenient
attribute access to dictionary keys and notifications of changes
via the :class:`aeidon.Observable` interface.
"""
def __init__(self, root):
"""Initialize an :class:`AttributeDictionary` instance."""
aeidon.Observable.__init__(self)
self._root = root
self.update(root)
def add_attribute(self, name, value):
"""Add instance attribute and corresponding root dictionary key."""
self._root[name] = value
# In the case of dictionaries, set the original dictionary
# to the root dictionary, but instantiate an AttributeDictionary
# for use as the corresponding attribute.
if isinstance(value, dict):
value = AttributeDictionary(value)
setattr(self, name, value)
self.connect("notify::{}".format(name), self._on_notify, name)
def extend(self, root):
"""Add new values from another root dictionary."""
for name, value in root.items():
if not hasattr(self, name):
self.add_attribute(name, value)
for name, value in root.items():
if isinstance(value, dict):
getattr(self, name).extend(value)
def _on_notify(self, obj, value, name):
"""Synchronize changed attribute value with root dictionary."""
self._root[name] = value
def update(self, root):
"""Update values from another root dictionary."""
self.extend(root)
for name, value in root.items():
if not isinstance(value, dict):
setattr(self, name, value)
for name, value in root.items():
if isinstance(value, dict):
getattr(self, name).update(value)
| gpl-3.0 | 207,437,394,888,405,980 | 36.594595 | 75 | 0.659238 | false | 4.422893 | false | false | false |
timsavage/odin | odin/resources.py | 1 | 18168 | # -*- coding: utf-8 -*-
import copy
import six
from odin import exceptions, registration
from odin.exceptions import ValidationError
from odin.fields import NOT_PROVIDED
from odin.utils import cached_property, field_iter_items
DEFAULT_TYPE_FIELD = '$'
META_OPTION_NAMES = (
'name', 'namespace', 'name_space', 'verbose_name', 'verbose_name_plural', 'abstract', 'doc_group', 'type_field'
)
class ResourceOptions(object):
def __init__(self, meta):
self.meta = meta
self.parents = []
self.fields = []
self.virtual_fields = []
self.name = None
self.class_name = None
self.name_space = NOT_PROVIDED
self.verbose_name = None
self.verbose_name_plural = None
self.abstract = False
self.doc_group = None
self.type_field = DEFAULT_TYPE_FIELD
self._cache = {}
def contribute_to_class(self, cls, name):
cls._meta = self
self.name = cls.__name__
self.class_name = "%s.%s" % (cls.__module__, cls.__name__)
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
if name.startswith('_'):
del meta_attrs[name]
for attr_name in META_OPTION_NAMES:
if attr_name in meta_attrs:
# Allow meta to be defined as namespace
if attr_name == 'namespace':
setattr(self, 'name_space', meta_attrs.pop(attr_name))
else:
setattr(self, attr_name, meta_attrs.pop(attr_name))
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
del self.meta
if not self.verbose_name:
self.verbose_name = self.name.replace('_', ' ').strip('_ ')
if not self.verbose_name_plural:
self.verbose_name_plural = self.verbose_name + 's'
def add_field(self, field):
self.fields.append(field)
cached_property.clear_caches(self)
def add_virtual_field(self, field):
self.virtual_fields.append(field)
cached_property.clear_caches(self)
@property
def resource_name(self):
"""
Full name of resource including namespace (if specified)
"""
if self.name_space:
return "%s.%s" % (self.name_space, self.name)
else:
return self.name
@cached_property
def all_fields(self):
"""
All fields both standard and virtual.
"""
return self.fields + self.virtual_fields
@cached_property
def composite_fields(self):
"""
All composite fields.
"""
# Not the nicest solution but is a fairly safe way of detecting a composite field.
return [f for f in self.fields if (hasattr(f, 'of') and issubclass(f.of, Resource))]
@cached_property
def container_fields(self):
"""
All composite fields with the container flag.
Used by XML like codecs.
"""
return [f for f in self.composite_fields if getattr(f, 'use_container', False)]
@cached_property
def field_map(self):
return {f.attname: f for f in self.fields}
@cached_property
def parent_resource_names(self):
"""
List of parent resource names.
"""
return [p._meta.resource_name for p in self.parents]
@cached_property
def attribute_fields(self):
"""
List of fields where is_attribute is True.
"""
return [f for f in self.fields if f.is_attribute]
@cached_property
def element_fields(self):
"""
List of fields where is_attribute is False.
"""
return [f for f in self.fields if not f.is_attribute]
@cached_property
def element_field_map(self):
return {f.attname: f for f in self.element_fields}
def __repr__(self):
return '<Options for %s>' % self.resource_name
class ResourceBase(type):
"""
Metaclass for all Resources.
"""
def __new__(cls, name, bases, attrs):
super_new = super(ResourceBase, cls).__new__
# attrs will never be empty for classes declared in the standard way
# (ie. with the `class` keyword). This is quite robust.
if name == 'NewBase' and attrs == {}:
return super_new(cls, name, bases, attrs)
parents = [b for b in bases if isinstance(b, ResourceBase) and not (b.__name__ == 'NewBase'
and b.__mro__ == (b, object))]
if not parents:
# If this isn't a subclass of Resource, don't do anything special.
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
new_class.add_to_class('_meta', ResourceOptions(meta))
# Generate a namespace if one is not provided
if new_class._meta.name_space is NOT_PROVIDED and base_meta:
# Namespace is inherited
if (not new_class._meta.name_space) or (new_class._meta.name_space is NOT_PROVIDED):
new_class._meta.name_space = base_meta.name_space
if new_class._meta.name_space is NOT_PROVIDED:
new_class._meta.name_space = module
# Bail out early if we have already created this class.
r = registration.get_resource(new_class._meta.resource_name)
if r is not None:
return r
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# Sort the fields
new_class._meta.fields = sorted(new_class._meta.fields, key=hash)
# All the fields of any type declared on this model
local_field_attnames = set([f.attname for f in new_class._meta.fields])
field_attnames = set(local_field_attnames)
for base in parents:
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
continue
# Check for clashes between locally declared fields and those
# on the base classes (we cannot handle shadowed fields at the
# moment).
for field in base._meta.all_fields:
if field.attname in local_field_attnames:
raise Exception('Local field %r in class %r clashes with field of similar name from '
'base class %r' % (field.attname, name, base.__name__))
for field in base._meta.fields:
if field.attname not in field_attnames:
field_attnames.add(field.attname)
new_class.add_to_class(field.attname, copy.deepcopy(field))
for field in base._meta.virtual_fields:
new_class.add_to_class(field.attname, copy.deepcopy(field))
new_class._meta.parents += base._meta.parents
new_class._meta.parents.append(base)
if abstract:
return new_class
# Register resource
registration.register_resources(new_class)
# Because of the way imports happen (recursively), we may or may not be
# the first time this model tries to register with the framework. There
# should only be one class for each model, so we always return the
# registered version.
return registration.get_resource(new_class._meta.resource_name)
def add_to_class(cls, name, value):
if hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
@six.add_metaclass(ResourceBase)
class Resource(object):
def __init__(self, *args, **kwargs):
args_len = len(args)
if args_len > len(self._meta.fields):
            raise TypeError('This resource takes %s positional arguments but %s were given.' % (
len(self._meta.fields), args_len))
# The ordering of the zip calls matter - zip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
fields_iter = iter(self._meta.fields)
if args_len:
if not kwargs:
for val, field in zip(args, fields_iter):
setattr(self, field.attname, val)
else:
for val, field in zip(args, fields_iter):
setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
try:
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
setattr(self, field.attname, val)
if kwargs:
raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self)
def __str__(self):
return '%s resource' % self._meta.resource_name
@classmethod
def create_from_dict(cls, d, full_clean=False):
"""
Create a resource instance from a dictionary.
"""
return create_resource_from_dict(d, cls, full_clean)
def to_dict(self, include_virtual=True):
"""
Convert this resource into a `dict` of field_name/value pairs.
.. note::
            This method is not recursive; it only operates on this single resource, and any sub resources are
            returned as is. The use case that prompted the creation of this method is within codecs, where a
            resource must be converted into a type that can be serialised; these codecs then operate recursively
            on the returned `dict`.
:param include_virtual: Include virtual fields when generating `dict`.
"""
fields = self._meta.all_fields if include_virtual else self._meta.fields
return dict((f.name, v) for f, v in field_iter_items(self, fields))
def convert_to(self, to_resource, context=None, ignore_fields=None, **field_values):
"""
Convert this resource into a specified resource.
A mapping must be defined for conversion between this resource and to_resource or an exception will be raised.
"""
mapping = registration.get_mapping(self.__class__, to_resource)
ignore_fields = ignore_fields or []
ignore_fields.extend(mapping.exclude_fields)
self.full_clean(ignore_fields)
return mapping(self, context).convert(**field_values)
def update_existing(self, dest_obj, context=None, ignore_fields=None):
"""
Update the fields on an existing destination object.
A mapping must be defined for conversion between this resource and ``dest_obj`` type or an exception will be
raised.
"""
self.full_clean(ignore_fields)
mapping = registration.get_mapping(self.__class__, dest_obj.__class__)
return mapping(self, context).update(dest_obj, ignore_fields)
def extra_attrs(self, attrs):
"""
Called during de-serialisation of data if there are any extra fields defined in the document.
This allows the resource to decide how to handle these fields. By default they are ignored.
"""
pass
def clean(self):
"""
Chance to do more in depth validation.
"""
pass
def full_clean(self, exclude=None):
"""
Calls clean_fields, clean on the resource and raises ``ValidationError``
for any errors that occurred.
"""
errors = {}
try:
self.clean_fields(exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
try:
self.clean()
except ValidationError as e:
errors = e.update_error_dict(errors)
if errors:
raise ValidationError(errors)
def clean_fields(self, exclude=None):
errors = {}
for f in self._meta.fields:
if exclude and f.name in exclude:
continue
raw_value = f.value_from_object(self)
if f.null and raw_value is None:
continue
try:
raw_value = f.clean(raw_value)
except ValidationError as e:
errors[f.name] = e.messages
# Check for resource level clean methods.
clean_method = getattr(self, "clean_%s" % f.attname, None)
if callable(clean_method):
try:
raw_value = clean_method(raw_value)
except ValidationError as e:
errors.setdefault(f.name, []).extend(e.messages)
setattr(self, f.attname, raw_value)
if errors:
raise ValidationError(errors)
def resolve_resource_type(resource):
if isinstance(resource, type) and issubclass(resource, Resource):
return resource._meta.resource_name, resource._meta.type_field
else:
return resource, DEFAULT_TYPE_FIELD
def create_resource_from_dict(d, resource=None, full_clean=True, copy_dict=True):
"""
Create a resource from a dict.
:param d: dictionary of data.
:param resource: A resource type, resource name or list of resources and names to use as the base for creating a
resource. If a list is supplied the first item will be used if a resource type is not supplied; this could also
be a parent(s) of any resource defined by the dict.
:param full_clean: Do a full clean as part of the creation.
:param copy_dict: Use a copy of the input dictionary rather than destructively processing the input dict.
"""
assert isinstance(d, dict)
if copy_dict:
d = d.copy()
if resource:
resource_type = None
# Convert to single resource then resolve document type
if isinstance(resource, (tuple, list)):
            # use a list (not a generator) so it can be iterated again when
            # building the error message below
            resources = [resolve_resource_type(r) for r in resource]
else:
resources = [resolve_resource_type(resource)]
for resource_name, type_field in resources:
# See if the input includes a type field and check it's registered
document_resource_name = d.get(type_field, None)
if document_resource_name:
resource_type = registration.get_resource(document_resource_name)
else:
resource_type = registration.get_resource(resource_name)
if not resource_type:
raise exceptions.ResourceException("Resource `%s` is not registered." % document_resource_name)
if document_resource_name:
# Check resource types match or are inherited types
if (resource_name == document_resource_name or
resource_name in resource_type._meta.parent_resource_names):
break # We are done
else:
break
if not resource_type:
raise exceptions.ResourceException(
"Incoming resource does not match [%s]" % ', '.join(r for r, t in resources))
else:
        # No resource specified, rely on the type field
document_resource_name = d.pop(DEFAULT_TYPE_FIELD, None)
if not document_resource_name:
raise exceptions.ResourceException("Resource not defined.")
# Get an instance of a resource type
resource_type = registration.get_resource(document_resource_name)
if not resource_type:
raise exceptions.ResourceException("Resource `%s` is not registered." % document_resource_name)
attrs = []
errors = {}
for f in resource_type._meta.fields:
value = d.pop(f.name, NOT_PROVIDED)
if value is NOT_PROVIDED:
value = f.get_default() if f.use_default_if_not_provided else None
else:
try:
value = f.to_python(value)
except ValidationError as ve:
errors[f.name] = ve.error_messages
attrs.append(value)
if errors:
raise ValidationError(errors)
new_resource = resource_type(*attrs)
if d:
new_resource.extra_attrs(d)
if full_clean:
new_resource.full_clean()
return new_resource
def build_object_graph(d, resource=None, full_clean=True, copy_dict=True):
"""
Generate an object graph from a dict
:param resource: A resource type, resource name or list of resources and names to use as the base for creating a
resource. If a list is supplied the first item will be used if a resource type is not supplied.
:raises ValidationError: During building of the object graph and issues discovered are raised as a ValidationError.
"""
if isinstance(d, dict):
return create_resource_from_dict(d, resource, full_clean, copy_dict)
if isinstance(d, list):
return [build_object_graph(o, resource, full_clean, copy_dict) for o in d]
return d
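# A minimal usage sketch (``Book``, its registered name and ``StringField``
# are illustrative; the "$" key holds the registered resource name):
#
#   class Book(Resource):
#       title = odin.fields.StringField()
#
#   book = build_object_graph({"$": "my_module.Book", "title": "1984"})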
class ResourceIterable(object):
"""
Iterable that yields resources.
"""
def __init__(self, sequence):
self.sequence = sequence
def __iter__(self):
for item in self.sequence:
yield item
| bsd-3-clause | -4,006,824,706,083,677,700 | 34.83432 | 119 | 0.59269 | false | 4.27583 | false | false | false |
swistaq/aoc2016 | src/day10.py | 1 | 2065 | from __future__ import print_function
import re
# id low high
# {botno : ((bot|output, id), (bot|output, id))}
bots = {}
# (value, botno)
vals = []
# {output_id : [values]}
outputs = {}
# {botno : [values]}
states = {}
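# Example input lines in the puzzle format parsed below:
#   value 5 goes to bot 2
#   bot 2 gives low to bot 1 and high to bot 0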
def parse(line):
global bots, vals, outputs
if line.startswith("bot"):
match = re.search("bot\s(\d+)\sgives low to (bot|output)\s(\d+) and high to (bot|output)\s(\d+)", line)
bots.update(
{int(match.group(1)): ((match.group(2), int(match.group(3))), (match.group(4), int(match.group(5))))})
elif line.startswith("value"):
match = re.search("value\s(\d+)\sgoes to bot\s(\d+)", line)
vals.append((int(match.group(1)), int(match.group(2))))
def update_output(output_id, value):
global outputs
    if output_id in outputs:
outputs.get(output_id).append(value)
else:
outputs.update({output_id: [value]})
def update_bot(bot_no, value):
    global states
    if bot_no in states:
        states.get(bot_no).append(value)
        if len(states.get(bot_no)) == 2:
            low, high = sorted(states.get(bot_no))
            if low == 17 and high == 61:
                print("BOT COMPARING 17 AND 61:", bot_no)
            states.update({bot_no: []})
            ((target_low, target_low_id), (target_high, target_high_id)) = bots.get(bot_no)
            if target_low == "bot":
                update_bot(target_low_id, low)
            elif target_low == "output":
                update_output(target_low_id, low)
            if target_high == "bot":
                update_bot(target_high_id, high)
            elif target_high == "output":
                update_output(target_high_id, high)
else:
states.update({bot_no: [value]})
if __name__ == "__main__":
with open("resources/day10") as infile:
for line in infile:
parse(line)
for val in vals:
update_bot(val[1], val[0])
print(outputs.get(0)[0]*outputs.get(1)[0]*outputs.get(2)[0])
| gpl-3.0 | -7,242,079,772,893,059,000 | 31.777778 | 114 | 0.543826 | false | 3.20155 | false | false | false |
HiSPARC/station-software | user/python/Lib/lib-tk/ttk.py | 2 | 56173 | """Ttk wrapper.
This module provides classes to allow using Tk themed widget set.
Ttk is based on a revised and enhanced version of
TIP #48 (http://tip.tcl.tk/48) specified style engine.
Its basic idea is to separate, to the extent possible, the code
implementing a widget's behavior from the code implementing its
appearance. Widget class bindings are primarily responsible for
maintaining the widget state and invoking callbacks, all aspects
of the widgets appearance lies at Themes.
"""
__version__ = "0.3.1"
__author__ = "Guilherme Polo <[email protected]>"
__all__ = ["Button", "Checkbutton", "Combobox", "Entry", "Frame", "Label",
"Labelframe", "LabelFrame", "Menubutton", "Notebook", "Panedwindow",
"PanedWindow", "Progressbar", "Radiobutton", "Scale", "Scrollbar",
"Separator", "Sizegrip", "Style", "Treeview",
# Extensions
"LabeledScale", "OptionMenu",
# functions
"tclobjs_to_py", "setup_master"]
import Tkinter
from Tkinter import _flatten, _join, _stringify, _splitdict
# Verify if Tk is new enough to not need the Tile package
_REQUIRE_TILE = Tkinter.TkVersion < 8.5
def _load_tile(master):
if _REQUIRE_TILE:
import os
tilelib = os.environ.get('TILE_LIBRARY')
if tilelib:
# append custom tile path to the list of directories that
# Tcl uses when attempting to resolve packages with the package
# command
master.tk.eval(
'global auto_path; '
'lappend auto_path {%s}' % tilelib)
master.tk.eval('package require tile') # TclError may be raised here
master._tile_loaded = True
def _format_optvalue(value, script=False):
"""Internal function."""
if script:
# if caller passes a Tcl script to tk.call, all the values need to
# be grouped into words (arguments to a command in Tcl dialect)
value = _stringify(value)
elif isinstance(value, (list, tuple)):
value = _join(value)
return value
def _format_optdict(optdict, script=False, ignore=None):
"""Formats optdict to a tuple to pass it to tk.call.
E.g. (script=False):
{'foreground': 'blue', 'padding': [1, 2, 3, 4]} returns:
('-foreground', 'blue', '-padding', '1 2 3 4')"""
opts = []
for opt, value in optdict.iteritems():
if not ignore or opt not in ignore:
opts.append("-%s" % opt)
if value is not None:
opts.append(_format_optvalue(value, script))
return _flatten(opts)
def _mapdict_values(items):
# each value in mapdict is expected to be a sequence, where each item
# is another sequence containing a state (or several) and a value
# E.g. (script=False):
# [('active', 'selected', 'grey'), ('focus', [1, 2, 3, 4])]
# returns:
# ['active selected', 'grey', 'focus', [1, 2, 3, 4]]
opt_val = []
for item in items:
state = item[:-1]
val = item[-1]
        # hacks for backward compatibility
state[0] # raise IndexError if empty
if len(state) == 1:
# if it is empty (something that evaluates to False), then
# format it to Tcl code to denote the "normal" state
state = state[0] or ''
else:
# group multiple states
state = ' '.join(state) # raise TypeError if not str
opt_val.append(state)
if val is not None:
opt_val.append(val)
return opt_val
def _format_mapdict(mapdict, script=False):
"""Formats mapdict to pass it to tk.call.
E.g. (script=False):
{'expand': [('active', 'selected', 'grey'), ('focus', [1, 2, 3, 4])]}
returns:
('-expand', '{active selected} grey focus {1, 2, 3, 4}')"""
opts = []
for opt, value in mapdict.iteritems():
opts.extend(("-%s" % opt,
_format_optvalue(_mapdict_values(value), script)))
return _flatten(opts)
def _format_elemcreate(etype, script=False, *args, **kw):
"""Formats args and kw according to the given element factory etype."""
spec = None
opts = ()
if etype in ("image", "vsapi"):
if etype == "image": # define an element based on an image
# first arg should be the default image name
iname = args[0]
# next args, if any, are statespec/value pairs which is almost
# a mapdict, but we just need the value
imagespec = _join(_mapdict_values(args[1:]))
spec = "%s %s" % (iname, imagespec)
else:
# define an element whose visual appearance is drawn using the
# Microsoft Visual Styles API which is responsible for the
# themed styles on Windows XP and Vista.
# Availability: Tk 8.6, Windows XP and Vista.
class_name, part_id = args[:2]
statemap = _join(_mapdict_values(args[2:]))
spec = "%s %s %s" % (class_name, part_id, statemap)
opts = _format_optdict(kw, script)
elif etype == "from": # clone an element
# it expects a themename and optionally an element to clone from,
# otherwise it will clone {} (empty element)
spec = args[0] # theme name
if len(args) > 1: # elementfrom specified
opts = (_format_optvalue(args[1], script),)
if script:
spec = '{%s}' % spec
opts = ' '.join(opts)
return spec, opts
def _format_layoutlist(layout, indent=0, indent_size=2):
"""Formats a layout list so we can pass the result to ttk::style
layout and ttk::style settings. Note that the layout doesn't have to
be a list necessarily.
E.g.:
[("Menubutton.background", None),
("Menubutton.button", {"children":
[("Menubutton.focus", {"children":
[("Menubutton.padding", {"children":
[("Menubutton.label", {"side": "left", "expand": 1})]
})]
})]
}),
("Menubutton.indicator", {"side": "right"})
]
returns:
Menubutton.background
Menubutton.button -children {
Menubutton.focus -children {
Menubutton.padding -children {
Menubutton.label -side left -expand 1
}
}
}
Menubutton.indicator -side right"""
script = []
for layout_elem in layout:
elem, opts = layout_elem
opts = opts or {}
fopts = ' '.join(_format_optdict(opts, True, ("children",)))
head = "%s%s%s" % (' ' * indent, elem, (" %s" % fopts) if fopts else '')
if "children" in opts:
script.append(head + " -children {")
indent += indent_size
newscript, indent = _format_layoutlist(opts['children'], indent,
indent_size)
script.append(newscript)
indent -= indent_size
script.append('%s}' % (' ' * indent))
else:
script.append(head)
return '\n'.join(script), indent
def _script_from_settings(settings):
"""Returns an appropriate script, based on settings, according to
theme_settings definition to be used by theme_settings and
theme_create."""
script = []
# a script will be generated according to settings passed, which
# will then be evaluated by Tcl
for name, opts in settings.iteritems():
# will format specific keys according to Tcl code
if opts.get('configure'): # format 'configure'
s = ' '.join(_format_optdict(opts['configure'], True))
script.append("ttk::style configure %s %s;" % (name, s))
if opts.get('map'): # format 'map'
s = ' '.join(_format_mapdict(opts['map'], True))
script.append("ttk::style map %s %s;" % (name, s))
if 'layout' in opts: # format 'layout' which may be empty
if not opts['layout']:
s = 'null' # could be any other word, but this one makes sense
else:
s, _ = _format_layoutlist(opts['layout'])
script.append("ttk::style layout %s {\n%s\n}" % (name, s))
if opts.get('element create'): # format 'element create'
eopts = opts['element create']
etype = eopts[0]
# find where args end, and where kwargs start
argc = 1 # etype was the first one
while argc < len(eopts) and not hasattr(eopts[argc], 'iteritems'):
argc += 1
elemargs = eopts[1:argc]
elemkw = eopts[argc] if argc < len(eopts) and eopts[argc] else {}
spec, opts = _format_elemcreate(etype, True, *elemargs, **elemkw)
script.append("ttk::style element create %s %s %s %s" % (
name, etype, spec, opts))
return '\n'.join(script)
def _list_from_statespec(stuple):
"""Construct a list from the given statespec tuple according to the
accepted statespec accepted by _format_mapdict."""
nval = []
for val in stuple:
typename = getattr(val, 'typename', None)
if typename is None:
nval.append(val)
else: # this is a Tcl object
val = str(val)
if typename == 'StateSpec':
val = val.split()
nval.append(val)
it = iter(nval)
return [_flatten(spec) for spec in zip(it, it)]
def _list_from_layouttuple(tk, ltuple):
"""Construct a list from the tuple returned by ttk::layout, this is
somewhat the reverse of _format_layoutlist."""
ltuple = tk.splitlist(ltuple)
res = []
indx = 0
while indx < len(ltuple):
name = ltuple[indx]
opts = {}
res.append((name, opts))
indx += 1
while indx < len(ltuple): # grab name's options
opt, val = ltuple[indx:indx + 2]
if not opt.startswith('-'): # found next name
break
opt = opt[1:] # remove the '-' from the option
indx += 2
if opt == 'children':
val = _list_from_layouttuple(tk, val)
opts[opt] = val
return res
def _val_or_dict(tk, options, *args):
"""Format options then call Tk command with args and options and return
the appropriate result.
If no option is specified, a dict is returned. If an option is
specified with the None value, the value for that option is returned.
Otherwise, the function just sets the passed options and the caller
shouldn't be expecting a return value anyway."""
options = _format_optdict(options)
res = tk.call(*(args + options))
if len(options) % 2: # option specified without a value, return its value
return res
return _splitdict(tk, res, conv=_tclobj_to_py)
def _convert_stringval(value):
"""Converts a value to, hopefully, a more appropriate Python object."""
value = unicode(value)
try:
value = int(value)
except (ValueError, TypeError):
pass
return value
def _to_number(x):
if isinstance(x, str):
if '.' in x:
x = float(x)
else:
x = int(x)
return x
def _tclobj_to_py(val):
"""Return value converted from Tcl object to Python object."""
if val and hasattr(val, '__len__') and not isinstance(val, basestring):
if getattr(val[0], 'typename', None) == 'StateSpec':
val = _list_from_statespec(val)
else:
val = map(_convert_stringval, val)
elif hasattr(val, 'typename'): # some other (single) Tcl object
val = _convert_stringval(val)
return val
def tclobjs_to_py(adict):
"""Returns adict with its values converted from Tcl objects to Python
objects."""
for opt, val in adict.items():
adict[opt] = _tclobj_to_py(val)
return adict
def setup_master(master=None):
"""If master is not None, itself is returned. If master is None,
the default master is returned if there is one, otherwise a new
master is created and returned.
If it is not allowed to use the default root and master is None,
RuntimeError is raised."""
if master is None:
if Tkinter._support_default_root:
master = Tkinter._default_root or Tkinter.Tk()
else:
raise RuntimeError(
"No master specified and Tkinter is "
"configured to not support default root")
return master
class Style(object):
"""Manipulate style database."""
_name = "ttk::style"
def __init__(self, master=None):
master = setup_master(master)
if not getattr(master, '_tile_loaded', False):
# Load tile now, if needed
_load_tile(master)
self.master = master
self.tk = self.master.tk
def configure(self, style, query_opt=None, **kw):
"""Query or sets the default value of the specified option(s) in
style.
Each key in kw is an option and each value is either a string or
a sequence identifying the value for that option."""
if query_opt is not None:
kw[query_opt] = None
return _val_or_dict(self.tk, kw, self._name, "configure", style)
def map(self, style, query_opt=None, **kw):
"""Query or sets dynamic values of the specified option(s) in
style.
Each key in kw is an option and each value should be a list or a
tuple (usually) containing statespecs grouped in tuples, or list,
or something else of your preference. A statespec is compound of
one or more states and then a value."""
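        # An illustrative call (style name and colors are examples):
        #
        #   style.map("TButton",
        #             foreground=[("pressed", "red"), ("active", "blue")],
        #             background=[("pressed", "!disabled", "black"),
        #                         ("active", "white")])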
if query_opt is not None:
return _list_from_statespec(self.tk.splitlist(
self.tk.call(self._name, "map", style, '-%s' % query_opt)))
return _splitdict(
self.tk,
self.tk.call(self._name, "map", style, *_format_mapdict(kw)),
conv=_tclobj_to_py)
def lookup(self, style, option, state=None, default=None):
"""Returns the value specified for option in style.
If state is specified it is expected to be a sequence of one
or more states. If the default argument is set, it is used as
a fallback value in case no specification for option is found."""
state = ' '.join(state) if state else ''
return self.tk.call(self._name, "lookup", style, '-%s' % option,
state, default)
def layout(self, style, layoutspec=None):
"""Define the widget layout for given style. If layoutspec is
omitted, return the layout specification for given style.
layoutspec is expected to be a list or an object different than
None that evaluates to False if you want to "turn off" that style.
If it is a list (or tuple, or something else), each item should be
a tuple where the first item is the layout name and the second item
should have the format described below:
LAYOUTS
A layout can contain the value None, if takes no options, or
a dict of options specifying how to arrange the element.
The layout mechanism uses a simplified version of the pack
geometry manager: given an initial cavity, each element is
allocated a parcel. Valid options/values are:
side: whichside
Specifies which side of the cavity to place the
element; one of top, right, bottom or left. If
omitted, the element occupies the entire cavity.
sticky: nswe
Specifies where the element is placed inside its
allocated parcel.
children: [sublayout... ]
Specifies a list of elements to place inside the
element. Each element is a tuple (or other sequence)
where the first item is the layout name, and the other
is a LAYOUT."""
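        # An illustrative call (element names follow the default button layout
        # and are examples only):
        #
        #   style.layout("TButton", [
        #       ("Button.button", {"children": [
        #           ("Button.padding", {"children": [
        #               ("Button.label", {"side": "left", "expand": 1}),
        #           ]}),
        #       ]}),
        #   ])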
lspec = None
if layoutspec:
lspec = _format_layoutlist(layoutspec)[0]
elif layoutspec is not None: # will disable the layout ({}, '', etc)
lspec = "null" # could be any other word, but this may make sense
# when calling layout(style) later
return _list_from_layouttuple(self.tk,
self.tk.call(self._name, "layout", style, lspec))
def element_create(self, elementname, etype, *args, **kw):
"""Create a new element in the current theme of given etype."""
spec, opts = _format_elemcreate(etype, False, *args, **kw)
self.tk.call(self._name, "element", "create", elementname, etype,
spec, *opts)
def element_names(self):
"""Returns the list of elements defined in the current theme."""
return self.tk.splitlist(self.tk.call(self._name, "element", "names"))
def element_options(self, elementname):
"""Return the list of elementname's options."""
return self.tk.splitlist(self.tk.call(self._name, "element", "options", elementname))
def theme_create(self, themename, parent=None, settings=None):
"""Creates a new theme.
It is an error if themename already exists. If parent is
specified, the new theme will inherit styles, elements and
layouts from the specified parent theme. If settings are present,
they are expected to have the same syntax used for theme_settings."""
script = _script_from_settings(settings) if settings else ''
if parent:
self.tk.call(self._name, "theme", "create", themename,
"-parent", parent, "-settings", script)
else:
self.tk.call(self._name, "theme", "create", themename,
"-settings", script)
def theme_settings(self, themename, settings):
"""Temporarily sets the current theme to themename, apply specified
settings and then restore the previous theme.
Each key in settings is a style and each value may contain the
keys 'configure', 'map', 'layout' and 'element create' and they
are expected to have the same format as specified by the methods
configure, map, layout and element_create respectively."""
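        # An illustrative settings dict (style name and values are examples):
        #
        #   style.theme_settings("default", {
        #       "TCombobox": {
        #           "configure": {"padding": 5},
        #           "map": {
        #               "background": [("active", "green2"),
        #                              ("!disabled", "green4")],
        #               "arrowcolor": [("!disabled", "green3")],
        #           },
        #       },
        #   })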
script = _script_from_settings(settings)
self.tk.call(self._name, "theme", "settings", themename, script)
def theme_names(self):
"""Returns a list of all known themes."""
return self.tk.splitlist(self.tk.call(self._name, "theme", "names"))
def theme_use(self, themename=None):
"""If themename is None, returns the theme in use, otherwise, set
the current theme to themename, refreshes all widgets and emits
a <<ThemeChanged>> event."""
if themename is None:
# Starting on Tk 8.6, checking this global is no longer needed
# since it allows doing self.tk.call(self._name, "theme", "use")
return self.tk.eval("return $ttk::currentTheme")
# using "ttk::setTheme" instead of "ttk::style theme use" causes
# the variable currentTheme to be updated, also, ttk::setTheme calls
# "ttk::style theme use" in order to change theme.
self.tk.call("ttk::setTheme", themename)
class Widget(Tkinter.Widget):
"""Base class for Tk themed widgets."""
def __init__(self, master, widgetname, kw=None):
"""Constructs a Ttk Widget with the parent master.
STANDARD OPTIONS
class, cursor, takefocus, style
SCROLLABLE WIDGET OPTIONS
xscrollcommand, yscrollcommand
LABEL WIDGET OPTIONS
text, textvariable, underline, image, compound, width
WIDGET STATES
active, disabled, focus, pressed, selected, background,
readonly, alternate, invalid
"""
master = setup_master(master)
if not getattr(master, '_tile_loaded', False):
# Load tile now, if needed
_load_tile(master)
Tkinter.Widget.__init__(self, master, widgetname, kw=kw)
def identify(self, x, y):
"""Returns the name of the element at position x, y, or the empty
string if the point does not lie within any element.
x and y are pixel coordinates relative to the widget."""
return self.tk.call(self._w, "identify", x, y)
def instate(self, statespec, callback=None, *args, **kw):
"""Test the widget's state.
If callback is not specified, returns True if the widget state
matches statespec and False otherwise. If callback is specified,
then it will be invoked with *args, **kw if the widget state
matches statespec. statespec is expected to be a sequence."""
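        # Illustrative calls (state names are examples; "!" negates a state):
        #
        #   widget.instate(["disabled"])             # True only while disabled
        #   widget.instate(["!disabled"], callback)  # run callback() if enabled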
ret = self.tk.getboolean(
self.tk.call(self._w, "instate", ' '.join(statespec)))
if ret and callback:
return callback(*args, **kw)
return ret
def state(self, statespec=None):
"""Modify or inquire widget state.
Widget state is returned if statespec is None, otherwise it is
set according to the statespec flags and then a new state spec
is returned indicating which flags were changed. statespec is
expected to be a sequence."""
if statespec is not None:
statespec = ' '.join(statespec)
return self.tk.splitlist(str(self.tk.call(self._w, "state", statespec)))
class Button(Widget):
"""Ttk Button widget, displays a textual label and/or image, and
evaluates a command when pressed."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Button widget with the parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
command, default, width
"""
Widget.__init__(self, master, "ttk::button", kw)
def invoke(self):
"""Invokes the command associated with the button."""
return self.tk.call(self._w, "invoke")
class Checkbutton(Widget):
"""Ttk Checkbutton widget which is either in on- or off-state."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Checkbutton widget with the parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
command, offvalue, onvalue, variable
"""
Widget.__init__(self, master, "ttk::checkbutton", kw)
def invoke(self):
"""Toggles between the selected and deselected states and
invokes the associated command. If the widget is currently
selected, sets the option variable to the offvalue option
and deselects the widget; otherwise, sets the option variable
to the option onvalue.
Returns the result of the associated command."""
return self.tk.call(self._w, "invoke")
class Entry(Widget, Tkinter.Entry):
"""Ttk Entry widget displays a one-line text string and allows that
string to be edited by the user."""
def __init__(self, master=None, widget=None, **kw):
"""Constructs a Ttk Entry widget with the parent master.
STANDARD OPTIONS
class, cursor, style, takefocus, xscrollcommand
WIDGET-SPECIFIC OPTIONS
exportselection, invalidcommand, justify, show, state,
textvariable, validate, validatecommand, width
VALIDATION MODES
none, key, focus, focusin, focusout, all
"""
Widget.__init__(self, master, widget or "ttk::entry", kw)
def bbox(self, index):
"""Return a tuple of (x, y, width, height) which describes the
bounding box of the character given by index."""
return self._getints(self.tk.call(self._w, "bbox", index))
def identify(self, x, y):
"""Returns the name of the element at position x, y, or the
empty string if the coordinates are outside the window."""
return self.tk.call(self._w, "identify", x, y)
def validate(self):
"""Force revalidation, independent of the conditions specified
by the validate option. Returns False if validation fails, True
if it succeeds. Sets or clears the invalid state accordingly."""
return self.tk.getboolean(self.tk.call(self._w, "validate"))
class Combobox(Entry):
"""Ttk Combobox widget combines a text field with a pop-down list of
values."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Combobox widget with the parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
exportselection, justify, height, postcommand, state,
textvariable, values, width
"""
Entry.__init__(self, master, "ttk::combobox", **kw)
def current(self, newindex=None):
"""If newindex is supplied, sets the combobox value to the
element at position newindex in the list of values. Otherwise,
returns the index of the current value in the list of values
or -1 if the current value does not appear in the list."""
if newindex is None:
return self.tk.getint(self.tk.call(self._w, "current"))
return self.tk.call(self._w, "current", newindex)
def set(self, value):
"""Sets the value of the combobox to value."""
self.tk.call(self._w, "set", value)
class Frame(Widget):
"""Ttk Frame widget is a container, used to group other widgets
together."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Frame with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
borderwidth, relief, padding, width, height
"""
Widget.__init__(self, master, "ttk::frame", kw)
class Label(Widget):
"""Ttk Label widget displays a textual label and/or image."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Label with parent master.
STANDARD OPTIONS
class, compound, cursor, image, style, takefocus, text,
textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
anchor, background, font, foreground, justify, padding,
relief, text, wraplength
"""
Widget.__init__(self, master, "ttk::label", kw)
class Labelframe(Widget):
"""Ttk Labelframe widget is a container used to group other widgets
together. It has an optional label, which may be a plain text string
or another widget."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Labelframe with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
labelanchor, text, underline, padding, labelwidget, width,
height
"""
Widget.__init__(self, master, "ttk::labelframe", kw)
LabelFrame = Labelframe # Tkinter name compatibility
class Menubutton(Widget):
"""Ttk Menubutton widget displays a textual label and/or image, and
displays a menu when pressed."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Menubutton with parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
direction, menu
"""
Widget.__init__(self, master, "ttk::menubutton", kw)
class Notebook(Widget):
"""Ttk Notebook widget manages a collection of windows and displays
a single one at a time. Each child window is associated with a tab,
which the user may select to change the currently-displayed window."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Notebook with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
height, padding, width
TAB OPTIONS
state, sticky, padding, text, image, compound, underline
TAB IDENTIFIERS (tab_id)
The tab_id argument found in several methods may take any of
the following forms:
* An integer between zero and the number of tabs
* The name of a child window
* A positional specification of the form "@x,y", which
defines the tab
* The string "current", which identifies the
currently-selected tab
* The string "end", which returns the number of tabs (only
valid for method index)
"""
Widget.__init__(self, master, "ttk::notebook", kw)
def add(self, child, **kw):
"""Adds a new tab to the notebook.
If window is currently managed by the notebook but hidden, it is
restored to its previous position."""
self.tk.call(self._w, "add", child, *(_format_optdict(kw)))
def forget(self, tab_id):
"""Removes the tab specified by tab_id, unmaps and unmanages the
associated window."""
self.tk.call(self._w, "forget", tab_id)
def hide(self, tab_id):
"""Hides the tab specified by tab_id.
The tab will not be displayed, but the associated window remains
managed by the notebook and its configuration remembered. Hidden
tabs may be restored with the add command."""
self.tk.call(self._w, "hide", tab_id)
def identify(self, x, y):
"""Returns the name of the tab element at position x, y, or the
empty string if none."""
return self.tk.call(self._w, "identify", x, y)
def index(self, tab_id):
"""Returns the numeric index of the tab specified by tab_id, or
the total number of tabs if tab_id is the string "end"."""
return self.tk.getint(self.tk.call(self._w, "index", tab_id))
def insert(self, pos, child, **kw):
"""Inserts a pane at the specified position.
pos is either the string end, an integer index, or the name of
a managed child. If child is already managed by the notebook,
moves it to the specified position."""
self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw)))
def select(self, tab_id=None):
"""Selects the specified tab.
The associated child window will be displayed, and the
previously-selected window (if different) is unmapped. If tab_id
is omitted, returns the widget name of the currently selected
pane."""
return self.tk.call(self._w, "select", tab_id)
def tab(self, tab_id, option=None, **kw):
"""Query or modify the options of the specific tab_id.
If kw is not given, returns a dict of the tab option values. If option
is specified, returns the value of that option. Otherwise, sets the
options to the corresponding values."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "tab", tab_id)
def tabs(self):
"""Returns a list of windows managed by the notebook."""
return self.tk.splitlist(self.tk.call(self._w, "tabs") or ())
def enable_traversal(self):
"""Enable keyboard traversal for a toplevel window containing
this notebook.
This will extend the bindings for the toplevel window containing
this notebook as follows:
Control-Tab: selects the tab following the currently selected
one
Shift-Control-Tab: selects the tab preceding the currently
selected one
Alt-K: where K is the mnemonic (underlined) character of any
tab, will select that tab.
Multiple notebooks in a single toplevel may be enabled for
traversal, including nested notebooks. However, notebook traversal
only works properly if all panes are direct children of the
notebook."""
# The only, and good, difference I see is about mnemonics, which works
# after calling this method. Control-Tab and Shift-Control-Tab always
# works (here at least).
self.tk.call("ttk::notebook::enableTraversal", self._w)
class Panedwindow(Widget, Tkinter.PanedWindow):
"""Ttk Panedwindow widget displays a number of subwindows, stacked
either vertically or horizontally."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Panedwindow with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
orient, width, height
PANE OPTIONS
weight
"""
Widget.__init__(self, master, "ttk::panedwindow", kw)
forget = Tkinter.PanedWindow.forget # overrides Pack.forget
def insert(self, pos, child, **kw):
"""Inserts a pane at the specified positions.
pos is either the string end, and integer index, or the name
of a child. If child is already managed by the paned window,
moves it to the specified position."""
self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw)))
def pane(self, pane, option=None, **kw):
"""Query or modify the options of the specified pane.
pane is either an integer index or the name of a managed subwindow.
If kw is not given, returns a dict of the pane option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "pane", pane)
def sashpos(self, index, newpos=None):
"""If newpos is specified, sets the position of sash number index.
May adjust the positions of adjacent sashes to ensure that
positions are monotonically increasing. Sash positions are further
constrained to be between 0 and the total size of the widget.
Returns the new position of sash number index."""
return self.tk.getint(self.tk.call(self._w, "sashpos", index, newpos))
PanedWindow = Panedwindow # Tkinter name compatibility
class Progressbar(Widget):
"""Ttk Progressbar widget shows the status of a long-running
operation. They can operate in two modes: determinate mode shows the
amount completed relative to the total amount of work to be done, and
indeterminate mode provides an animated display to let the user know
that something is happening."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Progressbar with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
orient, length, mode, maximum, value, variable, phase
"""
Widget.__init__(self, master, "ttk::progressbar", kw)
def start(self, interval=None):
"""Begin autoincrement mode: schedules a recurring timer event
that calls method step every interval milliseconds.
interval defaults to 50 milliseconds (20 steps/second) if omitted."""
self.tk.call(self._w, "start", interval)
def step(self, amount=None):
"""Increments the value option by amount.
amount defaults to 1.0 if omitted."""
self.tk.call(self._w, "step", amount)
def stop(self):
"""Stop autoincrement mode: cancels any recurring timer event
initiated by start."""
self.tk.call(self._w, "stop")
class Radiobutton(Widget):
"""Ttk Radiobutton widgets are used in groups to show or change a
set of mutually-exclusive options."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Radiobutton with parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
command, value, variable
"""
Widget.__init__(self, master, "ttk::radiobutton", kw)
def invoke(self):
"""Sets the option variable to the option value, selects the
widget, and invokes the associated command.
Returns the result of the command, or an empty string if
no command is specified."""
return self.tk.call(self._w, "invoke")
class Scale(Widget, Tkinter.Scale):
"""Ttk Scale widget is typically used to control the numeric value of
a linked variable that varies uniformly over some range."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Scale with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
command, from, length, orient, to, value, variable
"""
Widget.__init__(self, master, "ttk::scale", kw)
def configure(self, cnf=None, **kw):
"""Modify or query scale options.
Setting a value for any of the "from", "from_" or "to" options
generates a <<RangeChanged>> event."""
if cnf:
kw.update(cnf)
Widget.configure(self, **kw)
if any(['from' in kw, 'from_' in kw, 'to' in kw]):
self.event_generate('<<RangeChanged>>')
def get(self, x=None, y=None):
"""Get the current value of the value option, or the value
corresponding to the coordinates x, y if they are specified.
x and y are pixel coordinates relative to the scale widget
origin."""
return self.tk.call(self._w, 'get', x, y)
class Scrollbar(Widget, Tkinter.Scrollbar):
"""Ttk Scrollbar controls the viewport of a scrollable widget."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Scrollbar with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
command, orient
"""
Widget.__init__(self, master, "ttk::scrollbar", kw)
class Separator(Widget):
"""Ttk Separator widget displays a horizontal or vertical separator
bar."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Separator with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
orient
"""
Widget.__init__(self, master, "ttk::separator", kw)
class Sizegrip(Widget):
"""Ttk Sizegrip allows the user to resize the containing toplevel
window by pressing and dragging the grip."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Sizegrip with parent master.
STANDARD OPTIONS
class, cursor, state, style, takefocus
"""
Widget.__init__(self, master, "ttk::sizegrip", kw)
class Treeview(Widget, Tkinter.XView, Tkinter.YView):
"""Ttk Treeview widget displays a hierarchical collection of items.
Each item has a textual label, an optional image, and an optional list
of data values. The data values are displayed in successive columns
after the tree label."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Treeview with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus, xscrollcommand,
yscrollcommand
WIDGET-SPECIFIC OPTIONS
columns, displaycolumns, height, padding, selectmode, show
ITEM OPTIONS
text, image, values, open, tags
TAG OPTIONS
foreground, background, font, image
"""
Widget.__init__(self, master, "ttk::treeview", kw)
def bbox(self, item, column=None):
"""Returns the bounding box (relative to the treeview widget's
window) of the specified item in the form x y width height.
If column is specified, returns the bounding box of that cell.
If the item is not visible (i.e., if it is a descendant of a
closed item or is scrolled offscreen), returns an empty string."""
return self._getints(self.tk.call(self._w, "bbox", item, column)) or ''
def get_children(self, item=None):
"""Returns a tuple of children belonging to item.
If item is not specified, returns root children."""
return self.tk.splitlist(
self.tk.call(self._w, "children", item or '') or ())
def set_children(self, item, *newchildren):
"""Replaces item's child with newchildren.
Children present in item that are not present in newchildren
are detached from tree. No items in newchildren may be an
ancestor of item."""
self.tk.call(self._w, "children", item, newchildren)
def column(self, column, option=None, **kw):
"""Query or modify the options for the specified column.
If kw is not given, returns a dict of the column option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "column", column)
def delete(self, *items):
"""Delete all specified items and all their descendants. The root
item may not be deleted."""
self.tk.call(self._w, "delete", items)
def detach(self, *items):
"""Unlinks all of the specified items from the tree.
The items and all of their descendants are still present, and may
be reinserted at another point in the tree, but will not be
displayed. The root item may not be detached."""
self.tk.call(self._w, "detach", items)
def exists(self, item):
"""Returns True if the specified item is present in the tree,
False otherwise."""
return self.tk.getboolean(self.tk.call(self._w, "exists", item))
def focus(self, item=None):
"""If item is specified, sets the focus item to item. Otherwise,
returns the current focus item, or '' if there is none."""
return self.tk.call(self._w, "focus", item)
def heading(self, column, option=None, **kw):
"""Query or modify the heading options for the specified column.
If kw is not given, returns a dict of the heading option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values.
Valid options/values are:
text: text
The text to display in the column heading
image: image_name
Specifies an image to display to the right of the column
heading
anchor: anchor
Specifies how the heading text should be aligned. One of
the standard Tk anchor values
command: callback
A callback to be invoked when the heading label is
pressed.
To configure the tree column heading, call this with column = "#0" """
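        # An illustrative call (column id and label are examples):
        #
        #   tree.heading("#0", text="Name", anchor="w")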
cmd = kw.get('command')
if cmd and not isinstance(cmd, basestring):
# callback not registered yet, do it now
kw['command'] = self.master.register(cmd, self._substitute)
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, 'heading', column)
def identify(self, component, x, y):
"""Returns a description of the specified component under the
point given by x and y, or the empty string if no such component
is present at that position."""
return self.tk.call(self._w, "identify", component, x, y)
def identify_row(self, y):
"""Returns the item ID of the item at position y."""
return self.identify("row", 0, y)
def identify_column(self, x):
"""Returns the data column identifier of the cell at position x.
The tree column has ID #0."""
return self.identify("column", x, 0)
def identify_region(self, x, y):
"""Returns one of:
heading: Tree heading area.
        separator: Space between two column headings.
tree: The tree area.
cell: A data cell.
* Availability: Tk 8.6"""
return self.identify("region", x, y)
def identify_element(self, x, y):
"""Returns the element at position x, y.
* Availability: Tk 8.6"""
return self.identify("element", x, y)
def index(self, item):
"""Returns the integer index of item within its parent's list
of children."""
return self.tk.getint(self.tk.call(self._w, "index", item))
def insert(self, parent, index, iid=None, **kw):
"""Creates a new item and return the item identifier of the newly
created item.
parent is the item ID of the parent item, or the empty string
to create a new top-level item. index is an integer, or the value
end, specifying where in the list of parent's children to insert
the new item. If index is less than or equal to zero, the new node
is inserted at the beginning, if index is greater than or equal to
the current number of children, it is inserted at the end. If iid
is specified, it is used as the item identifier, iid must not
already exist in the tree. Otherwise, a new unique identifier
is generated."""
opts = _format_optdict(kw)
if iid is not None:
res = self.tk.call(self._w, "insert", parent, index,
"-id", iid, *opts)
else:
res = self.tk.call(self._w, "insert", parent, index, *opts)
return res
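    # Example (hypothetical widget and item names; a minimal sketch of
    # typical insert() usage, not part of the original source):
    #   tree = Treeview(master, columns=('size',))
    #   top = tree.insert('', 'end', text='root folder', values=('4 KB',))
    #   tree.insert(top, 'end', text='child file', values=('1 KB',))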
def item(self, item, option=None, **kw):
"""Query or modify the options for the specified item.
If no options are given, a dict with options/values for the item
is returned. If option is specified then the value for that option
is returned. Otherwise, sets the options to the corresponding
values as given by kw."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "item", item)
def move(self, item, parent, index):
"""Moves item to position index in parent's list of children.
It is illegal to move an item under one of its descendants. If
index is less than or equal to zero, item is moved to the
beginning, if greater than or equal to the number of children,
it is moved to the end. If item was detached it is reattached."""
self.tk.call(self._w, "move", item, parent, index)
reattach = move # A sensible method name for reattaching detached items
def next(self, item):
"""Returns the identifier of item's next sibling, or '' if item
is the last child of its parent."""
return self.tk.call(self._w, "next", item)
def parent(self, item):
"""Returns the ID of the parent of item, or '' if item is at the
top level of the hierarchy."""
return self.tk.call(self._w, "parent", item)
def prev(self, item):
"""Returns the identifier of item's previous sibling, or '' if
item is the first child of its parent."""
return self.tk.call(self._w, "prev", item)
def see(self, item):
"""Ensure that item is visible.
Sets all of item's ancestors open option to True, and scrolls
the widget if necessary so that item is within the visible
portion of the tree."""
self.tk.call(self._w, "see", item)
def selection(self, selop=None, items=None):
"""If selop is not specified, returns selected items."""
if isinstance(items, basestring):
items = (items,)
return self.tk.splitlist(self.tk.call(self._w, "selection", selop, items))
def selection_set(self, items):
"""items becomes the new selection."""
self.selection("set", items)
def selection_add(self, items):
"""Add items to the selection."""
self.selection("add", items)
def selection_remove(self, items):
"""Remove items from the selection."""
self.selection("remove", items)
def selection_toggle(self, items):
"""Toggle the selection state of each item in items."""
self.selection("toggle", items)
def set(self, item, column=None, value=None):
"""Query or set the value of given item.
With one argument, return a dictionary of column/value pairs
for the specified item. With two arguments, return the current
value of the specified column. With three arguments, set the
value of given column in given item to the specified value."""
res = self.tk.call(self._w, "set", item, column, value)
if column is None and value is None:
return _splitdict(self.tk, res,
cut_minus=False, conv=_tclobj_to_py)
else:
return res
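    # Example (hypothetical item id; illustrates the one-, two- and
    # three-argument forms of set()):
    #   tree.set(item)                  # -> {'size': '4 KB', ...}
    #   tree.set(item, 'size')          # -> '4 KB'
    #   tree.set(item, 'size', '8 KB')  # updates the cell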
def tag_bind(self, tagname, sequence=None, callback=None):
"""Bind a callback for the given event sequence to the tag tagname.
When an event is delivered to an item, the callbacks for each
of the item's tags option are called."""
self._bind((self._w, "tag", "bind", tagname), sequence, callback, add=0)
def tag_configure(self, tagname, option=None, **kw):
"""Query or modify the options for the specified tagname.
If kw is not given, returns a dict of the option settings for tagname.
If option is specified, returns the value for that option for the
specified tagname. Otherwise, sets the options to the corresponding
values for the given tagname."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "tag", "configure",
tagname)
def tag_has(self, tagname, item=None):
"""If item is specified, returns 1 or 0 depending on whether the
specified item has the given tagname. Otherwise, returns a list of
all items which have the specified tag.
* Availability: Tk 8.6"""
if item is None:
return self.tk.splitlist(
self.tk.call(self._w, "tag", "has", tagname))
else:
return self.tk.getboolean(
self.tk.call(self._w, "tag", "has", tagname, item))
# Extensions
class LabeledScale(Frame, object):
"""A Ttk Scale widget with a Ttk Label widget indicating its
current value.
The Ttk Scale can be accessed through instance.scale, and Ttk Label
can be accessed through instance.label"""
def __init__(self, master=None, variable=None, from_=0, to=10, **kw):
"""Construct a horizontal LabeledScale with parent master, a
variable to be associated with the Ttk Scale widget and its range.
If variable is not specified, a Tkinter.IntVar is created.
WIDGET-SPECIFIC OPTIONS
compound: 'top' or 'bottom'
Specifies how to display the label relative to the scale.
Defaults to 'top'.
"""
self._label_top = kw.pop('compound', 'top') == 'top'
Frame.__init__(self, master, **kw)
self._variable = variable or Tkinter.IntVar(master)
self._variable.set(from_)
self._last_valid = from_
self.label = Label(self)
self.scale = Scale(self, variable=self._variable, from_=from_, to=to)
self.scale.bind('<<RangeChanged>>', self._adjust)
# position scale and label according to the compound option
scale_side = 'bottom' if self._label_top else 'top'
label_side = 'top' if scale_side == 'bottom' else 'bottom'
self.scale.pack(side=scale_side, fill='x')
        Label(self).pack(side=label_side)  # place holder
self.label.place(anchor='n' if label_side == 'top' else 's')
# update the label as scale or variable changes
self.__tracecb = self._variable.trace_variable('w', self._adjust)
self.bind('<Configure>', self._adjust)
self.bind('<Map>', self._adjust)
def destroy(self):
"""Destroy this widget and possibly its associated variable."""
try:
self._variable.trace_vdelete('w', self.__tracecb)
except AttributeError:
# widget has been destroyed already
pass
else:
del self._variable
Frame.destroy(self)
self.label = None
self.scale = None
def _adjust(self, *args):
"""Adjust the label position according to the scale."""
def adjust_label():
self.update_idletasks() # "force" scale redraw
x, y = self.scale.coords()
if self._label_top:
y = self.scale.winfo_y() - self.label.winfo_reqheight()
else:
y = self.scale.winfo_reqheight() + self.label.winfo_reqheight()
self.label.place_configure(x=x, y=y)
from_ = _to_number(self.scale['from'])
to = _to_number(self.scale['to'])
if to < from_:
from_, to = to, from_
newval = self._variable.get()
if not from_ <= newval <= to:
# value outside range, set value back to the last valid one
self.value = self._last_valid
return
self._last_valid = newval
self.label['text'] = newval
self.after_idle(adjust_label)
def _get_value(self):
"""Return current scale value."""
return self._variable.get()
def _set_value(self, val):
"""Set new scale value."""
self._variable.set(val)
value = property(_get_value, _set_value)
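    # Example (a minimal sketch; assumes a Tk root window already exists):
    #   ls = LabeledScale(root, from_=0, to=100, compound='bottom')
    #   ls.pack(fill='x')
    #   ls.value = 42   # moves the scale and updates the label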
class OptionMenu(Menubutton):
"""Themed OptionMenu, based after Tkinter's OptionMenu, which allows
the user to select a value from a menu."""
def __init__(self, master, variable, default=None, *values, **kwargs):
"""Construct a themed OptionMenu widget with master as the parent,
the resource textvariable set to variable, the initially selected
value specified by the default parameter, the menu values given by
*values and additional keywords.
WIDGET-SPECIFIC OPTIONS
style: stylename
Menubutton style.
direction: 'above', 'below', 'left', 'right', or 'flush'
Menubutton direction.
command: callback
A callback that will be invoked after selecting an item.
"""
kw = {'textvariable': variable, 'style': kwargs.pop('style', None),
'direction': kwargs.pop('direction', None)}
Menubutton.__init__(self, master, **kw)
self['menu'] = Tkinter.Menu(self, tearoff=False)
self._variable = variable
self._callback = kwargs.pop('command', None)
if kwargs:
raise Tkinter.TclError('unknown option -%s' % (
kwargs.iterkeys().next()))
self.set_menu(default, *values)
def __getitem__(self, item):
if item == 'menu':
return self.nametowidget(Menubutton.__getitem__(self, item))
return Menubutton.__getitem__(self, item)
def set_menu(self, default=None, *values):
"""Build a new menu of radiobuttons with *values and optionally
a default value."""
menu = self['menu']
menu.delete(0, 'end')
for val in values:
menu.add_radiobutton(label=val,
command=Tkinter._setit(self._variable, val, self._callback),
variable=self._variable)
if default:
self._variable.set(default)
def destroy(self):
"""Destroy this widget and its associated variable."""
try:
del self._variable
except AttributeError:
pass
Menubutton.destroy(self)
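# Example (a minimal sketch; assumes a Tkinter root and a StringVar):
#   var = Tkinter.StringVar()
#   menu = OptionMenu(root, var, 'one', 'one', 'two', 'three')
#   menu.pack()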
| gpl-3.0 | 4,497,026,299,902,996,000 | 33.461963 | 93 | 0.607747 | false | 4.195146 | false | false | false |
randall-frank/heresy | card_objects.py | 1 | 23728 | #
# T.I.M.E Stories card editor
# Copyright (C) 2017 Randall Frank
# See LICENSE for details
#
import base64
import os
import os.path
from PyQt5 import QtXml
from PyQt5 import QtGui
from PyQt5 import QtCore
from PyQt5 import QtWidgets
# these are the core objects that represent a deck of cards to the editor
class Base(object):
def __init__(self, name, xml_tag):
self.name = name
self.xml_tag = xml_tag
def get_column_info(self, col):
return ""
def set_xml_name(self, name):
self.xml_tag = name
def get_xml_name(self):
return self.xml_tag
def load_attrib_string(self, elem, name, default=None):
QtWidgets.QApplication.processEvents()
tmp = elem.firstChildElement(name)
v = default
if not tmp.isNull():
v = str(tmp.text())
if v is not None:
self.__setattr__(name, v)
def save_attrib_string(self, doc, parent, name):
tmp = doc.createElement(name)
parent.appendChild(tmp)
s = self.__getattribute__(name)
text = doc.createTextNode(str(s))
tmp.appendChild(text)
def load_attrib_int(self, elem, name, default=None):
tmp = elem.firstChildElement(name)
v = default
if not tmp.isNull():
v = int(str(tmp.text()))
if v is not None:
self.__setattr__(name, v)
def save_attrib_int(self, doc, parent, name):
self.save_attrib_string(doc, parent, name)
def load_attrib_obj(self, elem, name, default=None):
tmp = elem.firstChildElement(name)
obj = default
if not tmp.isNull():
obj = eval(str(tmp.text()))
if obj is not None:
self.__setattr__(name, obj)
def save_attrib_obj(self, doc, parent, name):
tmp = doc.createElement(name)
parent.appendChild(tmp)
obj = self.__getattribute__(name)
text = doc.createTextNode(obj.__repr__())
tmp.appendChild(text)
def to_xml(self, doc, parent):
QtWidgets.QApplication.processEvents()
tmp = doc.createElement(self.xml_tag)
tmp.setAttribute('name', self.name)
parent.appendChild(tmp)
return self.to_element(doc, tmp)
def to_element(self, doc, elem):
return True
class Renderable(Base):
def __init__(self, name, xml_tag='renderable'):
super(Renderable, self).__init__(name, xml_tag)
self.order = 0 # Z depth...
self.rotation = 0
self.rectangle = [0, 0, 0, 0]
def render_object(self):
return
class ImageRender(Renderable):
def __init__(self, name="image"):
super(ImageRender, self).__init__(name, 'render_image')
self.image = ""
def get_column_info(self, col):
if col != 1:
return super(ImageRender, self).get_column_info(col)
return "%d,%d - %d,%d" % tuple(self.rectangle)
@classmethod
def from_element(cls, elem):
obj = ImageRender()
obj.load_attrib_string(elem, "image")
obj.load_attrib_obj(elem, "rectangle")
obj.load_attrib_int(elem, "rotation")
obj.load_attrib_int(elem, "order")
return obj
def to_element(self, doc, elem):
self.save_attrib_string(doc, elem, "image")
self.save_attrib_int(doc, elem, "rotation")
self.save_attrib_obj(doc, elem, "rectangle")
self.save_attrib_int(doc, elem, "order")
return True
class RectRender(Renderable):
def __init__(self, name="rect"):
super(RectRender, self).__init__(name, 'render_rect')
self.style = "default"
def get_column_info(self, col):
if col != 1:
return super(RectRender, self).get_column_info(col)
return "%d,%d - %d,%d" % tuple(self.rectangle)
@classmethod
def from_element(cls, elem):
obj = RectRender()
obj.load_attrib_string(elem, "style")
obj.load_attrib_int(elem, "rotation")
obj.load_attrib_obj(elem, "rectangle")
obj.load_attrib_int(elem, "order")
return obj
def to_element(self, doc, elem):
self.save_attrib_string(doc, elem, "style")
self.save_attrib_int(doc, elem, "rotation")
self.save_attrib_obj(doc, elem, "rectangle")
self.save_attrib_int(doc, elem, "order")
return True
class TextRender(Renderable):
def __init__(self, name="text"):
super(TextRender, self).__init__(name, 'render_text')
self.style = "default"
self.text = ""
def get_column_info(self, col):
if col != 1:
return super(TextRender, self).get_column_info(col)
return "%d,%d - %d,%d" % tuple(self.rectangle)
@classmethod
def from_element(cls, elem):
obj = TextRender()
obj.load_attrib_string(elem, "text")
obj.load_attrib_string(elem, "style")
obj.load_attrib_int(elem, "rotation")
obj.load_attrib_obj(elem, "rectangle")
obj.load_attrib_int(elem, "order")
return obj
def to_element(self, doc, elem):
self.save_attrib_string(doc, elem, "text")
self.save_attrib_string(doc, elem, "style")
self.save_attrib_int(doc, elem, "rotation")
self.save_attrib_obj(doc, elem, "rectangle")
self.save_attrib_int(doc, elem, "order")
return True
# Essentially, a Face is a list of renderable items. Right now, text or image items
# that reference styles and images, along with content.
class Face(Base):
def __init__(self, name):
super(Face, self).__init__(name, name)
self.renderables = list() # a face is an array of Renderable instances
@classmethod
def from_element(cls, elem, is_top):
name = "top"
if not is_top:
name = "bottom"
obj = Face(name)
obj.set_xml_name(name)
# walk element children... and map to 'renderables'
obj.renderables = list()
tmp = elem.firstChildElement()
while not tmp.isNull():
tag = str(tmp.tagName())
if tag.endswith('image'):
tmp_obj = ImageRender.from_element(tmp)
elif tag.endswith('text'):
tmp_obj = TextRender.from_element(tmp)
elif tag.endswith('rect'):
tmp_obj = RectRender.from_element(tmp)
else:
tmp_obj = None
if tmp_obj is not None:
obj.renderables.append(tmp_obj)
tmp = tmp.nextSiblingElement()
return obj
def to_element(self, doc, elem):
for r in self.renderables:
r.to_xml(doc, elem)
return True
class Card(Base):
def __init__(self, name, xml_tag='card'):
super(Card, self).__init__(name, xml_tag)
self.top_face = Face('top')
self.bot_face = Face('bottom')
self.card_number = 0
self.local_card_number = 0
@classmethod
def from_element(cls, elem):
name = elem.attribute("name", "Unnamed Card")
obj = Card(str(name))
tmp = elem.firstChildElement("top")
if not tmp.isNull():
obj.top_face = Face.from_element(tmp, True)
tmp = elem.firstChildElement("bottom")
if not tmp.isNull():
obj.bot_face = Face.from_element(tmp, False)
return obj
def to_element(self, doc, elem):
self.top_face.to_xml(doc, elem)
self.bot_face.to_xml(doc, elem)
return True
class Location(Base):
def __init__(self, name):
super(Location, self).__init__(name, 'location')
self.cards = list()
@classmethod
def from_element(cls, elem):
name = elem.attribute("name", "Unnamed Location")
obj = Location(str(name))
tmp = elem.firstChildElement("card")
while not tmp.isNull():
tmp_card = Card.from_element(tmp)
if tmp_card is not None:
obj.cards.append(tmp_card)
tmp = tmp.nextSiblingElement('card')
        return obj
def to_element(self, doc, elem):
for c in self.cards:
c.to_xml(doc, elem)
return True
class Style(Base):
def __init__(self, name):
super(Style, self).__init__(name, 'style')
self.typeface = "Arial"
self.typesize = 12
self.fillcolor = [255, 255, 255, 255]
self.borderthickness = 0
self.bordercolor = [0, 0, 0, 255]
self.textcolor = [0, 0, 0, 255]
self.linestyle = "solid"
@classmethod
def from_element(cls, elem):
name = elem.attribute("name", "Unnamed Image")
obj = Style(str(name))
obj.load_attrib_string(elem, "typeface", "Arial")
obj.load_attrib_string(elem, "linestyle")
obj.load_attrib_obj(elem, "fillcolor")
obj.load_attrib_obj(elem, "bordercolor")
obj.load_attrib_obj(elem, "textcolor")
obj.load_attrib_int(elem, "typesize")
obj.load_attrib_int(elem, "borderthickness")
return obj
def to_element(self, doc, elem):
self.save_attrib_string(doc, elem, "typeface")
self.save_attrib_string(doc, elem, "linestyle")
self.save_attrib_obj(doc, elem, "fillcolor")
self.save_attrib_obj(doc, elem, "bordercolor")
self.save_attrib_obj(doc, elem, "textcolor")
self.save_attrib_int(doc, elem, "typesize")
self.save_attrib_int(doc, elem, "borderthickness")
return True
class Image(Base):
def __init__(self, name):
super(Image, self).__init__(name, 'image')
self.file = ''
self.rectangle = [0, 0, 0, 0] # x,y,dx,dy
self.usage = 'any'
def get_image(self, deck):
f = deck.find_file(self.file)
if f is None:
return None
img = f.image.copy(self.rectangle[0], self.rectangle[1], self.rectangle[2], self.rectangle[3]) # QImage
return img
def get_column_info(self, col):
if col != 1:
return super(Image, self).get_column_info(col)
return "%d,%d - %d,%d" % tuple(self.rectangle)
@classmethod
def from_element(cls, elem):
name = elem.attribute("name", "Unnamed Image")
obj = Image(str(name))
obj.load_attrib_string(elem, "file")
obj.load_attrib_obj(elem, "rectangle")
obj.load_attrib_string(elem, "usage")
return obj
def to_element(self, doc, elem):
self.save_attrib_string(doc, elem, "file")
self.save_attrib_obj(doc, elem, "rectangle")
self.save_attrib_string(doc, elem, "usage")
return True
class File(Base):
def __init__(self, name):
super(File, self).__init__(name, 'file')
self.image = QtGui.QImage()
self.filename = ""
self.store_inline = True
def load_file(self, filename, name=None, store_as_resource=True):
self.image.load(filename)
self.filename = filename
self.store_inline = store_as_resource
if name is not None:
self.name = name
else:
self.name = filename
def get_column_info(self, col):
if col != 1:
return super(File, self).get_column_info(col)
return "%dx%d" % tuple(self.size())
def size(self):
return [self.image.width(), self.image.height()]
@classmethod
def from_element(cls, elem):
QtWidgets.QApplication.processEvents()
name = elem.attribute("name", "Unnamed File")
filename = elem.attribute("filename", None)
obj = File(name)
# two cases: text is the file content or text is empty
# in the latter case, try to read the 'name' as a file
try:
tmp = elem.text() # get unicode string
if len(tmp) == 0:
if not obj.image.load(filename, name):
print("Warning, failed to load file: {}".format(filename))
return None
else:
tmp = bytes(tmp, "UTF-8") # convert to ASCII 8bit bytes
s = base64.b64decode(tmp) # decode to binary
buffer = QtCore.QBuffer() # do the I/O
buffer.setData(s)
buffer.open(QtCore.QIODevice.ReadWrite)
if not obj.image.load(buffer, "png"):
if not obj.image.load(filename, name):
return None
except Exception as e:
print("File from_element Error", str(e))
return None
return obj
def to_element(self, doc, elem):
try:
if self.store_inline:
buffer = QtCore.QBuffer()
buffer.open(QtCore.QIODevice.ReadWrite)
self.image.save(buffer, "png") # Do the I/O
s = base64.b64encode(buffer.data()) # encode binary data as ASCII 8bit bytes
tmp = s.decode(encoding="UTF-8") # convert the ASCII 8bit sequence to Unicode
text = doc.createTextNode(tmp) # Add it to the DOM
elem.appendChild(text)
elem.setAttribute('filename', self.filename)
except Exception as e:
print("File to_element Error", str(e))
return False
return True
class Deck(Base):
def __init__(self, name=""):
super(Deck, self).__init__(name, 'deck')
self.files = list() # of Files
self.images = list() # of Images
self.styles = list() # of Styles
self.default_card = Card("Card Base", xml_tag="defaultcard")
self.default_item_card = Card("Item Card Base", xml_tag="defaultitemcard")
self.default_location_card = Card("Location Card Base", xml_tag="defaultlocationcard")
# Proper order of a deck
self.base = list() # of Cards
self.items = list() # of Cards
self.plan = list() # of Cards
self.misc = list() # of Cards
self.characters = list() # of Cards
self.icon_reference = Card("Icon Reference", xml_tag='iconreference')
self.locations = list() # of Locations
def find_file(self, name, default=None):
for f in self.files:
if f.name == name:
return f
return default
def find_image(self, name, default=None):
for i in self.images:
if i.name == name:
return i
return default
def find_style(self, name, default=Style("default")):
for s in self.styles:
if s.name == name:
return s
return default
def renumber_entities(self):
global_count = 1
# card blocks
for chunk in [self.base, self.items, self.plan, self.misc, self.characters]:
local_count = 1
for card in chunk:
card.card_number = global_count
card.local_card_number = local_count
global_count += 1
local_count += 1
# reference card
self.icon_reference.card_number = global_count
self.icon_reference.local_card_number = local_count
global_count += 1
local_count += 1
# locations
for location in self.locations:
local_count = 1
            for card in location.cards:
card.card_number = global_count
card.local_card_number = local_count
global_count += 1
local_count += 1
def save(self, filename):
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
doc = QtXml.QDomDocument()
# build the DOM
self.to_xml(doc, doc)
# convert the DOM to a string
s = doc.toString()
success = True
try:
fp = open(filename, "wb")
fp.write(bytes(s, "UTF-8"))
fp.close()
except Exception as e:
success = False
QtWidgets.QApplication.restoreOverrideCursor()
return success
def load(self, filename):
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
try:
fp = open(filename, "rb")
xml = fp.read()
fp.close()
except:
QtWidgets.QApplication.restoreOverrideCursor()
return False
doc = QtXml.QDomDocument()
ok, msg, line, col = doc.setContent(xml)
if not ok:
QtWidgets.QApplication.restoreOverrideCursor()
return False
deck = doc.firstChildElement("deck")
if not deck.isNull():
assets = deck.firstChildElement("assets") # the <assets> block
if not assets.isNull():
if not self.parse_assets(assets):
QtWidgets.QApplication.restoreOverrideCursor()
return False
cards = deck.firstChildElement("cards") # the <cards> block
if not cards.isNull():
if not self.parse_cards(cards):
QtWidgets.QApplication.restoreOverrideCursor()
return False
QtWidgets.QApplication.restoreOverrideCursor()
return True
def parse_cards(self, root):
# single cards
# default cards (layering) and the reference card
work = dict(defaultcard=[Card, 'default_card'],
defaultitemcard=[Card, 'default_item_card'],
defaultlocationcard=[Card, 'default_location_card'],
iconreference=[Card, 'icon_reference'])
for tag, v in work.items():
tmp = root.firstChildElement(tag)
if not tmp.isNull():
tmp_obj = v[0].from_element(tmp)
if tmp_obj is not None:
tmp_obj.set_xml_name(tag)
self.__setattr__(v[1], tmp_obj)
# Plan, Items, Base, Characters, Locations - simple lists
# [v0, v1, v2] use v0.from_element() to create an object starting at the tag v2
# make a list of objects at self.{v1}
work = dict(base=[Card, 'base', 'card'],
items=[Card, 'items', 'card'],
plan=[Card, 'plan', 'card'],
misc=[Card, 'misc', 'card'],
characters=[Card, 'characters', 'card'],
locations=[Location, 'locations', 'location'])
for tag, v in work.items():
tmp_root = root.firstChildElement(tag)
if not tmp_root.isNull():
self.__setattr__(v[1], list())
tmp = tmp_root.firstChildElement(v[2])
while not tmp.isNull():
tmp_obj = v[0].from_element(tmp)
if tmp_obj is not None:
self.__getattribute__(v[1]).append(tmp_obj)
tmp = tmp.nextSiblingElement(v[2])
return True
def parse_assets(self, root):
work = dict(file=[File, self.files],
image=[Image, self.images],
style=[Style, self.styles])
for tag, v in work.items():
tmp = root.firstChildElement(tag)
while not tmp.isNull():
tmp_obj = v[0].from_element(tmp)
if tmp_obj is not None:
v[1].append(tmp_obj)
tmp = tmp.nextSiblingElement(tag)
return True
def to_element(self, doc, elem): # the deck element
# assets
tmp = doc.createElement("assets")
elem.appendChild(tmp)
# files, styles, images
for f in self.files:
f.to_xml(doc, tmp)
for s in self.styles:
s.to_xml(doc, tmp)
for i in self.images:
i.to_xml(doc, tmp)
# cards
card_root = doc.createElement("cards")
elem.appendChild(card_root)
# singletons
self.default_card.to_xml(doc, card_root)
self.default_item_card.to_xml(doc, card_root)
self.default_location_card.to_xml(doc, card_root)
self.icon_reference.to_xml(doc, card_root)
# lists: base, items, plan, misc, characters, locations
blocks = dict(base=self.base, plan=self.plan, items=self.items, misc=self.misc,
characters=self.characters, locations=self.locations)
for tag, v in blocks.items():
tag_elem = doc.createElement(tag) # make an element inside of <cards>
card_root.appendChild(tag_elem)
for i in v:
i.to_xml(doc, tag_elem) # write all of the cards into the new element
return True
def build_empty_deck(media_dirs=None):
deck = Deck()
if media_dirs is None:
# Load images from resources
d = QtCore.QDir(":/default_files")
for name in d.entryList():
f = File(name)
f.load_file(":/default_files/"+name, name, store_as_resource=True)
deck.files.append(f)
else:
for d in media_dirs:
for root, dirs, files in os.walk(d):
for name in files:
filename = os.path.join(root, name)
basename, ext = os.path.splitext(os.path.basename(filename))
if ext.lower() in [".jpg", ".png"]:
print("Adding image: {} ({})".format(filename, basename))
f = File(basename)
f.load_file(filename, basename, store_as_resource=False)
deck.files.append(f)
# a default style
deck.styles.append(Style("default"))
return deck
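# Example (hypothetical paths; a minimal sketch of building and saving a deck):
#   deck = build_empty_deck(media_dirs=["./media"])
#   deck.renumber_entities()
#   deck.save("my_deck.xml")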
# <deck>
# <assets>
# <file name="name">pngcontent</file>
# <image name="name">
# <file>name</file>
# <rect_pix>x0 y0 dx dy</rect_pix>
# <usage>face|badge|token...</usage>
# </locked/>
# </image>
# <style name="name">
# <typeface>name</typeface>
# <typesize>size</typesize>
# <fillcolor></fillcolor>
# <borderthickness></borderthickness>
# <bordercolor></bordercolor>
# <textcolor></textcolor>
# <linestyle></linestyle>
# </style>
# </assets>
# <cards>
# <defaultcard>
# <top>
# <image name="top"></image>
# <textblock name="locationname"></textblock>
# </top>
# <bottom>
# <image name="bottom"></image>
# </bottom>
# </defaultcard>
# <defaultitemcard>
# <top>
# <image name="top"></image>
# <textblock name="locationname"></textblock>
# </top>
# <bottom>
# <image name="bottom"></image>
# </bottom>
# </defaultitemcard>
# <defaultlocationcard>
# <top>
# <image name="top"></image>
# <textblock name="locationname"></textblock>
# </top>
# <bottom>
# <image name="bottom"></image>
# </bottom>
# </defaultlocationcard>
# <base>
# <card></card>
# <card></card>
# <card></card>
# <card></card>
# <card></card>
# </base>
# <iconreference>
# <top></top>
# <bottom></bottom>
# </iconreference>
# <characters>
# <card></card>
# <card></card>
# <card></card>
# </characters>
# <plan>
# <card></card>
# <card></card>
# <card></card>
# <card></card>
# </plan>
# <items>
# <card></card>
# <card></card>
# </items>
# <misc>
# <card></card>
# <card></card>
# </misc>
# <locations>
# <location>
# <card></card>
# <card></card>
# <card></card>
# </location>
# <location>
# <card></card>
# <card></card>
# <card></card>
# </location>
# </locations>
# </cards>
# </deck>
# <card name="name">
# <top>
# <render_text name="">
# <rotation>angle</rotation>
# <style>style</style>
# <rectangle>x y dx dy</rectangle>
# </render_text>
# <render_image name="">
# <image>name</image>
# <rectangle>x y dx dy</rectangle>
# </render_image>
# </top>
# <bottom>
# </bottom>
# </card>
| mit | 1,640,969,313,959,279,400 | 32.001391 | 112 | 0.553355 | false | 3.569731 | false | false | false |
nmardosz/freedb-to-json-parser | freedbparser.py | 1 | 7260 | import os
import sys
import xml.sax.handler
import xml.sax
import codecs
import re
import time
import json
reload(sys)
sys.setdefaultencoding('utf8')
releaseCounter = 0
if __name__ == "__main__":
trackslength = []
#trackoffsets = []
disclength = []
prevcombinenum = ''
trackcombinenum = ''
partialtracktitle = ''
tracktitle = []
#alltracksnolast = 0
discartist = ''
disctitle = ''
nexttitle = ''
discyear = ''
discgenre = ''
formattedtrackslengths = []
formattedtracktitles = []
indexer = ''
morethanonetitle = 0
aftertrackframeoffset = 0
genreleaseidname = ''
genreleasenametoid = ''
fo = codecs.open('releases.json','w',encoding='utf8')
fl = codecs.open('parselog.txt','w',encoding='utf8')
#fo = open('releases.json','w')
trackframeoffset = re.compile("#[ \t]+Track[ \t]+frame[ \t]+offsets:")
framematch = re.compile("#[ +\t+]+[0-9]+")
framematchnos = re.compile("#[0-9]+")
framedontmatch = re.compile("#[ +\t+]+[0-9]+[ +\t+\-\_+a-z+:+]+")
disclengthmatch = re.compile("# +Disc +length: +[0-9]+")
tracktitlematch = re.compile("TTITLE[0-9]+=.*")
discartisttitle = re.compile("DTITLE=.*")
discyearmatch = re.compile("DYEAR=.*")
discgenrematch = re.compile("DGENRE=.*")
artiststitlematch = re.compile(" \/ ")
indir = 'D:/FreeDB/FreedbDump/'
for root, dirs, filenames in os.walk(indir):
for filename in filenames:
#with open("65078809") as infile:
if (os.stat(os.path.join(root, filename)).st_size == 0):
continue
with open(os.path.join(root, filename)) as infile:
#print(filename)
fl.write(os.path.join(root, filename) + '\n')
genreleaseidname = os.path.basename(os.path.normpath(root))
if (genreleaseidname == "blues"):
genreleasenametoid = "0001"
if (genreleaseidname == "classical"):
genreleasenametoid = "0002"
if (genreleaseidname == "country"):
genreleasenametoid = "0003"
if (genreleaseidname == "data"):
genreleasenametoid = "0004"
if (genreleaseidname == "folk"):
genreleasenametoid = "0005"
if (genreleaseidname == "jazz"):
genreleasenametoid = "0006"
if (genreleaseidname == "misc"):
genreleasenametoid = "0007"
if (genreleaseidname == "newage"):
genreleasenametoid = "0008"
if (genreleaseidname == "reggae"):
genreleasenametoid = "0009"
if (genreleaseidname == "rock"):
genreleasenametoid = "0010"
if (genreleaseidname == "soundtrack"):
genreleasenametoid = "0011"
for line in infile:
if (trackframeoffset.match(line)):
aftertrackframeoffset = 1
if (aftertrackframeoffset == 1):
if ((not framedontmatch.match(line)) and (framematch.match(line)) or (framematchnos.match(line))):
trackslength.append(map(int, re.findall('\d+', line)))
if (disclengthmatch.match(line)):
disclength.append(map(int, re.findall('\d+', line)))
if (tracktitlematch.match(line)):
trackcombinenum = line.split("=")[0]
if trackcombinenum == prevcombinenum:
prevcombinenum = line.split("=")[0]
partialtracktitle = tracktitle[-1]
partialtracktitle = partialtracktitle.rstrip() + line.split("=")[1].rstrip()
tracktitle[-1] = partialtracktitle
continue
if trackcombinenum != prevcombinenum:
prevcombinenum = line.split("=")[0]
tracktitle.append(line.split("=")[1])
continue
if (discartisttitle.match(line)):
morethanonetitle += 1
if (morethanonetitle == 1):
discartist = line.split(" / ")[0].decode('iso-8859-1').encode("utf-8").rstrip()
discartist = re.sub('DTITLE=', '', discartist)
try:
disctitle = line.split(" / ")[1].decode('iso-8859-1').encode("utf-8").rstrip()
if not disctitle:
disctitle = discartist
except:
disctitle = discartist
if (morethanonetitle > 1):
nexttitle = line.decode('iso-8859-1').encode("utf-8").rstrip()
nexttitle = re.sub('DTITLE=', '', nexttitle)
disctitle += nexttitle.decode('iso-8859-1').encode("utf-8")
nexttitle = ''
if (discyearmatch.match(line)):
discyear = line.split("=")[1]
if (discgenrematch.match(line)):
discgenre = line.split("=")[1]
for idx, item in enumerate(trackslength[:-1]):
currentframe = map(lambda x: float(x)/75, trackslength[idx])
nextframe = map(lambda x: float(x)/75, trackslength[idx + 1])
tracknumlength = [a - b for a, b in zip(nextframe, currentframe)]
m, s = divmod(tracknumlength[0], 60)
h, m = divmod(m, 60)
if(h == 0):
timeconv = "%d:%02d" % (m, s)
else:
timeconv = "%d:%02d:%02d" % (h, m, s)
#currentframe = int(currentframe) / 75
#nextframe = int(nextframe) / 75
#fo.write("tracknumber {0}: length: {1}\n".format(idx + 1, '' .join(map(str, timeconv))))
formattedtrackslengths.append(timeconv)
for item in disclength:
#'' .join(map(str, item))
lasttrackoffset = map(lambda x: float(x)/75, trackslength[-1])
lasttracklength = [a - b for a, b in zip(item, lasttrackoffset)]
m, s = divmod(lasttracklength[0], 60)
h, m = divmod(m, 60)
if(h == 0):
timeconv = "%d:%02d" % (m, s)
else:
timeconv = "%d:%02d:%02d" % (h, m, s)
#fo.write("tracknumber {0}: length: {1}\n".format(len(trackslength), timeconv))
formattedtrackslengths.append(timeconv)
for item in tracktitle:
#fo.write("Title: {0}".format(item))
formattedtracktitles.append(item.decode('iso-8859-1').encode("utf-8").rstrip())
fo.write('{"releaseid": ' + json.dumps(genreleasenametoid + filename.decode('iso-8859-1').encode("utf-8").lower().rstrip()) + ', ')
fo.write('"l_artist_name": ' + json.dumps(discartist.decode('iso-8859-1').encode("utf-8").lower().rstrip()) + ', ')
fo.write('"artist_name": ' + json.dumps(discartist.decode('iso-8859-1').encode("utf-8").rstrip()) + ', ')
fo.write('"l_title": ' + json.dumps(disctitle.decode('iso-8859-1').encode("utf-8").lower().rstrip()) + ', ')
fo.write('"title": ' + json.dumps(disctitle.decode('iso-8859-1').encode("utf-8").rstrip()) + ', ')
fo.write('"year": ' + json.dumps(discyear.decode('iso-8859-1').encode("utf-8").rstrip()) + ', ')
fo.write('"genre": ' + json.dumps(discgenre.decode('iso-8859-1').encode("utf-8").rstrip()) + ', ')
fo.write('"tracklist": [')
if (len(formattedtrackslengths) == 0):
fo.write(']')
if (len(formattedtrackslengths) > 0):
for idx, item in enumerate(formattedtrackslengths):
indexer = idx + 1
fo.write('{"track_position": ' + json.dumps(str(indexer)) + ', "track_title": ' + json.dumps(formattedtracktitles[idx]) + ', "track_duration": ' + json.dumps(formattedtrackslengths[idx]))
if (indexer == len(formattedtrackslengths)):
fo.write('}]')
else:
fo.write('},')
fo.write('}\n')
indexer = ''
trackslength = []
disclength = []
prevcombinenum = ''
trackcombinenum = ''
partialtracktitle = ''
tracktitle = []
discartist = ''
disctitle = ''
discyear = ''
discgenre = ''
formattedtrackslengths = []
formattedtracktitles = []
morethanonetitle = 0
aftertrackframeoffset = 0
infile.close()
fo.close()
fl.close()
| mit | -9,202,248,683,548,465,000 | 36.61658 | 193 | 0.612672 | false | 2.998761 | false | false | false |
dnil/OGTtoPED | OGTtoPED.py | 1 | 3605 | import argparse
import sys
from openpyxl import load_workbook
import config
from sample import Sample
from family import update_family, family_ped
# CLI
parser = argparse.ArgumentParser(description="Convert OGT xlsx to PED file")
parser.add_argument("orderform",
help="OGT order form with sample ID, status and family groups.")
parser.add_argument("outfile", help="Output PED file", nargs='?')
parser.add_argument("-D", "--debug", help="Enable DEBUG output.",
action="store_true")
args = parser.parse_args()
if args.debug:
    print >> sys.stderr, "DEBUG output turned on."
    config.debug = True
config.outfile = args.outfile
# Truncate output ped file
if config.outfile is not None:
out = open(config.outfile, 'w')
else:
out = sys.stdout
# Open workbook
wb = load_workbook(filename = args.orderform)
ws = wb.active
# Sanity checks
if ws.title != "material form":
print(sys.stderr, "WARNING: Non standard active sheet name ", ws.title)
if (ws['B12'].value != "Customer Sample ID"
or ws['M12'].value != "Additional Experimental Comments"
or ws['C12'].value != "Source (cells, tissue etc)"):
print(sys.stderr, ("Unexpected table / cell layout: check to see"
"that sheet is ok, and ask to have the script updated."))
exit(1)
# Main
# Iterate over all rows, parse row blocks
in_sample_section = False
in_family = False
max_rows = 1024
samples_found = 0
family = []
family_count = 0
for rownum in range(1,max_rows+1):
cell=ws["B" + str(rownum)]
if not in_sample_section:
if cell.value == "Customer Sample ID":
if config.debug:
print(sys.stderr, "Found sample ID tag.")
in_sample_section = True
else:
if cell.value is not None:
# Found a new sample row.
sample_id = cell.value
sample_id.rstrip()
if not in_family:
if config.debug:
print(sys.stderr, ("New family, starting with sample "
"'{}'").format(sample_id))
family_count += 1
info_cell = ws["M" + str(rownum)]
info = info_cell.value
if info is None:
info = "NA"
info.rstrip()
tissue_cell = ws["C" + str(rownum)]
tissue = tissue_cell.value
if tissue is None:
tissue = "NA"
tissue.rstrip()
sample = Sample(sample_id, info, tissue)
in_family = True
family.append(sample)
if sample.info.find("singleton") != -1:
# Found a singleton!
sample.affected = True
update_family(family)
print >> out, family_ped(family, family_count).rstrip()
# This ends the current family.
if config.debug:
print(sys.stderr, "Found a singleton. Family complete.")
family = []
in_family = False
# Note that the next row may be a None or a new family member..
samples_found += 1
elif cell.value is None:
# Value None means an empty row.
if in_family:
# This ends the current family.
if config.debug:
print(sys.stderr, "Family complete.")
update_family(family)
print >> out, family_ped(family, family_count).rstrip()
family = []
in_family = False
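# Note: a family still open when the row scan ends relies on trailing empty
# rows in the sheet to be flushed; a defensive final flush (hypothetical,
# mirroring the block above) would be:
#   if in_family:
#       update_family(family)
#       print >> out, family_ped(family, family_count).rstrip()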
| artistic-2.0 | 893,453,471,902,956,800 | 27.84 | 79 | 0.553398 | false | 4.055118 | true | false | false |
ryanmiao/libvirt-test-API | utils/xml_parser.py | 1 | 7336 | #!/usr/bin/env python
#
# xml_parser.py: Parse XML document, the result is a python dict.
#
# Copyright (C) 2010-2012 Red Hat, Inc.
#
# libvirt-test-API is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranties of
# TITLE, NON-INFRINGEMENT, MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from xml.dom import minidom
import StringIO
class xml_parser(object):
"""Class xml_parser. It parses and xml document into a python dictionary.
The elements of the xml documents will be python dictionary keys. For
example, the xml document:
<firstel>firstdata</firstel>
<secondel>
<subinsecond> seconddata </subinsecond>
</secondel>
will be parsed into the python dictionary:
{ "firstel":"firstdata" , "secondel":{"subsinsecond":"seconddata"} }
       Then the data can be retrieved as:
out = xml_parser.xml_parser().parse(xml)
out["firstel"] (this will be firstdata )
out["secondel"]["subinsecond"] (this will be seconddata)
       Attributes are put into an "attr" dict. For example, if the xml document is:
<source>
<device path = '/dev/mapper/vg_hpdl120g501-lv_home'/>
</source>
It will be parsed into:
out["source"]["device"]["attr"]["path"]
which will be set to:
"/dev/mapper/vg_hpdl120g501-lv_home"
"""
def __init__(self):
pass
def parse(self, arg):
out = None
if type(arg) == file:
out = self.parsefile(arg)
elif os.path.exists(arg):
print "file: %s " % arg
out = self.parsefile(arg)
else:
streamstr = StringIO.StringIO(arg)
out = self.parsefile(streamstr)
if out != None:
return out
def parsefile(self, filepath):
xmldoc = minidom.parse(filepath)
thenode = xmldoc.firstChild
outdic = dict()
self.parseintodict(thenode, 0, outdic)
return outdic
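    # Example (a minimal sketch of the XML-to-dict mapping):
    #   out = xml_parser().parse("<a><b>1</b><c><d x='y'/></c></a>")
    #   out["b"]                     # -> '1'
    #   out["c"]["d"]["attr"]["x"]   # -> 'y'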
def parseintodict(self, node, level, out, rootkey = None):
for thenode in node.childNodes:
if thenode.nodeType == node.ELEMENT_NODE:
key = thenode.nodeName
value = None
try:
value = thenode.childNodes[0].data
if value.strip() == '':
value = None
except:
value = None
newdict = { key:value }
attrdic = None
if rootkey != None:
self.keyfindandset(out, rootkey, thenode)
else:
if thenode.attributes != None:
tmpattr = dict()
if thenode.attributes.length > 0:
for attrkey in thenode.attributes.keys():
tmpattr.update(
{attrkey:thenode.attributes.get(attrkey).nodeValue})
attrdic = { "attr":tmpattr }
if key in out:
if out[key] == None:
if attrdic != None:
if value == None:
out[key] = attrdic
else:
valdic = { "value":value }
valdic.update(attrdic)
out[key] = valdic
else:
out[key] = value
elif type(out[key]) == list:
if attrdic != None:
newdict.update(attrdic)
out[key].append(newdict)
elif type(out[key]) == dict:
if attrdic != None:
newdict.update(attrdic)
out[key].update(newdict)
else:
tmp = out[key]
out[key] = [tmp, value]
else:
out[key] = value
if attrdic != None:
if value == None:
newdict[key] = attrdic
else:
valdic = { "value":value }
valdic.update(attrdic)
newdict = valdic
out[key] = newdict
self.parseintodict(thenode, level+1, out, key)
return out
def keyfindandset(self, thedict, thekey, thenode):
# get the key/value pair from the node.
newvalkey = thenode.nodeName
value = None
try:
value = thenode.childNodes[0].data
if value.strip() == '':
value = None
except:
value = None
newval = { newvalkey:value }
attrdic = None
if thenode.attributes != None:
tmpattr = dict()
if thenode.attributes.length > 0:
for key in thenode.attributes.keys():
tmpattr.update(
{key:thenode.attributes.get(key).nodeValue})
attrdic = { "attr":tmpattr }
if attrdic != None:
if value == None:
newval.update({newvalkey:attrdic})
else:
valdic = { "value":value }
newval.update(valdic)
newval.update(attrdic)
for key in thedict.keys():
if key == thekey:
if type(thedict[key]) == dict:
if newvalkey in thedict[key]:
if newval[newvalkey] != None:
tmpdic = thedict[key][newvalkey]
thedict[key][newvalkey] = [tmpdic]
thedict[key][newvalkey].append(newval)
else:
if type(thedict[key][newvalkey]) == list:
thedict[key][newvalkey].append(dict())
else:
tmpdic = thedict[key][newvalkey]
thedict[key][newvalkey] = [tmpdic]
thedict[key][newvalkey].append(dict())
else:
thedict[key].update(newval)
elif type(thedict[key]) == list:
if newvalkey in thedict[key][-1]:
thedict[key].append(newval)
else:
thedict[key][-1].update(newval)
else:
thedict[key] = newval
if type(thedict[key]) == dict:
self.keyfindandset(thedict[key], thekey, thenode)
| gpl-2.0 | 4,921,790,545,853,446,000 | 39.530387 | 84 | 0.466876 | false | 4.640101 | false | false | false |
ksmit799/Toontown-Source | toontown/cogdominium/CogdoMazeGameObjects.py | 1 | 10289 | from pandac.PandaModules import CollisionSphere, CollisionTube, CollisionNode
from pandac.PandaModules import NodePath, BitMask32
from pandac.PandaModules import Point3, Point4, WaitInterval, Vec3, Vec4
from direct.interval.IntervalGlobal import LerpScaleInterval, LerpColorScaleInterval, LerpPosInterval, LerpFunc
from direct.interval.IntervalGlobal import Func, Sequence, Parallel
from direct.showbase.DirectObject import DirectObject
from direct.task.Task import Task
from toontown.toonbase import ToontownGlobals
import CogdoMazeGameGlobals as Globals
from CogdoGameExit import CogdoGameExit
import CogdoUtil
import math
import random
class CogdoMazeSplattable:
def __init__(self, object, name, collisionRadius):
self.object = object
self.splat = CogdoUtil.loadMazeModel('splash')
self.splat.setBillboardPointEye()
self.splat.setBin('fixed', 40)
self.splat.setDepthTest(False)
self.splat.setDepthWrite(False)
self.splatTrack = None
self._splatSfxIval = base.cogdoGameAudioMgr.createSfxIval('splat')
self.initGagCollision(name, collisionRadius)
return
def destroy(self):
self.disableGagCollision()
if self._splatSfxIval.isPlaying():
self._splatSfxIval.finish()
del self._splatSfxIval
def initGagCollision(self, name, radius):
self.gagCollisionName = name
collision = CollisionTube(0, 0, 0, 0, 0, 4, radius)
collision.setTangible(1)
self.gagCollNode = CollisionNode(self.gagCollisionName)
self.gagCollNode.setIntoCollideMask(ToontownGlobals.PieBitmask)
self.gagCollNode.addSolid(collision)
self.gagCollNodePath = self.object.attachNewNode(self.gagCollNode)
def disableGagCollision(self):
self.gagCollNodePath.removeNode()
def doSplat(self):
if self.splatTrack and self.splatTrack.isPlaying():
self.splatTrack.finish()
self.splat.reparentTo(render)
self.splat.setPos(self.object, 0, 0, 3.0)
self.splat.setY(self.splat.getY() - 1.0)
self._splatSfxIval.node = self.splat
self.splatTrack = Parallel(self._splatSfxIval, Sequence(Func(self.splat.showThrough), LerpScaleInterval(self.splat, duration=0.5, scale=6, startScale=1, blendType='easeOut'), Func(self.splat.hide)))
self.splatTrack.start()
class CogdoMazeDrop(NodePath, DirectObject):
def __init__(self, game, id, x, y):
NodePath.__init__(self, 'dropNode%s' % id)
self.game = game
self.id = id
self.reparentTo(hidden)
self.setPos(x, y, 0)
shadow = loader.loadModel('phase_3/models/props/square_drop_shadow')
shadow.setZ(0.2)
shadow.setBin('ground', 10)
shadow.setColor(1, 1, 1, 1)
shadow.reparentTo(self)
self.shadow = shadow
drop = CogdoUtil.loadMazeModel('cabinetSmFalling')
roll = random.randint(-15, 15)
drop.setHpr(0, 0, roll)
drop.setZ(Globals.DropHeight)
self.collTube = CollisionTube(0, 0, 0, 0, 0, 4, Globals.DropCollisionRadius)
self.collTube.setTangible(0)
name = Globals.DropCollisionName
self.collNode = CollisionNode(name)
self.collNode.addSolid(self.collTube)
self.collNodePath = drop.attachNewNode(self.collNode)
self.collNodePath.hide()
self.collNodePath.setTag('isFalling', str('True'))
drop.reparentTo(self)
self.drop = drop
self._dropSfx = base.cogdoGameAudioMgr.createSfxIval('drop', volume=0.6)
def disableCollisionDamage(self):
self.collTube.setTangible(1)
self.collTube.setRadius(Globals.DroppedCollisionRadius)
self.collNode.setIntoCollideMask(ToontownGlobals.WallBitmask)
self.collNodePath.setTag('isFalling', str('False'))
def getDropIval(self):
shadow = self.shadow
drop = self.drop
id = self.id
hangTime = Globals.ShadowTime
dropTime = Globals.DropTime
dropHeight = Globals.DropHeight
targetShadowScale = 0.5
targetShadowAlpha = 0.4
shadowScaleIval = LerpScaleInterval(shadow, dropTime, targetShadowScale, startScale=0)
shadowAlphaIval = LerpColorScaleInterval(shadow, hangTime, Point4(1, 1, 1, targetShadowAlpha), startColorScale=Point4(1, 1, 1, 0))
shadowIval = Parallel(shadowScaleIval, shadowAlphaIval)
startPos = Point3(0, 0, dropHeight)
drop.setPos(startPos)
dropIval = LerpPosInterval(drop, dropTime, Point3(0, 0, 0), startPos=startPos, blendType='easeIn')
dropSoundIval = self._dropSfx
dropSoundIval.node = self
self.drop.setTransparency(1)
def _setRandScale(t):
self.drop.setScale(self, 1 - random.random() / 16, 1 - random.random() / 16, 1 - random.random() / 4)
scaleChange = 0.4 + random.random() / 4
dropShakeSeq = Sequence(LerpScaleInterval(self.drop, 0.25, Vec3(1.0 + scaleChange, 1.0 + scaleChange / 2, 1.0 - scaleChange), blendType='easeInOut'), LerpScaleInterval(self.drop, 0.25, Vec3(1.0, 1.0, 1.0), blendType='easeInOut'), Func(self.disableCollisionDamage), LerpScaleInterval(self.drop, 0.2, Vec3(1.0 + scaleChange / 8, 1.0 + scaleChange / 8, 1.0 - scaleChange / 8), blendType='easeInOut'), LerpScaleInterval(self.drop, 0.2, Vec3(1.0, 1.0, 1.0), blendType='easeInOut'), LerpScaleInterval(self.drop, 0.15, Vec3(1.0 + scaleChange / 16, 1.0 + scaleChange / 16, 1.0 - scaleChange / 16), blendType='easeInOut'), LerpScaleInterval(self.drop, 0.15, Vec3(1.0, 1.0, 1.0), blendType='easeInOut'), LerpScaleInterval(self.drop, 0.1, Vec3(1.0 + scaleChange / 16, 1.0 + scaleChange / 8, 1.0 - scaleChange / 16), blendType='easeInOut'), LerpColorScaleInterval(self.drop, Globals.DropFadeTime, Vec4(1.0, 1.0, 1.0, 0.0)))
ival = Sequence(Func(self.reparentTo, render), Parallel(Sequence(WaitInterval(hangTime), dropIval), shadowIval), Parallel(Func(self.game.dropHit, self, id), dropSoundIval, dropShakeSeq), Func(self.game.cleanupDrop, id), name='drop%s' % id)
self.ival = ival
return ival
def destroy(self):
self.ival.pause()
self.ival = None
self._dropSfx.pause()
self._dropSfx = None
self.collTube = None
self.collNode = None
self.collNodePath.removeNode()
self.collNodePath = None
self.removeNode()
return
class CogdoMazeExit(CogdoGameExit, DirectObject):
EnterEventName = 'CogdoMazeDoor_Enter'
def __init__(self):
CogdoGameExit.__init__(self)
self.revealed = False
self._players = []
self._initCollisions()
def _initCollisions(self):
collSphere = CollisionSphere(0, 0, 0, 3.0)
collSphere.setTangible(0)
self.collNode = CollisionNode(self.getName())
self.collNode.addSolid(collSphere)
self.collNP = self.attachNewNode(self.collNode)
def destroy(self):
self.ignoreAll()
CogdoGameExit.destroy(self)
def enable(self):
self.collNode.setFromCollideMask(ToontownGlobals.WallBitmask)
self.accept('enter' + self.getName(), self._handleEnterCollision)
def disable(self):
self.ignore('enter' + self.getName())
self.collNode.setFromCollideMask(BitMask32(0))
def _handleEnterCollision(self, collEntry):
messenger.send(CogdoMazeExit.EnterEventName, [self])
def onstage(self):
self.unstash()
self.enable()
def offstage(self):
self.stash()
self.disable()
def playerEntersDoor(self, player):
if player not in self._players:
self._players.append(player)
self.toonEnters(player.toon)
def getPlayerCount(self):
return len(self._players)
def hasPlayer(self, player):
return player in self._players
class CogdoMazeWaterCooler(NodePath, DirectObject):
UpdateTaskName = 'CogdoMazeWaterCooler_Update'
def __init__(self, serialNum, model):
NodePath.__init__(self, 'CogdoMazeWaterCooler-%i' % serialNum)
self.serialNum = serialNum
self._model = model
self._model.reparentTo(self)
self._model.setPosHpr(0, 0, 0, 0, 0, 0)
self._initCollisions()
self._initArrow()
self._update = None
self.__startUpdateTask()
return
def destroy(self):
self.ignoreAll()
self.__stopUpdateTask()
self.collNodePath.removeNode()
self.removeNode()
def _initCollisions(self):
offset = Globals.WaterCoolerTriggerOffset
self.collSphere = CollisionSphere(offset[0], offset[1], offset[2], Globals.WaterCoolerTriggerRadius)
self.collSphere.setTangible(0)
name = Globals.WaterCoolerCollisionName
self.collNode = CollisionNode(name)
self.collNode.addSolid(self.collSphere)
self.collNodePath = self.attachNewNode(self.collNode)
def _initArrow(self):
matchingGameGui = loader.loadModel('phase_3.5/models/gui/matching_game_gui')
arrow = matchingGameGui.find('**/minnieArrow')
arrow.setScale(Globals.CoolerArrowScale)
arrow.setColor(*Globals.CoolerArrowColor)
arrow.setPos(0, 0, Globals.CoolerArrowZ)
arrow.setHpr(0, 0, 90)
arrow.setBillboardAxis()
self._arrow = NodePath('Arrow')
arrow.reparentTo(self._arrow)
self._arrow.reparentTo(self)
self._arrowTime = 0
self.accept(Globals.WaterCoolerShowEventName, self.showArrow)
self.accept(Globals.WaterCoolerHideEventName, self.hideArrow)
matchingGameGui.removeNode()
def showArrow(self):
self._arrow.unstash()
def hideArrow(self):
self._arrow.stash()
def update(self, dt):
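        # Bob the arrow on a sine wave: the frame time scaled by
        # CoolerArrowSpeed sets the frequency, CoolerArrowBounce the amplitude.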
newZ = math.sin(globalClock.getFrameTime() * Globals.CoolerArrowSpeed) * Globals.CoolerArrowBounce
self._arrow.setZ(newZ)
def __startUpdateTask(self):
self.__stopUpdateTask()
self._update = taskMgr.add(self._updateTask, self.UpdateTaskName, 45)
def __stopUpdateTask(self):
if self._update is not None:
taskMgr.remove(self._update)
return
def _updateTask(self, task):
dt = globalClock.getDt()
self.update(dt)
return Task.cont
| mit | 8,465,987,789,883,547,000 | 39.507874 | 919 | 0.666731 | false | 3.272583 | false | false | false |
chromium/chromium | ui/ozone/generate_constructor_list.py | 5 | 5576 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Code generator for PlatformObject<> constructor list.
This script takes as arguments a list of platform names as a text file and
a list of types and generates a C++ source file containing a list of
the constructors for that object in platform order.
Example Output: ./ui/ozone/generate_constructor_list.py \
--platform test \
--platform dri \
--export OZONE \
--namespace ui \
--typename OzonePlatform \
--include '"ui/ozone/ozone_platform.h"'
// DO NOT MODIFY. GENERATED BY generate_constructor_list.py
#include "ui/ozone/platform_object_internal.h"
#include "ui/ozone/ozone_platform.h"
namespace ui {
OzonePlatform* CreateOzonePlatformTest();
OzonePlatform* CreateOzonePlatformDri();
} // namespace ui
namespace ui {
typedef ui::OzonePlatform* (*OzonePlatformConstructor)();
template <> const OzonePlatformConstructor
PlatformConstructorList<ui::OzonePlatform>::kConstructors[] = {
&ui::CreateOzonePlatformTest,
&ui::CreateOzonePlatformDri,
};
template class COMPONENT_EXPORT(OZONE) PlatformObject<ui::OzonePlatform>;
} // namespace ui
"""
try:
from StringIO import StringIO # for Python 2
except ImportError:
from io import StringIO # for Python 3
import optparse
import os
import collections
import re
import sys
def GetTypedefName(typename):
"""Determine typedef name of constructor for typename.
This is just typename + "Constructor".
"""
return typename + 'Constructor'
def GetConstructorName(typename, platform):
"""Determine name of static constructor function from platform name.
This is just "Create" + typename + platform.
"""
return 'Create' + typename + platform.capitalize()
def GenerateConstructorList(out, namespace, export, typenames, platforms,
includes, usings):
"""Generate static array containing a list of constructors."""
out.write('// DO NOT MODIFY. GENERATED BY generate_constructor_list.py\n')
out.write('\n')
out.write('#include "ui/ozone/platform_object_internal.h"\n')
out.write('\n')
for include in includes:
out.write('#include %(include)s\n' % {'include': include})
out.write('\n')
for using in usings:
out.write('using %(using)s;\n' % {'using': using})
out.write('\n')
out.write('namespace %(namespace)s {\n' % {'namespace': namespace})
out.write('\n')
# Declarations of constructor functions.
for typename in typenames:
for platform in platforms:
constructor = GetConstructorName(typename, platform)
out.write('%(typename)s* %(constructor)s();\n'
% {'typename': typename,
'constructor': constructor})
out.write('\n')
out.write('} // namespace %(namespace)s\n' % {'namespace': namespace})
out.write('\n')
out.write('namespace ui {\n')
out.write('\n')
# Handy typedefs for constructor types.
for typename in typenames:
out.write('typedef %(typename)s* (*%(typedef)s)();\n'
% {'typename': typename,
'typedef': GetTypedefName(typename)})
out.write('\n')
# The actual constructor lists.
for typename in typenames:
out.write('template <> const %(typedef)s\n'
% {'typedef': GetTypedefName(typename)})
out.write('PlatformConstructorList<%(typename)s>::kConstructors[] = {\n'
% {'typename': typename})
for platform in platforms:
constructor = GetConstructorName(typename, platform)
out.write(' &%(namespace)s::%(constructor)s,\n'
% {'namespace': namespace, 'constructor': constructor})
out.write('};\n')
out.write('\n')
# Exported template instantiation.
for typename in typenames:
out.write('template class COMPONENT_EXPORT(%(export)s)' \
' PlatformObject<%(typename)s>;\n'
% {'export': export, 'typename': typename})
out.write('\n')
out.write('} // namespace ui\n')
out.write('\n')
def main(argv):
parser = optparse.OptionParser()
parser.add_option('--namespace', default='ozone')
parser.add_option('--export', default='OZONE')
parser.add_option('--platform_list')
parser.add_option('--output_cc')
parser.add_option('--include', action='append', default=[])
parser.add_option('--platform', action='append', default=[])
parser.add_option('--typename', action='append', default=[])
parser.add_option('--using', action='append', default=[])
options, _ = parser.parse_args(argv)
platforms = list(options.platform)
typenames = list(options.typename)
includes = list(options.include)
usings = list(options.using)
if options.platform_list:
platforms = open(options.platform_list, 'r').read().strip().split('\n')
if not platforms:
sys.stderr.write('No platforms are selected!')
sys.exit(1)
# Write to standard output or file specified by --output_cc.
out_cc = getattr(sys.stdout, 'buffer', sys.stdout)
if options.output_cc:
out_cc = open(options.output_cc, 'wb')
out_cc_str = StringIO()
GenerateConstructorList(out_cc_str, options.namespace, options.export,
typenames, platforms, includes, usings)
out_cc.write(out_cc_str.getvalue().encode('utf-8'))
if options.output_cc:
out_cc.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | -1,612,006,540,855,881,200 | 29.140541 | 76 | 0.65477 | false | 3.869535 | false | false | false |
jgibbons-cp/sva_scan_examples | app/sva_scan_examples/config_helper.py | 1 | 1913 | import os
import sys
class ConfigHelper():
"""
Manage all configuration information for the application
"""
def __init__(self):
TRUE = "True"
FALSE = "False"
ERROR = 1
self.halo_key = os.getenv("HALO_API_KEY")
self.halo_secret = os.getenv("HALO_API_SECRET_KEY")
# get the results directory and create it if it does not exist
scan_results_directory = os.environ["SCAN_RESULTS_DIRECTORY"] = \
"/tmp/scan_results/"
path_exists = os.path.exists(scan_results_directory)
if not path_exists:
try:
os.mkdir(scan_results_directory)
path_exists = os.path.exists(scan_results_directory)
except OSError:
pass
days_for_scan_age = os.environ["DAYS_FOR_SCAN_AGE"] = "0"
days_for_scan_age = int(days_for_scan_age)
days_string_is_int_value = isinstance(days_for_scan_age, int)
os.environ["HALO_SERVER_GROUP"] = "Git"
scan_examples = os.environ["SCAN_EXAMPLES"] = "False"
heartbeat_interval = os.environ["HEARTBEAT_INTERVAL"] = "60"
heartbeat_interval = int(heartbeat_interval)
hi_string_is_int_value = isinstance(heartbeat_interval, int)
# for unit tests Travis populates the IP
server_ip = "<server_ip>"
os.environ["SERVER_IP"] = server_ip
unit_tests = os.environ["UNIT_TESTS"] = "no_unit_tests" # NOQA
if self.halo_key is None or self.halo_secret is None \
or not os.path.exists(scan_results_directory) or not path_exists \
           or not days_string_is_int_value \
           or not hi_string_is_int_value \
           or (scan_examples != TRUE and scan_examples != FALSE):
print "Configuration validation failed... exiting...\n"
sys.exit(ERROR)
| bsd-2-clause | 6,427,597,855,864,617,000 | 35.788462 | 79 | 0.578672 | false | 3.765748 | false | false | false |
emakis/erpnext | erpnext/controllers/taxes_and_totals.py | 1 | 24666 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import json
import frappe, erpnext
from frappe import _, scrub
from frappe.utils import cint, flt, cstr, fmt_money, round_based_on_smallest_currency_fraction
from erpnext.controllers.accounts_controller import validate_conversion_rate, \
validate_taxes_and_charges, validate_inclusive_tax
class calculate_taxes_and_totals(object):
def __init__(self, doc):
self.doc = doc
self.calculate()
def calculate(self):
self.discount_amount_applied = False
self._calculate()
if self.doc.meta.get_field("discount_amount"):
self.set_discount_amount()
self.apply_discount_amount()
if self.doc.doctype in ["Sales Invoice", "Purchase Invoice"]:
self.calculate_total_advance()
if self.doc.meta.get_field("other_charges_calculation"):
self.set_item_wise_tax_breakup()
def _calculate(self):
self.calculate_item_values()
self.initialize_taxes()
self.determine_exclusive_rate()
self.calculate_net_total()
self.calculate_taxes()
self.manipulate_grand_total_for_inclusive_tax()
self.calculate_totals()
self._cleanup()
def validate_conversion_rate(self):
# validate conversion rate
company_currency = erpnext.get_company_currency(self.doc.company)
if not self.doc.currency or self.doc.currency == company_currency:
self.doc.currency = company_currency
self.doc.conversion_rate = 1.0
else:
validate_conversion_rate(self.doc.currency, self.doc.conversion_rate,
self.doc.meta.get_label("conversion_rate"), self.doc.company)
self.doc.conversion_rate = flt(self.doc.conversion_rate)
def calculate_item_values(self):
if not self.discount_amount_applied:
for item in self.doc.get("items"):
self.doc.round_floats_in(item)
if item.discount_percentage == 100:
item.rate = 0.0
elif not item.rate:
item.rate = flt(item.price_list_rate *
(1.0 - (item.discount_percentage / 100.0)), item.precision("rate"))
if item.doctype in ['Quotation Item', 'Sales Order Item', 'Delivery Note Item', 'Sales Invoice Item']:
item.rate_with_margin = self.calculate_margin(item)
item.rate = flt(item.rate_with_margin * (1.0 - (item.discount_percentage / 100.0)), item.precision("rate"))\
if item.rate_with_margin > 0 else item.rate
item.net_rate = item.rate
item.amount = flt(item.rate * item.qty, item.precision("amount"))
item.net_amount = item.amount
self._set_in_company_currency(item, ["price_list_rate", "rate", "net_rate", "amount", "net_amount"])
item.item_tax_amount = 0.0
def _set_in_company_currency(self, doc, fields):
"""set values in base currency"""
for f in fields:
val = flt(flt(doc.get(f), doc.precision(f)) * self.doc.conversion_rate, doc.precision("base_" + f))
doc.set("base_" + f, val)
def initialize_taxes(self):
for tax in self.doc.get("taxes"):
if not self.discount_amount_applied:
validate_taxes_and_charges(tax)
validate_inclusive_tax(tax, self.doc)
tax.item_wise_tax_detail = {}
tax_fields = ["total", "tax_amount_after_discount_amount",
"tax_amount_for_current_item", "grand_total_for_current_item",
"tax_fraction_for_current_item", "grand_total_fraction_for_current_item"]
if tax.charge_type != "Actual" and \
not (self.discount_amount_applied and self.doc.apply_discount_on=="Grand Total"):
tax_fields.append("tax_amount")
for fieldname in tax_fields:
tax.set(fieldname, 0.0)
self.doc.round_floats_in(tax)
def determine_exclusive_rate(self):
if not any((cint(tax.included_in_print_rate) for tax in self.doc.get("taxes"))):
return
for item in self.doc.get("items"):
item_tax_map = self._load_item_tax_rate(item.item_tax_rate)
cumulated_tax_fraction = 0
for i, tax in enumerate(self.doc.get("taxes")):
tax.tax_fraction_for_current_item = self.get_current_tax_fraction(tax, item_tax_map)
if i==0:
tax.grand_total_fraction_for_current_item = 1 + tax.tax_fraction_for_current_item
else:
tax.grand_total_fraction_for_current_item = \
self.doc.get("taxes")[i-1].grand_total_fraction_for_current_item \
+ tax.tax_fraction_for_current_item
cumulated_tax_fraction += tax.tax_fraction_for_current_item
if cumulated_tax_fraction and not self.discount_amount_applied and item.qty:
item.net_amount = flt(item.amount / (1 + cumulated_tax_fraction), item.precision("net_amount"))
item.net_rate = flt(item.net_amount / item.qty, item.precision("net_rate"))
item.discount_percentage = flt(item.discount_percentage, item.precision("discount_percentage"))
self._set_in_company_currency(item, ["net_rate", "net_amount"])
def _load_item_tax_rate(self, item_tax_rate):
return json.loads(item_tax_rate) if item_tax_rate else {}
def get_current_tax_fraction(self, tax, item_tax_map):
"""
Get tax fraction for calculating tax exclusive amount
from tax inclusive amount
"""
current_tax_fraction = 0
if cint(tax.included_in_print_rate):
tax_rate = self._get_tax_rate(tax, item_tax_map)
if tax.charge_type == "On Net Total":
current_tax_fraction = tax_rate / 100.0
elif tax.charge_type == "On Previous Row Amount":
current_tax_fraction = (tax_rate / 100.0) * \
self.doc.get("taxes")[cint(tax.row_id) - 1].tax_fraction_for_current_item
elif tax.charge_type == "On Previous Row Total":
current_tax_fraction = (tax_rate / 100.0) * \
self.doc.get("taxes")[cint(tax.row_id) - 1].grand_total_fraction_for_current_item
if getattr(tax, "add_deduct_tax", None):
current_tax_fraction *= -1.0 if (tax.add_deduct_tax == "Deduct") else 1.0
return current_tax_fraction
def _get_tax_rate(self, tax, item_tax_map):
		if tax.account_head in item_tax_map:
return flt(item_tax_map.get(tax.account_head), self.doc.precision("rate", tax))
else:
return tax.rate
def calculate_net_total(self):
self.doc.total = self.doc.base_total = self.doc.net_total = self.doc.base_net_total = 0.0
for item in self.doc.get("items"):
self.doc.total += item.amount
self.doc.base_total += item.base_amount
self.doc.net_total += item.net_amount
self.doc.base_net_total += item.base_net_amount
self.doc.round_floats_in(self.doc, ["total", "base_total", "net_total", "base_net_total"])
def calculate_taxes(self):
# maintain actual tax rate based on idx
actual_tax_dict = dict([[tax.idx, flt(tax.tax_amount, tax.precision("tax_amount"))]
for tax in self.doc.get("taxes") if tax.charge_type == "Actual"])
for n, item in enumerate(self.doc.get("items")):
item_tax_map = self._load_item_tax_rate(item.item_tax_rate)
for i, tax in enumerate(self.doc.get("taxes")):
# tax_amount represents the amount of tax for the current step
current_tax_amount = self.get_current_tax_amount(item, tax, item_tax_map)
# Adjust divisional loss to the last item
if tax.charge_type == "Actual":
actual_tax_dict[tax.idx] -= current_tax_amount
if n == len(self.doc.get("items")) - 1:
current_tax_amount += actual_tax_dict[tax.idx]
# accumulate tax amount into tax.tax_amount
if tax.charge_type != "Actual" and \
not (self.discount_amount_applied and self.doc.apply_discount_on=="Grand Total"):
tax.tax_amount += current_tax_amount
# store tax_amount for current item as it will be used for
# charge type = 'On Previous Row Amount'
tax.tax_amount_for_current_item = current_tax_amount
# set tax after discount
tax.tax_amount_after_discount_amount += current_tax_amount
if getattr(tax, "category", None):
# if just for valuation, do not add the tax amount in total
# hence, setting it as 0 for further steps
current_tax_amount = 0.0 if (tax.category == "Valuation") \
else current_tax_amount
current_tax_amount *= -1.0 if (tax.add_deduct_tax == "Deduct") else 1.0
# Calculate tax.total viz. grand total till that step
# note: grand_total_for_current_item contains the contribution of
# item's amount, previously applied tax and the current tax on that item
if i==0:
tax.grand_total_for_current_item = flt(item.net_amount + current_tax_amount, tax.precision("total"))
else:
tax.grand_total_for_current_item = \
flt(self.doc.get("taxes")[i-1].grand_total_for_current_item + current_tax_amount, tax.precision("total"))
# in tax.total, accumulate grand total of each item
tax.total += tax.grand_total_for_current_item
# set precision in the last item iteration
if n == len(self.doc.get("items")) - 1:
self.round_off_totals(tax)
# adjust Discount Amount loss in last tax iteration
if i == (len(self.doc.get("taxes")) - 1) and self.discount_amount_applied \
and self.doc.discount_amount and self.doc.apply_discount_on == "Grand Total":
self.adjust_discount_amount_loss(tax)
def get_current_tax_amount(self, item, tax, item_tax_map):
tax_rate = self._get_tax_rate(tax, item_tax_map)
current_tax_amount = 0.0
if tax.charge_type == "Actual":
# distribute the tax amount proportionally to each item row
actual = flt(tax.tax_amount, tax.precision("tax_amount"))
current_tax_amount = item.net_amount*actual / self.doc.net_total if self.doc.net_total else 0.0
elif tax.charge_type == "On Net Total":
current_tax_amount = (tax_rate / 100.0) * item.net_amount
elif tax.charge_type == "On Previous Row Amount":
current_tax_amount = (tax_rate / 100.0) * \
self.doc.get("taxes")[cint(tax.row_id) - 1].tax_amount_for_current_item
elif tax.charge_type == "On Previous Row Total":
current_tax_amount = (tax_rate / 100.0) * \
self.doc.get("taxes")[cint(tax.row_id) - 1].grand_total_for_current_item
current_tax_amount = flt(current_tax_amount, tax.precision("tax_amount"))
self.set_item_wise_tax(item, tax, tax_rate, current_tax_amount)
return current_tax_amount
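	# A hedged example of the "Actual" charge distribution above
	# (illustrative numbers, not from the original source): with
	# net_total = 200.0 and an actual tax amount of 30.0, an item with
	# net_amount = 50.0 receives 50.0 * 30.0 / 200.0 = 7.5 of the tax.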
def set_item_wise_tax(self, item, tax, tax_rate, current_tax_amount):
# store tax breakup for each item
key = item.item_code or item.item_name
item_wise_tax_amount = current_tax_amount*self.doc.conversion_rate
if tax.item_wise_tax_detail.get(key):
item_wise_tax_amount += tax.item_wise_tax_detail[key][1]
tax.item_wise_tax_detail[key] = [tax_rate,flt(item_wise_tax_amount, tax.precision("base_tax_amount"))]
def round_off_totals(self, tax):
tax.total = flt(tax.total, tax.precision("total"))
tax.tax_amount = flt(tax.tax_amount, tax.precision("tax_amount"))
tax.tax_amount_after_discount_amount = flt(tax.tax_amount_after_discount_amount, tax.precision("tax_amount"))
self._set_in_company_currency(tax, ["total", "tax_amount", "tax_amount_after_discount_amount"])
def adjust_discount_amount_loss(self, tax):
discount_amount_loss = self.doc.grand_total - flt(self.doc.discount_amount) - tax.total
tax.tax_amount_after_discount_amount = flt(tax.tax_amount_after_discount_amount +
discount_amount_loss, tax.precision("tax_amount"))
tax.total = flt(tax.total + discount_amount_loss, tax.precision("total"))
self._set_in_company_currency(tax, ["total", "tax_amount_after_discount_amount"])
def manipulate_grand_total_for_inclusive_tax(self):
# if fully inclusive taxes and diff
if self.doc.get("taxes") and all(cint(t.included_in_print_rate) for t in self.doc.get("taxes")):
last_tax = self.doc.get("taxes")[-1]
diff = self.doc.total - flt(last_tax.total, self.doc.precision("grand_total"))
if diff and abs(diff) <= (2.0 / 10**last_tax.precision("tax_amount")):
last_tax.tax_amount += diff
last_tax.tax_amount_after_discount_amount += diff
last_tax.total += diff
self._set_in_company_currency(last_tax,
["total", "tax_amount", "tax_amount_after_discount_amount"])
def calculate_totals(self):
self.doc.grand_total = flt(self.doc.get("taxes")[-1].total
if self.doc.get("taxes") else self.doc.net_total)
self.doc.total_taxes_and_charges = flt(self.doc.grand_total - self.doc.net_total,
self.doc.precision("total_taxes_and_charges"))
self._set_in_company_currency(self.doc, ["total_taxes_and_charges"])
if self.doc.doctype in ["Quotation", "Sales Order", "Delivery Note", "Sales Invoice"]:
self.doc.base_grand_total = flt(self.doc.grand_total * self.doc.conversion_rate) \
if self.doc.total_taxes_and_charges else self.doc.base_net_total
else:
self.doc.taxes_and_charges_added = self.doc.taxes_and_charges_deducted = 0.0
for tax in self.doc.get("taxes"):
if tax.category in ["Valuation and Total", "Total"]:
if tax.add_deduct_tax == "Add":
self.doc.taxes_and_charges_added += flt(tax.tax_amount_after_discount_amount)
else:
self.doc.taxes_and_charges_deducted += flt(tax.tax_amount_after_discount_amount)
self.doc.round_floats_in(self.doc, ["taxes_and_charges_added", "taxes_and_charges_deducted"])
self.doc.base_grand_total = flt(self.doc.grand_total * self.doc.conversion_rate) \
if (self.doc.taxes_and_charges_added or self.doc.taxes_and_charges_deducted) \
else self.doc.base_net_total
self._set_in_company_currency(self.doc, ["taxes_and_charges_added", "taxes_and_charges_deducted"])
self.doc.round_floats_in(self.doc, ["grand_total", "base_grand_total"])
if self.doc.meta.get_field("rounded_total"):
self.doc.rounded_total = round_based_on_smallest_currency_fraction(self.doc.grand_total,
self.doc.currency, self.doc.precision("rounded_total"))
if self.doc.meta.get_field("base_rounded_total"):
company_currency = erpnext.get_company_currency(self.doc.company)
self.doc.base_rounded_total = \
round_based_on_smallest_currency_fraction(self.doc.base_grand_total,
company_currency, self.doc.precision("base_rounded_total"))
def _cleanup(self):
for tax in self.doc.get("taxes"):
tax.item_wise_tax_detail = json.dumps(tax.item_wise_tax_detail, separators=(',', ':'))
def set_discount_amount(self):
if self.doc.additional_discount_percentage:
self.doc.discount_amount = flt(flt(self.doc.get(scrub(self.doc.apply_discount_on)))
* self.doc.additional_discount_percentage / 100, self.doc.precision("discount_amount"))
def apply_discount_amount(self):
if self.doc.discount_amount:
if not self.doc.apply_discount_on:
frappe.throw(_("Please select Apply Discount On"))
self.doc.base_discount_amount = flt(self.doc.discount_amount * self.doc.conversion_rate,
self.doc.precision("base_discount_amount"))
total_for_discount_amount = self.get_total_for_discount_amount()
taxes = self.doc.get("taxes")
net_total = 0
if total_for_discount_amount:
# calculate item amount after Discount Amount
for i, item in enumerate(self.doc.get("items")):
distributed_amount = flt(self.doc.discount_amount) * \
item.net_amount / total_for_discount_amount
item.net_amount = flt(item.net_amount - distributed_amount, item.precision("net_amount"))
net_total += item.net_amount
# discount amount rounding loss adjustment if no taxes
if (not taxes or self.doc.apply_discount_on == "Net Total") \
and i == len(self.doc.get("items")) - 1:
discount_amount_loss = flt(self.doc.net_total - net_total - self.doc.discount_amount,
self.doc.precision("net_total"))
item.net_amount = flt(item.net_amount + discount_amount_loss,
item.precision("net_amount"))
item.net_rate = flt(item.net_amount / item.qty, item.precision("net_rate")) if item.qty else 0
self._set_in_company_currency(item, ["net_rate", "net_amount"])
self.discount_amount_applied = True
self._calculate()
else:
self.doc.base_discount_amount = 0
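	# A hedged example of the proportional spread above (illustrative
	# numbers, not from the original source): a discount_amount of 10.0
	# over two items with net amounts 30.0 and 70.0 distributes 3.0 and
	# 7.0 respectively; any rounding loss is folded into the last item
	# when there are no taxes or the discount applies on "Net Total".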
def get_total_for_discount_amount(self):
if self.doc.apply_discount_on == "Net Total":
return self.doc.net_total
else:
actual_taxes_dict = {}
for tax in self.doc.get("taxes"):
if tax.charge_type == "Actual":
actual_taxes_dict.setdefault(tax.idx, tax.tax_amount)
elif tax.row_id in actual_taxes_dict:
actual_tax_amount = flt(actual_taxes_dict.get(tax.row_id, 0)) * flt(tax.rate) / 100
actual_taxes_dict.setdefault(tax.idx, actual_tax_amount)
return flt(self.doc.grand_total - sum(actual_taxes_dict.values()), self.doc.precision("grand_total"))
def calculate_total_advance(self):
if self.doc.docstatus < 2:
total_allocated_amount = sum([flt(adv.allocated_amount, adv.precision("allocated_amount"))
for adv in self.doc.get("advances")])
self.doc.total_advance = flt(total_allocated_amount, self.doc.precision("total_advance"))
if self.doc.party_account_currency == self.doc.currency:
invoice_total = flt(self.doc.grand_total - flt(self.doc.write_off_amount),
self.doc.precision("grand_total"))
else:
base_write_off_amount = flt(flt(self.doc.write_off_amount) * self.doc.conversion_rate,
self.doc.precision("base_write_off_amount"))
invoice_total = flt(self.doc.grand_total * self.doc.conversion_rate,
self.doc.precision("grand_total")) - base_write_off_amount
if invoice_total > 0 and self.doc.total_advance > invoice_total:
frappe.throw(_("Advance amount cannot be greater than {0} {1}")
.format(self.doc.party_account_currency, invoice_total))
if self.doc.docstatus == 0:
self.calculate_outstanding_amount()
def calculate_outstanding_amount(self):
# NOTE:
# write_off_amount is only for POS Invoice
# total_advance is only for non POS Invoice
if self.doc.doctype == "Sales Invoice":
self.calculate_paid_amount()
if self.doc.is_return: return
self.doc.round_floats_in(self.doc, ["grand_total", "total_advance", "write_off_amount"])
self._set_in_company_currency(self.doc, ['write_off_amount'])
if self.doc.party_account_currency == self.doc.currency:
total_amount_to_pay = flt(self.doc.grand_total - self.doc.total_advance
- flt(self.doc.write_off_amount), self.doc.precision("grand_total"))
else:
total_amount_to_pay = flt(flt(self.doc.grand_total *
self.doc.conversion_rate, self.doc.precision("grand_total")) - self.doc.total_advance
- flt(self.doc.base_write_off_amount), self.doc.precision("grand_total"))
if self.doc.doctype == "Sales Invoice":
self.doc.round_floats_in(self.doc, ["paid_amount"])
self.calculate_write_off_amount()
self.calculate_change_amount()
paid_amount = self.doc.paid_amount \
if self.doc.party_account_currency == self.doc.currency else self.doc.base_paid_amount
change_amount = self.doc.change_amount \
if self.doc.party_account_currency == self.doc.currency else self.doc.base_change_amount
self.doc.outstanding_amount = flt(total_amount_to_pay - flt(paid_amount) +
flt(change_amount), self.doc.precision("outstanding_amount"))
elif self.doc.doctype == "Purchase Invoice":
self.doc.outstanding_amount = flt(total_amount_to_pay, self.doc.precision("outstanding_amount"))
def calculate_paid_amount(self):
paid_amount = base_paid_amount = 0.0
if self.doc.is_pos:
for payment in self.doc.get('payments'):
payment.amount = flt(payment.amount)
payment.base_amount = payment.amount * flt(self.doc.conversion_rate)
paid_amount += payment.amount
base_paid_amount += payment.base_amount
elif not self.doc.is_return:
self.doc.set('payments', [])
self.doc.paid_amount = flt(paid_amount, self.doc.precision("paid_amount"))
self.doc.base_paid_amount = flt(base_paid_amount, self.doc.precision("base_paid_amount"))
def calculate_change_amount(self):
self.doc.change_amount = 0.0
self.doc.base_change_amount = 0.0
if self.doc.paid_amount > self.doc.grand_total and not self.doc.is_return \
and any([d.type == "Cash" for d in self.doc.payments]):
self.doc.change_amount = flt(self.doc.paid_amount - self.doc.grand_total +
self.doc.write_off_amount, self.doc.precision("change_amount"))
self.doc.base_change_amount = flt(self.doc.base_paid_amount - self.doc.base_grand_total +
self.doc.base_write_off_amount, self.doc.precision("base_change_amount"))
def calculate_write_off_amount(self):
if flt(self.doc.change_amount) > 0:
self.doc.write_off_amount = flt(self.doc.grand_total - self.doc.paid_amount + self.doc.change_amount,
self.doc.precision("write_off_amount"))
self.doc.base_write_off_amount = flt(self.doc.write_off_amount * self.doc.conversion_rate,
self.doc.precision("base_write_off_amount"))
def calculate_margin(self, item):
rate_with_margin = 0.0
if item.price_list_rate:
if item.pricing_rule and not self.doc.ignore_pricing_rule:
pricing_rule = frappe.get_doc('Pricing Rule', item.pricing_rule)
item.margin_type = pricing_rule.margin_type
item.margin_rate_or_amount = pricing_rule.margin_rate_or_amount
if item.margin_type and item.margin_rate_or_amount:
margin_value = item.margin_rate_or_amount if item.margin_type == 'Amount' else flt(item.price_list_rate) * flt(item.margin_rate_or_amount) / 100
rate_with_margin = flt(item.price_list_rate) + flt(margin_value)
return rate_with_margin
def set_item_wise_tax_breakup(self):
item_tax = {}
tax_accounts = []
company_currency = erpnext.get_company_currency(self.doc.company)
item_tax, tax_accounts = self.get_item_tax(item_tax, tax_accounts, company_currency)
headings = get_table_column_headings(tax_accounts)
distinct_items, taxable_amount = self.get_distinct_items()
rows = get_table_rows(distinct_items, item_tax, tax_accounts, company_currency, taxable_amount)
if not rows:
self.doc.other_charges_calculation = ""
else:
self.doc.other_charges_calculation = '''
<div class="tax-break-up" style="overflow-x: auto;">
<table class="table table-bordered table-hover">
<thead><tr>{headings}</tr></thead>
<tbody>{rows}</tbody>
</table>
</div>'''.format(**{
"headings": "".join(headings),
"rows": "".join(rows)
})
def get_item_tax(self, item_tax, tax_accounts, company_currency):
for tax in self.doc.taxes:
tax_amount_precision = tax.precision("tax_amount")
			tax_rate_precision = tax.precision("rate")
item_tax_map = self._load_item_tax_rate(tax.item_wise_tax_detail)
for item_code, tax_data in item_tax_map.items():
if not item_tax.get(item_code):
item_tax[item_code] = {}
if isinstance(tax_data, list):
tax_rate = ""
if tax_data[0]:
if tax.charge_type == "Actual":
tax_rate = fmt_money(flt(tax_data[0], tax_amount_precision),
tax_amount_precision, company_currency)
else:
tax_rate = cstr(flt(tax_data[0], tax_rate_precision)) + "%"
tax_amount = fmt_money(flt(tax_data[1], tax_amount_precision),
tax_amount_precision, company_currency)
item_tax[item_code][tax.name] = [tax_rate, tax_amount]
else:
item_tax[item_code][tax.name] = [cstr(flt(tax_data, tax_rate_precision)) + "%", ""]
tax_accounts.append([tax.name, tax.account_head])
return item_tax, tax_accounts
def get_distinct_items(self):
distinct_item_names = []
distinct_items = []
taxable_amount = {}
for item in self.doc.items:
item_code = item.item_code or item.item_name
if item_code not in distinct_item_names:
distinct_item_names.append(item_code)
distinct_items.append(item)
taxable_amount[item_code] = item.net_amount
else:
taxable_amount[item_code] = taxable_amount.get(item_code, 0) + item.net_amount
return distinct_items, taxable_amount
def get_table_column_headings(tax_accounts):
headings_name = [_("Item Name"), _("Taxable Amount")] + [d[1] for d in tax_accounts]
headings = []
for head in headings_name:
if head == _("Item Name"):
headings.append('<th style="min-width: 120px;" class="text-left">' + (head or "") + "</th>")
else:
headings.append('<th style="min-width: 80px;" class="text-right">' + (head or "") + "</th>")
return headings
def get_table_rows(distinct_items, item_tax, tax_accounts, company_currency, taxable_amount):
rows = []
for item in distinct_items:
item_tax_record = item_tax.get(item.item_code or item.item_name)
if not item_tax_record:
continue
taxes = []
for head in tax_accounts:
if item_tax_record[head[0]]:
taxes.append("<td class='text-right'>(" + item_tax_record[head[0]][0] + ") "
+ item_tax_record[head[0]][1] + "</td>")
else:
taxes.append("<td></td>")
item_code = item.item_code or item.item_name
rows.append("<tr><td>{item_name}</td><td class='text-right'>{taxable_amount}</td>{taxes}</tr>".format(**{
"item_name": item.item_name,
"taxable_amount": fmt_money(taxable_amount.get(item_code, 0), item.precision("net_amount"), company_currency),
"taxes": "".join(taxes)
}))
return rows | gpl-3.0 | 2,259,971,950,055,743,200 | 39.043831 | 148 | 0.689167 | false | 2.987284 | false | false | false |
nismod/energy_demand | energy_demand/plotting/fig_p2_weather_val.py | 1 | 14554 | """Fig 2 figure
"""
import numpy as np
import matplotlib.pyplot as plt
#from scipy.stats import mstats
import pandas as pd
import geopandas as gpd
from scipy import stats
from shapely.geometry import Point
from collections import defaultdict
from matplotlib.colors import Normalize
from energy_demand.plotting import result_mapping
from energy_demand.technologies import tech_related
from energy_demand.plotting import basic_plot_functions
def run(
data_input,
regions,
simulation_yr_to_plot,
population,
fueltype_str,
path_shapefile,
fig_name
):
"""
"""
fueltype_int = tech_related.get_fueltype_int(fueltype_str)
# -----------------------------------------------------------
# Iterate overall weather_yrs and store data in dataframe
# (columns = timestep, rows: value of year)
# -----------------------------------------------------------
# List of selected data for every weather year and region (which is then converted to array)
    weather_yrs_data = defaultdict(list)
    print("Weather yrs: " + str(list(data_input.keys())), flush=True)
    for weather_yr, data_weather_yr in data_input.items():
        # Weather year specific data for every region
        regions_fuel = data_weather_yr[simulation_yr_to_plot][fueltype_int] # Select fueltype
        for region_nr, region_name in enumerate(regions):
            weather_yrs_data[region_name].append(regions_fuel[region_nr])
regional_statistics_columns = [
'name',
'mean_peak_h',
'diff_av_max',
'mean_peak_h_pp',
'diff_av_max_pp',
'std_dev_average_every_h',
'std_dev_peak_h_norm_pop']
df_stats = pd.DataFrame(columns=regional_statistics_columns)
for region_name, region_data in weather_yrs_data.items():
# Convert regional data to dataframe
region_data_array = np.array(region_data)
df = pd.DataFrame(
region_data_array,
columns=range(8760))
# Calculate regional statistics
mean = df.mean(axis=0)
std_dev = df.std(axis=0) #standard deviation across every hour
# Get maximum per colum
#max_every_h = df.max()
#colum_max_h = max_every_h.argmax() #get colum (respesctively hour) of maximum value
# Average standard deviation across every hour
std_dev_average_every_h = np.std(list(std_dev))
max_entry = df.max(axis=0) #maximum entry for every hour
min_entry = df.min(axis=0) #maximum entry for every hour
# Get hour number with maximum demand
hour_nr_max = max_entry.argmax()
hour_nr_min = min_entry.argmin()
# standard deviation of peak hour
std_dev_peak_h = std_dev[hour_nr_max]
# Difference between average and max
diff_av_max = max_entry[hour_nr_max] - mean[hour_nr_max]
mean_peak_h = mean[hour_nr_max]
# Convert GW to KW
diff_av_max = diff_av_max * 1000000 #GW to KW
mean_peak_h = mean_peak_h * 1000000 #GW to KW
# Weight with population
for region_nr, n in enumerate(regions):
if region_name == n:
nr_of_reg = region_nr
break
pop = population[nr_of_reg]
# Divide standard deviation of peak hour by population
# which gives measure of weather variability in peak hour
std_dev_peak_h_norm_pop = std_dev_peak_h / pop
diff_av_max_pp = diff_av_max / pop
mean_peak_h_pp = mean_peak_h / pop
line_entry = [[
str(region_name),
mean_peak_h,
diff_av_max,
mean_peak_h_pp,
diff_av_max_pp,
std_dev_average_every_h,
std_dev_peak_h_norm_pop]]
line_df = pd.DataFrame(
line_entry, columns=regional_statistics_columns)
df_stats = df_stats.append(line_df)
print(df_stats['diff_av_max'].max())
print(df_stats['mean_peak_h'].max())
print(df_stats['std_dev_peak_h_norm_pop'].max())
print("-")
print(df_stats['diff_av_max_pp'].max())
print(df_stats['diff_av_max_pp'].min())
print("-")
print(df_stats['mean_peak_h_pp'].max())
print(df_stats['mean_peak_h_pp'].min())
# ---------------
# Create spatial maps
# http://darribas.org/gds15/content/labs/lab_03.html
# http://nbviewer.jupyter.org/gist/jorisvandenbossche/57d392c085901eb4981054402b37b6b1
# ---------------
# Load uk shapefile
uk_shapefile = gpd.read_file(path_shapefile)
# Merge stats to geopanda
shp_gdp_merged = uk_shapefile.merge(
df_stats,
on='name')
# Assign projection
crs = {'init': 'epsg:27700'} #27700: OSGB_1936_British_National_Grid
uk_gdf = gpd.GeoDataFrame(shp_gdp_merged, crs=crs)
ax = uk_gdf.plot()
# Assign bin colors according to defined cmap and whether
# plot with min_max values or only min/max values
#bin_values = [0, 0.0025, 0.005, 0.0075, 0.01]
#bin_values = [0, 0.02, 0.04, 0.06, 0.08, 0.1] #list(np.arange(0.0, 1.0, 0.1))
# Field to plot
field_to_plot = "diff_av_max_pp" # Difference between average and peak per person in KWh
#field_to_plot = "diff_av_max" # Difference between average and peak
field_to_plot = 'std_dev_peak_h_norm_pop'
nr_of_intervals = 6
bin_values = result_mapping.get_reasonable_bin_values_II(
data_to_plot=list(uk_gdf[field_to_plot]),
nr_of_intervals=nr_of_intervals)
    print(list(uk_gdf[field_to_plot]))
print("BINS " + str(bin_values))
uk_gdf, cmap_rgb_colors, color_zero, min_value, max_value = user_defined_bin_classification(
uk_gdf,
field_to_plot,
bin_values=bin_values)
# plot with face color attribute
uk_gdf.plot(ax=ax, facecolor=uk_gdf['bin_color'], edgecolor='black', linewidth=0.5)
#shp_gdp_merged.plot(column='diff_av_max', scheme='QUANTILES', k=5, cmap='OrRd', linewidth=0.1)
#ax = uk_gdf.plot(column='diff_av_max', scheme='QUANTILES', k=5, cmap='OrRd', linewidth=0.1)
#uk_gdf[uk_gdf['name'] == 'E06000024'].plot(ax=ax, facecolor='green', edgecolor='black')
#uk_gdf[uk_gdf['diff_av_max'] < 0.01].plot(ax=ax, facecolor='blue', edgecolor='black')
# Get legend patches TODO IMPROVE
# TODO IMRPVE: MAKE CORRECT ONE FOR NEW PROCESSING
legend_handles = result_mapping.get_legend_handles(
bin_values[1:-1],
cmap_rgb_colors,
color_zero,
min_value,
max_value)
plt.legend(
handles=legend_handles,
title="tittel_elgend",
prop={'size': 8},
loc='upper center',
bbox_to_anchor=(0.5, -0.05),
frameon=False)
# PLot bins on plot
plt.text(
0,
-20,
bin_values[:-1], #leave away maximum value
fontsize=8)
plt.tight_layout()
plt.savefig(fig_name)
plt.close()
def norm_cmap(values, cmap, vmin=None, vmax=None):
"""
Normalize and set colormap
Parameters
----------
values : Series or array to be normalized
cmap : matplotlib Colormap
vmin : Minimum value of colormap. If None, uses min(values).
vmax : Maximum value of colormap. If None, uses max(values).
Returns
-------
n_cmap : mapping of normalized values to colormap (cmap)
Source
------
https://ocefpaf.github.io/python4oceanographers/blog/2015/08/24/choropleth/
"""
mn = vmin or min(values)
mx = vmax or max(values)
norm = Normalize(vmin=mn, vmax=mx)
n_cmap = plt.cm.ScalarMappable(norm=norm, cmap=cmap)
rgb_colors = [n_cmap.to_rgba(value) for value in values]
return n_cmap, rgb_colors
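# A hedged usage sketch of norm_cmap() above (illustrative values, not from
# the original source):
#
#   n_cmap, rgb_colors = norm_cmap([0.1, 0.5, 0.9], cmap='OrRd')
#   # rgb_colors now holds one RGBA tuple per input value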
def plot_colors(rgb_colors):
"""function to plot colors
"""
nr_dots = len(rgb_colors)
dots = []
x = []
y = []
for i in range(nr_dots):
x.append(i + 20)
y.append(i + 20)
#plt.scatter(x, y, c=cmap, s=50)
plt.scatter(x, y, c=rgb_colors, s=50)
plt.show()
def user_defined_bin_classification(
input_df,
field_name,
bin_values,
cmap_diverging=None,
cmap_sequential=None
):
"""Classify values according to bins
Arguments
---------
input_df : dataframe
Dataframe to plot
higher_as_bin : int
Bin value of > than last bin
cmap_sequential : str
'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds','YlOrBr',
'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu', 'GnBu', 'PuBu', 'YlGnBu',
'PuBuGn', 'BuGn', 'YlGn'
cmap_diverging : str
'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu', 'RdYlBu', 'RdYlGn',
'Spectral', 'coolwarm', 'bwr', 'seismic'
Info
-----
Include 0 in min_max_plot == False
Python colors:
https://matplotlib.org/1.4.1/examples/color/colormaps_reference.html
https://ocefpaf.github.io/python4oceanographers/blog/2015/08/24/choropleth/
https://matplotlib.org/examples/color/colormaps_reference.html
"""
# Check if largest value is large than last bin
max_real_value = float(input_df[field_name].max())
min_real_value = float(input_df[field_name].min())
if max_real_value > 0 and min_real_value < 0:
min_max_plot = True
else:
min_max_plot = False
if not min_max_plot:
# If only minus values
if max_real_value < 0: #only min values
if min_real_value > bin_values[0]:
# add "higher as bin"
bin_values.insert(0, min_real_value)
            elif bin_values[0] < min_real_value:
                raise Exception("The minimum user-defined bin value is smaller than the minimum existing value")
if not cmap_sequential:
                cmap, cmap_rgb_colors = norm_cmap(bin_values[:-1], cmap='Purples') #'YlOrBr'
else:
                cmap, cmap_rgb_colors = norm_cmap(bin_values[:-1], cmap=cmap_sequential) #'YlOrBr'
else: #only positive values
if max_real_value > bin_values[-1]:
# add "higher as bin"
bin_values.append(max_real_value)
elif bin_values[-1] > max_real_value:
raise Exception("The maximum user defined bin value is larger than maximum min: min: {} max: {}".format(bin_values[-1], max_real_value))
if not cmap_sequential:
cmap, cmap_rgb_colors = norm_cmap(bin_values[1:], cmap='Purples')
else:
cmap, cmap_rgb_colors = norm_cmap(bin_values[1:], cmap=cmap_sequential)
# e.g. [0, 3, 6] --> generates (0, 3], and (3, 6] bin
input_df['bin_color'] = pd.cut(
input_df[field_name],
bin_values,
include_lowest=True,
right=True,
labels=cmap_rgb_colors)
color_zero = 'grey' # default
else:
if max_real_value < bin_values[-1]:
raise Exception("The maximum user defined bin value is larger than maximum value {} {}".format(bin_values[-1], max_real_value))
elif min_real_value > bin_values[0]:
raise Exception("The minimum user defined bin smaller is larger than minimum existing value")
else:
pass
# Add minimum and maximum value
bin_values.append(max_real_value)
bin_values.insert(0, min_real_value)
if not cmap_diverging:
cmap, cmap_rgb_colors = norm_cmap(bin_values, cmap='coolwarm')
else:
cmap, cmap_rgb_colors = norm_cmap(bin_values, cmap=cmap_diverging)
# Reclassify zero value
positive_bin_colors = []
minus_bin_colors = []
minus_bins = []
positive_bins = [0]
for cnt, i in enumerate(bin_values):
if i < 0:
minus_bin_colors.append(cmap_rgb_colors[cnt])
minus_bins.append(i)
elif i == 0:
color_zero = cmap_rgb_colors[cnt]
else:
positive_bin_colors.append(cmap_rgb_colors[cnt])
positive_bins.append(i)
minus_bins.append(0)
# ----
# Classify
# ----
# Classify values in dataframe and assign color value as "bin" column
minus_dataframe = input_df[field_name][input_df[field_name] < 0].to_frame()
zero_dataframe = input_df[field_name][input_df[field_name] == 0].to_frame()
plus_dataframe = input_df[field_name][input_df[field_name] > 0].to_frame()
# e.g. [0, 3, 6] --> generates (0, 3], and (3, 6] bin
minus_dataframe['bin_color'] = pd.cut(
minus_dataframe[field_name],
minus_bins,
include_lowest=True,
right=True,
labels=minus_bin_colors)
zero_dataframe['bin_color'] = [color_zero for _ in range(len(zero_dataframe))] #create list with zero color
plus_dataframe['bin_color'] = pd.cut(
plus_dataframe[field_name],
positive_bins,
include_lowest=True,
right=True,
labels=positive_bin_colors)
# Add bins
input_df = minus_dataframe.append(zero_dataframe)
input_df = input_df.append(plus_dataframe)
return input_df, cmap_rgb_colors, color_zero, min_real_value, max_real_value
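# A hedged usage sketch of the classifier above (illustrative bin edges;
# `uk_gdf` stands for any GeoDataFrame with a numeric field, mirroring how
# run() calls this function):
#
#   uk_gdf, rgb_colors, color_zero, vmin, vmax = user_defined_bin_classification(
#       uk_gdf, 'diff_av_max_pp', bin_values=[0, 0.02, 0.04, 0.06, 0.08, 0.1])
#   uk_gdf.plot(facecolor=uk_gdf['bin_color'], edgecolor='black')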
'''ax = input_df.plot()
# Calculate color values
#uk_gdf[uk_gdf['name'] == 'E06000024'].plot(ax=ax, facecolor='green', edgecolor='black')
#uk_gdf[uk_gdf['diff_av_max'] < 0.01].plot(ax=ax, facecolor='blue', edgecolor='black')
# Convert dict to dataframe
#df = pd.DataFrame.from_dict(input_df, orient='index')
#df['Coordinates'] = list(zip(df.longitude, df.latitude))
#df['Coordinates'] = df['Coordinates'].apply(Point)
# Load uk shapefile
uk_shapefile = gpd.read_file(path_shapefile)
# Assign correct projection
crs = {'init': 'epsg:27700'} #27700 == OSGB_1936_British_National_Grid
uk_gdf = gpd.GeoDataFrame(uk_shapefile, crs=crs)
# Transform
uk_gdf = uk_gdf.to_crs({'init' :'epsg:4326'})
# Plot
ax = uk_gdf.plot(color='white', edgecolor='black')
# print coordinates
#world.plot(column='gdp_per_cap', cmap='OrRd', scheme='quantiles');
plt.savefig(fig_path)'''
| mit | 2,933,761,730,508,633,600 | 32.457471 | 152 | 0.589735 | false | 3.359649 | false | false | false |
sivertkh/gtrackcore | gtrackcore/track_operations/raw_operations/Shift.py | 1 | 4473 |
import numpy as np
def shift(starts, ends, regionSize, strands=None, shiftLength=None,
useFraction=False, useStrands=True, treatMissingAsNegative=False):
"""
    Shift elements in a track a given nr of BP.
:param starts: numpy array. Starts
:param ends: numpy array. Ends
:param regionSize: Int. The regions max size.
:param strands: numpy array. Strand info
    :param shiftLength: Int. Nr of BP to shift each segment, or the fraction
    of the segment length to shift when useFraction is True.
    :param useFraction: Boolean. Shift is a fraction of the size of the segment.
    :param useStrands: Boolean. Shift in the strand direction when strand
    information is available.
    :param treatMissingAsNegative: Boolean. Treat segments with missing
    strand information as negative. Default is False, i.e. they are treated
    as positive.
    :return: New shifted track as starts, ends, index and strands
"""
assert shiftLength is not None
if useStrands and strands is None:
# We need strand info to follow it.
useStrands = False
if useStrands:
# Shift in the strand direction.
if treatMissingAsNegative:
positiveIndex = np.where(strands == '+')
negativeIndex = np.where((strands == '-') | (strands == '.'))
else:
positiveIndex = np.where((strands == '+') | (strands == '.'))
negativeIndex = np.where(strands == '-')
if useFraction:
positiveLengths = ends[positiveIndex] - starts[positiveIndex]
negativeLengths = ends[negativeIndex] - starts[negativeIndex]
positiveShift = positiveLengths * shiftLength
#positiveShift = positiveShift.astype(int)
positiveShift = np.around(positiveShift).astype(int)
negativeShift = negativeLengths * shiftLength
#negativeShift = negativeShift.astype(int)
negativeShift = np.around(negativeShift).astype(int)
else:
positiveShift = shiftLength
negativeShift = shiftLength
# Update the positive segments
starts[positiveIndex] = starts[positiveIndex] + positiveShift
ends[positiveIndex] = ends[positiveIndex] + positiveShift
# Update the negative segments
starts[negativeIndex] = starts[negativeIndex] - negativeShift
ends[negativeIndex] = ends[negativeIndex] - negativeShift
else:
if useFraction:
# Using a fraction of the length as a basis for the shift.
# Round to int
lengths = ends - starts
shiftLength = lengths * shiftLength
shiftLength = np.around(shiftLength).astype(int)
#shiftLength = shiftLength.astype(int)
# Strand is not given or we are to ignore it.
starts = starts + shiftLength
ends = ends + shiftLength
# We now check and fix any underflow/overflow
# This is where one of the segments is shifted under 0 or over the size
# of the region.
# Create a index to use in the trackView creation
index = np.arange(0, len(starts), 1, dtype='int32')
# Find end underflow and start overflow first. These segments can be
# removed directly.
endUnderflowIndex = np.where(ends < 0)
starts = np.delete(starts, endUnderflowIndex)
ends = np.delete(ends, endUnderflowIndex)
index = np.delete(index, endUnderflowIndex)
startOverflowIndex = np.where(starts > regionSize)
starts = np.delete(starts, startOverflowIndex)
ends = np.delete(ends, startOverflowIndex)
index = np.delete(index, startOverflowIndex)
# Find start underflow and set it to 0.
startUnderflowIndex = np.where(starts < 0)
starts[startUnderflowIndex] = 0
# Find end overflow and set i to regionSize.
endOverflowIndex = np.where(ends > regionSize)
ends[endOverflowIndex] = regionSize
# When two segments overlap totally, we get dangling points...
# For now we fix it by removing all points. This is probably not the
# way to go..
# if (newStarts == newEnds).any():
danglingPoints = np.where(starts == ends)
starts = np.delete(starts, danglingPoints)
ends = np.delete(ends, danglingPoints)
if strands is not None:
strands = np.delete(strands, danglingPoints)
index = np.delete(index, danglingPoints)
return starts, ends, index, strands
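# A hedged usage sketch (illustrative arrays, not from the original source):
#
#   starts = np.array([10, 50, 90])
#   ends = np.array([20, 60, 100])
#   strands = np.array(['+', '-', '.'])
#   starts, ends, index, strands = shift(
#       starts, ends, regionSize=100, strands=strands, shiftLength=5)
#
# The '+' and (by default) '.' segments move +5 bp, the '-' segment moves
# -5 bp, and the last segment's end is clipped at the region boundary.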
| gpl-3.0 | 2,891,743,441,771,215,000 | 36.588235 | 77 | 0.663537 | false | 4.157063 | false | false | false |
owtf/owtf | owtf/shell/interactive.py | 1 | 6596 | """
owtf.shell.interactive
~~~~~~~~~~~~~~~~~~~~~~
The shell module allows running arbitrary shell commands and is critical
to the framework in order to run third party tools. The interactive shell module allows non-blocking
interaction with subprocesses running tools or remote connections (i.e. shells)
"""
import logging
import subprocess
from owtf.db.session import get_scoped_session
from owtf.shell.base import BaseShell
from owtf.shell.utils import DisconnectException, recv_some, send_all, AsyncPopen
from owtf.utils.error import user_abort
__all__ = ["InteractiveShell"]
class InteractiveShell(BaseShell):
def __init__(self):
BaseShell.__init__(self) # Calling parent class to do its init part
self.connection = None
self.options = None
self.session = get_scoped_session()
self.command_time_offset = "InteractiveCommand"
def check_conn(self, abort_message):
"""Check the connection is alive or not
:param abort_message: Abort message to print
:type abort_message: `str`
:return: True if channel is open, else False
:rtype: `bool`
"""
if not self.connection:
logging.warn("ERROR - Communication channel closed - %s", abort_message)
return False
return True
def read(self, time=1):
"""Read data from the channel
:param time: Time interval in seconds
:type time: `int`
:return: Output from the channel
:rtype: `str`
"""
output = ""
if not self.check_conn("Cannot read"):
return output
try:
output = recv_some(self.connection, time)
except DisconnectException:
logging.warn("ERROR: read - The Communication channel is down!")
return output # End of communication channel
logging.info(output) # Show progress on screen
return output
def format_cmd(self, command):
"""Format the command to be printed on console
:param command: Command to run
:type command: `str`
:return: Formatted command string
:rtype: `str`
"""
if (
"RHOST" in self.options and "RPORT" in self.options
): # Interactive shell on remote connection
return "{!s}:{!s}-{!s}".format(
self.options["RHOST"], self.options["RPORT"], command
)
else:
return "Interactive - {!s}".format(command)
def run(self, command, plugin_info):
"""Format the command to be printed on console
:param command: Command to run
:type command: `str`
:return: Formatted command string
:rtype: `str`
"""
output = ""
cancelled = False
if not self.check_conn("NOT RUNNING Interactive command: {!s}".format(command)):
return output
# TODO: tail to be configurable: \n for *nix, \r\n for win32
log_cmd = self.format_cmd(command)
cmd_info = self.start_cmd(log_cmd, log_cmd)
try:
logging.info("Running Interactive command: %s", command)
send_all(self.connection, command + "\n")
output += self.read()
self.finish_cmd(self.session, cmd_info, cancelled, plugin_info)
except DisconnectException:
cancelled = True
logging.warn("ERROR: Run - The Communication Channel is down!")
self.finish_cmd(self.session, cmd_info, cancelled, plugin_info)
except KeyboardInterrupt:
cancelled = True
self.finish_cmd(self.session, cmd_info, cancelled, plugin_info)
output += user_abort("Command", output) # Identify as Command Level abort
if not cancelled:
self.finish_cmd(self.session, cmd_info, cancelled, plugin_info)
return output
def run_cmd_list(self, cmd_list, plugin_info):
"""Run a list of commands
:param cmd_list: List of commands to run
:type cmd_list: `list`
:param plugin_info: Plugin context information
:type plugin_info: `dict`
:return: Command output
:rtype: `str`
"""
output = ""
for command in cmd_list:
if command != "None":
output += self.run(command, plugin_info)
return output
def open(self, options, plugin_info):
"""Open the connection channel
:param options: User supplied args
:type options: `dict`
:param plugin_info: Context info for plugins
:type plugin_info: `dict`
:return: Plugin output
:rtype: `str`
"""
output = ""
if not self.connection:
name, command = options["ConnectVia"][0]
self.connection = AsyncPopen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
bufsize=1,
)
self.options = options # Store Options for Closing processing and if initial Commands are given
if options["InitialCommands"]:
output += self.run_cmd_list([options["InitialCommands"]], plugin_info)
output += self.read()
output += self.read()
return output
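    # A hedged usage sketch (illustrative options dict; the key names mirror
    # the lookups in open() and close() above, the values are assumptions):
    #
    #   shell = InteractiveShell()
    #   shell.open({"ConnectVia": [("bash", "/bin/bash")],
    #               "InitialCommands": "id"}, plugin_info)
    #   output = shell.run("uname -a", plugin_info)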
def close(self, plugin_info):
"""Close the communication channel
:param plugin_info: Context information for plugin
:type plugin_info: `dict`
:return: None
:rtype: None
"""
logging.info("Close: %s", str(self.options))
if self.options["CommandsBeforeExit"]:
logging.info("Running commands before closing Communication Channel..")
self.run_cmd_list(
self.options["CommandsBeforeExit"].split(
self.options["CommandsBeforeExitDelim"]
),
plugin_info,
)
logging.info("Trying to close Communication Channel..")
self.run("exit", plugin_info)
if self.options["ExitMethod"] == "kill":
logging.info("Killing Communication Channel..")
self.connection.kill()
else: # By default wait
logging.info("Waiting for Communication Channel to close..")
self.connection.wait()
self.connection = None
def is_closed(self):
"""Check if connection is closed
        :return: True if closed, else False
:rtype: `bool`
"""
return self.connection is None
| bsd-3-clause | 6,571,136,281,010,158,000 | 34.085106 | 108 | 0.583535 | false | 4.468835 | false | false | false |
danaukes/popupcad | popupcad/filetypes/genericshapebase.py | 2 | 15733 |
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""
from popupcad.geometry.vertex import ShapeVertex
import numpy
import qt.QtCore as qc
import qt.QtGui as qg
from dev_tools.enum import enum
import popupcad
class ShapeInvalid(Exception):
pass
class NotSimple(Exception):
pass
class GenericShapeBase(object):
display = ['construction', 'exterior', 'interiors']
editable = ['construction']
shapetypes = enum(
line='line',
polyline='polyline',
polygon='polygon',
circle='circle',
rect2point='rect2point')
deletable = []
def __init__(self,exterior,interiors,construction=False,test_shapely=False):
self.id = id(self)
self.exterior = exterior
self.interiors = interiors
self.construction = construction
# self.exterior = self.condition_loop(self.exterior)
# self.interiors = [self.condition_loop(interior) for interior in self.interiors]
self.exterior = self.remove_redundant_points(self.exterior)
self.interiors = [self.remove_redundant_points(interior) for interior in self.interiors]
def is_valid_bool(self):
try:
self.is_valid()
return True
except:
return False
def is_valid(self):
shapely = self.to_shapely(scaling = popupcad.csg_processing_scaling)
if not shapely.is_simple:
raise(NotSimple)
if not shapely.is_valid:
raise(ShapeInvalid)
@classmethod
def lastdir(cls):
return popupcad.lastshapedir
@classmethod
def setlastdir(cls, directory):
popupcad.lastshapedir = directory
def isValid(self):
notempty = self.len_exterior() > 0
return notempty
def copy_data(self, new_type, identical=True):
exterior = [vertex.copy(identical) for vertex in self.get_exterior()]
interiors = [[vertex.copy(identical) for vertex in interior]
for interior in self.get_interiors()]
new = new_type(exterior, interiors, self.is_construction())
if identical:
new.id = self.id
return new
def copy(self, identical=True):
return self.copy_data(type(self), identical)
def upgrade(self, identical=True):
exterior = [vertex.upgrade(identical) for vertex in self.get_exterior()]
interiors = [[vertex.upgrade(identical) for vertex in interior] for interior in self.get_interiors()]
new = type(self)(exterior, interiors, self.is_construction())
if identical:
new.id = self.id
return new
def get_exterior(self):
return self.exterior
def get_interiors(self):
return self.interiors
def is_construction(self):
try:
return self.construction
except AttributeError:
self.construction = False
return self.construction
def set_construction(self, test):
self.construction = test
def exteriorpoints(self, scaling=1):
return [vertex.getpos(scaling) for vertex in self.get_exterior()]
def interiorpoints(self, scaling=1):
return [[vertex.getpos(scaling) for vertex in interior]
for interior in self.get_interiors()]
def exteriorpoints_3d(self, z=0):
points = numpy.array([vertex.getpos() for vertex in self.get_exterior()])
size = list(points.shape)
size[1]+=1
points2 = numpy.zeros(size)
points2[:,:2] = points
points2[:,2] = z
return points2.tolist()
def interiorpoints_3d(self, z=0):
interiors2 = []
for interior in self.get_interiors():
points = numpy.array([vertex.getpos() for vertex in interior])
size = list(points.shape)
size[1]+=1
points2 = numpy.zeros(size)
points2[:,:2] = points
points2[:,2] = z
interiors2.append(points2.tolist())
return interiors2
def vertices(self):
vertices = self.get_exterior()[:]
[vertices.extend(interior) for interior in self.get_interiors()]
return vertices
def points(self, scaling=1):
return [vertex.getpos(scaling) for vertex in self.vertices()]
def segments_closed(self):
points = self.get_exterior()
segments = list(zip(points, points[1:] + points[:1]))
for points in self.get_interiors():
segments.extend(list(zip(points, points[1:] + points[:1])))
return segments
def segments_open(self):
points = self.get_exterior()
segments = list(zip(points[:-1], points[1:]))
for points in self.get_interiors():
segments.extend(list(zip(points[:-1], points[1:])))
return segments
def segmentpoints(self, scaling=1):
segments = self.segments()
segmentpoints = [
(point1.getpos(scaling),
point2.getpos(scaling)) for point1,
point2 in segments]
return segmentpoints
def painterpath(self):
exterior = self.exteriorpoints(scaling=popupcad.view_scaling)
interiors = self.interiorpoints(scaling=popupcad.view_scaling)
return self.gen_painterpath(exterior, interiors)
def gen_painterpath(self, exterior, interiors):
path = qg.QPainterPath()
return path
def properties(self):
from idealab_tools.propertyeditor import PropertyEditor
return PropertyEditor(self)
def addvertex_exterior(self, vertex, special=False):
self.exterior.append(vertex)
self.update_handles()
def addvertex_exterior_special(self, vertex, special=False):
if len(self.get_exterior()) > 2:
if special:
a = [v.getpos() for v in self.get_exterior()]
b = list(zip(a, a[1:] + a[:1]))
c = numpy.array(b)
d = numpy.array(vertex.getpos())
e = c - d
f = e.reshape(-1, 4)
g = (f**2).sum(1)
h = g.argmin()
self.insert_exterior_vertex(h + 1, vertex)
self.update_handles()
return
self.append_exterior_vertex(vertex)
self.update_handles()
def removevertex(self, vertex):
if vertex in self.exterior:
ii = self.exterior.index(vertex)
self.exterior.pop(ii)
for interior in self.interiors:
            if vertex in interior:
ii = interior.index(vertex)
interior.pop(ii)
self.update_handles()
def checkedge(self, edge):
import popupcad.algorithms.points as points
for pt1, pt2 in zip(edge[:-1], edge[1:]):
if points.twopointsthesame(pt1, pt2, popupcad.distinguishable_number_difference):
raise Exception
@staticmethod
def _condition_loop(loop,round_vertices = False, test_rounded_vertices = True, remove_forward_redundancy=True, remove_loop_reduncancy=True,terminate_with_start = False,decimal_places = None):
if len(loop)>0:
if remove_forward_redundancy:
new_loop = [loop.pop(0)]
                while loop:
v1 = new_loop[-1]
v2 = loop.pop(0)
if test_rounded_vertices:
equal = v1.rounded_is_equal(v2,decimal_places)
else:
equal = v1.identical(v2)
if not equal:
new_loop.append(v2)
else:
new_loop = loop[:]
v1 = new_loop[0]
v2 = new_loop[-1]
if test_rounded_vertices:
equal = v1.rounded_is_equal(v2,decimal_places)
else:
equal = v1.identical(v2)
if terminate_with_start:
if not equal:
new_loop.append(v1.copy(identical=False))
if remove_loop_reduncancy:
if equal:
new_loop.pop(-1)
if round_vertices:
new_loop = [item.round(decimal_places) for item in new_loop]
return new_loop
else:
return loop
def _condition(self,round_vertices = False, test_rounded_vertices = True, remove_forward_redundancy=True, remove_loop_reduncancy=True,terminate_with_start = False,decimal_places = None):
        self.exterior = self._condition_loop(self.exterior,round_vertices = round_vertices, test_rounded_vertices = test_rounded_vertices, remove_forward_redundancy=remove_forward_redundancy, remove_loop_reduncancy=remove_loop_reduncancy,terminate_with_start = terminate_with_start,decimal_places = decimal_places)
        self.interiors = [self._condition_loop(interior,round_vertices = round_vertices, test_rounded_vertices = test_rounded_vertices, remove_forward_redundancy=remove_forward_redundancy, remove_loop_reduncancy=remove_loop_reduncancy,terminate_with_start = terminate_with_start,decimal_places = decimal_places) for interior in self.interiors]
@classmethod
def condition_loop(cls,loop):
return cls._condition_loop(loop)
# def condition(self):
# self.exterior = self.condition_loop(self.exterior)
# self.interiors = [self.condition_loop(interior) for interior in self.interiors]
@classmethod
def gen_from_point_lists(cls, exterior_p, interiors_p, **kwargs):
exterior = [ShapeVertex(point) for point in exterior_p]
interiors= [[ShapeVertex(point) for point in interior] for interior in interiors_p]
return cls(exterior, interiors, **kwargs)
def genInteractiveVertices(self):
try:
return self._exteriorhandles, self._interiorhandles
except AttributeError:
self.update_handles()
return self._exteriorhandles, self._interiorhandles
def update_handles(self):
try:
for handle in self._handles:
handle.harddelete()
except AttributeError:
pass
exterior = [vertex.gen_interactive() for vertex in self.get_exterior()]
interiors = [[vertex.gen_interactive() for vertex in interior] for interior in self.get_interiors()]
handles = exterior[:]
[handles.extend(interior) for interior in interiors]
self._exteriorhandles = exterior
self._interiorhandles = interiors
self._handles = handles
def len_exterior(self):
return len(self.get_exterior())
def get_handles(self):
try:
return self._handles
except AttributeError:
self.update_handles()
return self._handles
def get_exterior_handles(self):
try:
return self._exteriorhandles
except AttributeError:
self.update_handles()
return self._exteriorhandles
def triangles3(self):
return []
@staticmethod
def generateQPolygon(points):
poly = qg.QPolygonF([qc.QPointF(*(point))
for point in numpy.array(points)])
return poly
def is_equal(self, other):
if isinstance(self, type(other)):
if len(
self.get_exterior()) == len(
other.get_exterior()) and len(
self.get_interiors()) == len(
other.get_interiors()):
for point1, point2 in zip(
self.get_exterior(), other.get_exterior()):
if not point1.is_equal(point2, popupcad.distinguishable_number_difference):
return False
for interior1, interior2 in zip(
self.get_interiors(), other.get_interiors()):
if len(interior1) != len(interior2):
return False
for point1, point2 in zip(interior1, interior2):
if not point1.is_equal(point2, popupcad.distinguishable_number_difference):
return False
return True
return False
def scale(self, m):
[item.scale(m) for item in self.get_exterior()]
[item.scale(m) for interior in self.get_interiors() for item in interior]
def shift(self, dxdy):
[item.shift(dxdy) for item in self.get_exterior()]
[item.shift(dxdy) for interior in self.get_interiors()
for item in interior]
def transform(self, T):
exteriorpoints = (T.dot(numpy.array(self.exteriorpoints_3d(z=1)).T)).T[:,:2].tolist()
interiorpoints = [(T.dot(numpy.array(interior).T)).T[:,:2].tolist() for interior in self.interiorpoints_3d(z=1)]
return self.gen_from_point_lists(exteriorpoints,interiorpoints)
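    # A hedged worked example of transform() above (illustrative matrix,
    # not from the original source): a pure translation
    # T = numpy.array([[1, 0, dx], [0, 1, dy], [0, 0, 1]]) maps each
    # homogeneous point (x, y, 1) to (x + dx, y + dy, 1); only the first
    # two columns are kept when building the new shape.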
def constrained_shift(self, dxdy, constraintsystem):
a = [(item, dxdy) for item in self.get_exterior()]
a.extend([(item, dxdy) for interior in self.get_interiors() for item in interior])
constraintsystem.constrained_shift(a)
def flip(self):
self.exterior = self.get_exterior()[::-1]
self.interiors = [interior[::-1] for interior in self.get_interiors()]
def hollow(self):
return [self]
def fill(self):
return [self]
def insert_exterior_vertex(self, ii, vertex):
self.exterior.insert(ii, vertex)
def append_exterior_vertex(self, vertex):
self.exterior.append(vertex)
def output_dxf(self,model_space,layer = None):
csg = self.to_shapely(scaling = popupcad.csg_processing_scaling)
new = popupcad.algorithms.csg_shapely.to_generic(csg)
return new.output_dxf(model_space,layer)
def __lt__(self,other):
return self.exteriorpoints()[0]<other.exteriorpoints()[0]
def find_minimal_enclosing_circle(self):
from popupcad.algorithms.minimal_enclosing_circle import numerical_stable_circle
        return numerical_stable_circle(self.exteriorpoints())
#Gets the center
def get_center(self):
'''Retrieves the center point of the shape'''
points = self.exteriorpoints()
x_values = [point[0]/popupcad.SI_length_scaling for point in points]
y_values = [point[1]/popupcad.SI_length_scaling for point in points]
x = float(sum(x_values)) / len(x_values)
y = float(sum(y_values)) / len(y_values)
return (x, y)
def exterior_points_from_center(self):
'''Retrieves the exterior points relative to the center'''
center = self.get_center()
points = self.exteriorpoints()
x_values = [point[0]/popupcad.SI_length_scaling - center[0] for point in points]
y_values = [point[1]/popupcad.SI_length_scaling - center[1] for point in points]
return list(zip(x_values, y_values))
@classmethod
    def remove_redundant_points(cls, points, scaling=1, loop_test=True):
newpoints = []
if len(points)>0:
points = points[:]
newpoints.append(points.pop(0))
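            # Keep a vertex only if it is distinguishable from the previously
            # kept one; with loop_test, the last vertex is also compared
            # against the first so a closed loop does not end on a duplicate
            # of its start point.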
            while points:
newpoint = points.pop(0)
if not popupcad.algorithms.points.twopointsthesame(newpoints[-1].getpos(scaling),newpoint.getpos(scaling),popupcad.distinguishable_number_difference):
if len(points)==0 and loop_test:
if not popupcad.algorithms.points.twopointsthesame(newpoints[0].getpos(scaling),newpoint.getpos(scaling),popupcad.distinguishable_number_difference):
newpoints.append(newpoint)
else:
newpoints.append(newpoint)
return newpoints | mit | 9,166,630,827,800,457,000 | 35.590698 | 253 | 0.59372 | false | 3.885651 | true | false | false |
basilfx/BierApp-Server | bierapp/core/decorators.py | 1 | 1740 | from django.shortcuts import get_object_or_404
from django.http import Http404
from functools import wraps
from bierapp.accounts.models import User
from bierapp.core.models import Transaction, ProductGroup, Product, \
TransactionTemplate
from bierapp.utils.types import get_int
def resolve_user(func):
@wraps(func)
def _inner(request, id, *args, **kwargs):
try:
user = request.site.users.get(id=id)
except User.DoesNotExist:
raise Http404
return func(request, user, *args, **kwargs)
return _inner
def resolve_transaction(func):
@wraps(func)
def _inner(request, id, *args, **kwargs):
transaction = get_object_or_404(Transaction, pk=id, site=request.site)
return func(request, transaction, *args, **kwargs)
return _inner
def resolve_product_group(func):
@wraps(func)
def _inner(request, id, *args, **kwargs):
product_group = get_object_or_404(
ProductGroup, pk=id, site=request.site)
return func(request, product_group, *args, **kwargs)
return _inner
def resolve_product(func):
@wraps(func)
def _inner(request, group_id, id, *args, **kwargs):
product = get_object_or_404(Product, pk=id, product_group=group_id)
return func(request, product, *args, **kwargs)
return _inner
def resolve_template(func):
@wraps(func)
def _inner(request, *args, **kwargs):
template_id = get_int(request.GET, "template", default=False)
if template_id:
kwargs["template"] = get_object_or_404(
TransactionTemplate, pk=template_id,
category__site=request.site)
return func(request, *args, **kwargs)
return _inner
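# Illustrative usage sketch (not part of the original module; the view name
# and URL pattern are assumptions):
#
#     @resolve_transaction
#     def transaction_detail(request, transaction):
#         ...  # routed as r"^transactions/(?P<id>\d+)/$"
#
# The decorator receives the raw ``id`` captured from the URL, resolves it
# against ``request.site`` and passes the model instance on to the view.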
| gpl-3.0 | 2,630,288,681,782,983,000 | 28 | 78 | 0.644828 | false | 3.632568 | false | false | false |
aringh/odl | odl/util/vectorization.py | 1 | 8540 | # Copyright 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Utilities for internal functionality connected to vectorization."""
from __future__ import print_function, division, absolute_import
from builtins import object
from functools import wraps
import numpy as np
__all__ = ('is_valid_input_array', 'is_valid_input_meshgrid',
'out_shape_from_meshgrid', 'out_shape_from_array',
'OptionalArgDecorator', 'vectorize')
def is_valid_input_array(x, ndim=None):
"""Test if ``x`` is a correctly shaped point array in R^d."""
x = np.asarray(x)
if ndim is None or ndim == 1:
        return ((x.ndim == 1 and x.size > 1) or
                (x.ndim == 2 and x.shape[0] == 1))
else:
return x.ndim == 2 and x.shape[0] == ndim
def is_valid_input_meshgrid(x, ndim):
"""Test if ``x`` is a `meshgrid` sequence for points in R^d."""
# This case is triggered in FunctionSpaceElement.__call__ if the
# domain does not have an 'ndim' attribute. We return False and
# continue.
if ndim is None:
return False
if not isinstance(x, tuple):
return False
if ndim > 1:
try:
np.broadcast(*x)
except (ValueError, TypeError): # cannot be broadcast
return False
return (len(x) == ndim and
all(isinstance(xi, np.ndarray) for xi in x) and
all(xi.ndim == ndim for xi in x))
def out_shape_from_meshgrid(mesh):
"""Get the broadcast output shape from a `meshgrid`."""
if len(mesh) == 1:
return (len(mesh[0]),)
else:
return np.broadcast(*mesh).shape
def out_shape_from_array(arr):
"""Get the output shape from an array."""
arr = np.asarray(arr)
if arr.ndim == 1:
return arr.shape
else:
return (arr.shape[1],)
class OptionalArgDecorator(object):
"""Abstract class to create decorators with optional arguments.
This class implements the functionality of a decorator that can
be used with and without arguments, i.e. the following patterns
both work::
@decorator
def myfunc(x, *args, **kwargs):
pass
@decorator(param, **dec_kwargs)
def myfunc(x, *args, **kwargs):
pass
The arguments to the decorator are passed on to the underlying
wrapper.
To use this class, subclass it and implement the static ``_wrapper``
method.
"""
def __new__(cls, *args, **kwargs):
"""Create a new decorator instance.
There are two cases to distinguish:
1. Without arguments::
@decorator
def myfunc(x):
pass
which is equivalent to ::
def myfunc(x):
pass
myfunc = decorator(myfunc)
Hence, in this case, the ``__new__`` method of the decorator
immediately returns the wrapped function.
2. With arguments::
@decorator(*dec_args, **dec_kwargs)
def myfunc(x):
pass
which is equivalent to ::
def myfunc(x):
pass
dec_instance = decorator(*dec_args, **dec_kwargs)
myfunc = dec_instance(myfunc)
Hence, in this case, the first call creates an actual class
instance of ``decorator``, and in the second statement, the
``dec_instance.__call__`` method returns the wrapper using
the stored ``dec_args`` and ``dec_kwargs``.
"""
# Decorating without arguments: return wrapper w/o args directly
instance = super(OptionalArgDecorator, cls).__new__(cls)
if (not kwargs and
len(args) == 1 and
callable(args[0])):
func = args[0]
return instance._wrapper(func)
# With arguments, return class instance
else:
instance.wrapper_args = args
instance.wrapper_kwargs = kwargs
return instance
def __call__(self, func):
"""Return ``self(func)``.
This method is invoked when the decorator was created with
arguments.
Parameters
----------
func : callable
Original function to be wrapped
Returns
-------
wrapped : callable
The wrapped function
"""
return self._wrapper(func, *self.wrapper_args, **self.wrapper_kwargs)
@staticmethod
def _wrapper(func, *wrapper_args, **wrapper_kwargs):
"""Make a wrapper for ``func`` and return it.
This is a default implementation that simply returns the wrapped
function, i.e., the resulting decorator is the identity.
"""
return func
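# A minimal subclassing sketch (illustration only, not part of the original
# module): ``_ExampleDecorator`` is an assumed name, and its wrapper simply
# forwards the call, so it works both as ``@_ExampleDecorator`` and as
# ``@_ExampleDecorator(arg, key=value)``.
class _ExampleDecorator(OptionalArgDecorator):
    @staticmethod
    def _wrapper(func, *dec_args, **dec_kwargs):
        def wrapper(*args, **kwargs):
            # ``dec_args``/``dec_kwargs`` hold the optional decorator
            # arguments, if any were given
            return func(*args, **kwargs)
        return wrapper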
class vectorize(OptionalArgDecorator):
"""Decorator class for function vectorization.
This vectorizer expects a function with exactly one positional
argument (input) and optional keyword arguments. The decorated
function has an optional ``out`` parameter for in-place evaluation.
Examples
--------
    Use the decorator without arguments:
>>> @vectorize
... def f(x):
... return x[0] + x[1] if x[0] < x[1] else x[0] - x[1]
>>>
>>> f([0, 1]) # np.vectorize'd functions always return an array
array(1)
>>> f([[0, -2], [1, 4]]) # corresponds to points [0, 1], [-2, 4]
array([1, 2])
The function may have ``kwargs``:
>>> @vectorize
... def f(x, param=1.0):
... return x[0] + x[1] if x[0] < param else x[0] - x[1]
>>>
>>> f([[0, -2], [1, 4]])
array([1, 2])
>>> f([[0, -2], [1, 4]], param=-1.0)
array([-1, 2])
You can pass arguments to the vectorizer, too:
>>> @vectorize(otypes=['float32'])
... def f(x):
... return x[0] + x[1] if x[0] < x[1] else x[0] - x[1]
>>> f([[0, -2], [1, 4]])
array([ 1., 2.], dtype=float32)
"""
@staticmethod
def _wrapper(func, *vect_args, **vect_kwargs):
"""Return the vectorized wrapper function."""
        if not hasattr(func, '__name__'):
            # Set name if not available. Happens if ``func`` is a callable
            # object rather than a plain function.
            func.__name__ = '{}.__call__'.format(func.__class__.__name__)
return wraps(func)(_NumpyVectorizeWrapper(func, *vect_args,
**vect_kwargs))
class _NumpyVectorizeWrapper(object):
"""Class for vectorization wrapping using `numpy.vectorize`.
The purpose of this class is to store the vectorized version of
a function when it is called for the first time.
"""
def __init__(self, func, *vect_args, **vect_kwargs):
"""Initialize a new instance.
Parameters
----------
func : callable
Python function or method to be wrapped
vect_args :
positional arguments for `numpy.vectorize`
vect_kwargs :
keyword arguments for `numpy.vectorize`
"""
super(_NumpyVectorizeWrapper, self).__init__()
self.func = func
self.vfunc = None
self.vect_args = vect_args
self.vect_kwargs = vect_kwargs
def __call__(self, x, out=None, **kwargs):
"""Vectorized function call.
Parameters
----------
x : `array-like` or sequence of `array-like`'s
Input argument(s) to the wrapped function
out : `numpy.ndarray`, optional
Appropriately sized array to write to
Returns
-------
out : `numpy.ndarray`
Result of the vectorized function evaluation. If ``out``
was given, the returned object is a reference to it.
"""
if np.isscalar(x):
x = np.array([x])
elif isinstance(x, np.ndarray) and x.ndim == 1:
x = x[None, :]
if self.vfunc is None:
# Not yet vectorized
def _func(*x, **kw):
return self.func(np.array(x), **kw)
self.vfunc = np.vectorize(_func, *self.vect_args,
**self.vect_kwargs)
        if out is None:
            return self.vfunc(*x, **kwargs)
        else:
            out[:] = self.vfunc(*x, **kwargs)
            return out
if __name__ == '__main__':
from odl.util.testutils import run_doctests
run_doctests()
| mpl-2.0 | -1,299,324,578,800,178,000 | 28.040816 | 79 | 0.559264 | false | 4.059914 | false | false | false |
euphorie/Euphorie | src/euphorie/client/authentication.py | 1 | 9395 | """
Authentication
--------------
User account plugins and authentication.
"""
from ..content.api.authentication import authenticate_token as authenticate_cms_token
from . import model
from .interfaces import IClientSkinLayer
from AccessControl import ClassSecurityInfo
from Acquisition import aq_parent
from App.class_init import InitializeClass
from euphorie.content.api.interfaces import ICMSAPISkinLayer
from Products.PageTemplates.PageTemplateFile import PageTemplateFile
from Products.PluggableAuthService.interfaces.plugins import IAuthenticationPlugin
from Products.PluggableAuthService.interfaces.plugins import IChallengePlugin
from Products.PluggableAuthService.interfaces.plugins import IExtractionPlugin
from Products.PluggableAuthService.interfaces.plugins import IUserEnumerationPlugin
from Products.PluggableAuthService.interfaces.plugins import IUserFactoryPlugin
from Products.PluggableAuthService.plugins.BasePlugin import BasePlugin
from Products.PluggableAuthService.utils import classImplements
from z3c.saconfig import Session
from zope.publisher.interfaces.browser import IBrowserView
import logging
import six
import sqlalchemy.exc
import traceback
import urllib
log = logging.getLogger(__name__)
class NotImplementedError(Exception):
def __init__(self, message):
self.message = message
def graceful_recovery(default=None, log_args=True):
"""Decorator to safely use SQLAlchemy in PAS plugins. This decorator
makes sure SQL exceptions are caught and logged.
Code from Malthe Borch's pas.plugins.sqlalchemy package.
"""
def decorator(func):
def wrapper(*args, **kwargs):
try:
value = func(*args, **kwargs)
except sqlalchemy.exc.SQLAlchemyError as e:
if log_args is False:
args = ()
kwargs = {}
formatted_tb = traceback.format_exc()
try:
exc_str = str(e)
except Exception:
exc_str = "<%s at 0x%x>" % (e.__class__.__name__, id(e))
log.critical(
"caught SQL-exception: "
"%s (in method ``%s``; arguments were %s)\n\n%s"
% (
exc_str,
func.__name__,
", ".join(
[repr(arg) for arg in args]
+ [
"%s=%s" % (name, repr(value))
for (name, value) in kwargs.items()
]
),
formatted_tb,
)
)
return default
return value
return wrapper
return decorator
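# Minimal usage sketch (illustration only): wrap any PAS plugin method that
# touches the database so SQLAlchemy errors are logged and a safe default is
# returned instead of propagating, e.g.::
#
#     @graceful_recovery(default=[], log_args=False)
#     def enumerateUsers(self, **kw):
#         return Session().query(model.Account).all()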
manage_addEuphorieAccountPlugin = PageTemplateFile(
"templates/addPasPlugin", globals(), __name__="manage_addEuphorieAccountPlugin"
)
def addEuphorieAccountPlugin(self, id, title="", REQUEST=None):
"""Add an EuphorieAccountPlugin to a Pluggable Authentication Service."""
p = EuphorieAccountPlugin(id, title)
self._setObject(p.getId(), p)
if REQUEST is not None:
REQUEST["RESPONSE"].redirect(
"%s/manage_workspace"
"?manage_tabs_message=Euphorie+Account+Manager+plugin+added."
% self.absolute_url()
)
class EuphorieAccountPlugin(BasePlugin):
meta_type = "Euphorie account manager"
security = ClassSecurityInfo()
def __init__(self, id, title=None):
self._setId(id)
self.title = title
def extractCredentials(self, request):
"""IExtractionPlugin implementation"""
token = request.getHeader("X-Euphorie-Token")
if token:
return {"api-token": token}
else:
return {}
@security.private
def _authenticate_token(self, credentials):
"""IAuthenticationPlugin implementation"""
token = credentials.get("api-token")
if not token:
return None
account = authenticate_cms_token(self, token)
return account
@security.private
def _authenticate_login(self, credentials):
login = credentials.get("login")
password = credentials.get("password")
account = authenticate(login, password)
if account is not None:
return (str(account.id), account.loginname)
else:
return None
@security.private
def _get_survey_session(self):
for parent in self.REQUEST.other["PARENTS"]:
if isinstance(parent, model.SurveySession):
return parent
else:
return None
@security.private
@graceful_recovery(log_args=False)
def authenticateCredentials(self, credentials):
if not (
IClientSkinLayer.providedBy(self.REQUEST)
or ICMSAPISkinLayer.providedBy(self.REQUEST)
):
return None
uid_and_login = self._authenticate_login(credentials)
if uid_and_login is None:
uid_and_login = self._authenticate_token(credentials)
if uid_and_login is not None:
session = self._get_survey_session()
if session is not None:
# Verify if current session matches the user. This prevents
# a cookie hijack attack.
if str(session.account_id) != uid_and_login[0]:
return None
return uid_and_login
else:
return None
@graceful_recovery()
def createUser(self, user_id, name):
"""IUserFactoryPlugin implementation"""
try:
user_id = int(user_id)
except (TypeError, ValueError):
return None
return Session().query(model.Account).get(user_id)
@graceful_recovery()
def enumerateUsers(
self,
id=None,
login=None,
exact_match=False,
sort_by=None,
max_results=None,
**kw
):
"""IUserEnumerationPlugin implementation"""
if not exact_match:
return []
if not IClientSkinLayer.providedBy(self.REQUEST):
return []
query = Session().query(model.Account)
if id is not None:
try:
query = query.filter(model.Account.id == int(id))
except ValueError:
return []
if login:
query = query.filter(model.Account.loginname == login)
account = query.first()
if account is not None:
return [{"id": str(account.id), "login": account.loginname}]
return []
def updateUser(self, user_id, login_name):
"""Changes the user's username. New method available since Plone 4.3.
Euphorie doesn't support this.
:returns: False
"""
return False
def updateEveryLoginName(self, quit_on_first_error=True):
"""Update login names of all users to their canonical value.
This should be done after changing the login_transform
property of PAS.
You can set quit_on_first_error to False to report all errors
before quitting with an error. This can be useful if you want
to know how many problems there are, if any.
:raises: NotImplementedError
"""
raise NotImplementedError(
"updateEveryLoginName method is not implemented by Euphorie"
)
def challenge(self, request, response):
"""IChallengePlugin implementation"""
if not IClientSkinLayer.providedBy(request):
return False
current_url = request.get("ACTUAL_URL", "")
query = request.get("QUERY_STRING")
if query:
if not query.startswith("?"):
query = "?" + query
current_url += query
context = request.get("PUBLISHED")
if not context:
log.error(
"Refusing to authenticate because no context has been found in %r", # noqa: E501
request,
)
return False
if IBrowserView.providedBy(context):
context = aq_parent(context)
login_url = "%s/@@login?%s" % (
context.absolute_url(),
urllib.urlencode(dict(came_from=current_url)),
)
response.redirect(login_url, lock=True)
return True
def authenticate(login, password):
"""Try to authenticate a user using the given login and password.
:param unicode login: login name
:param unicode password: users password
:return: :py:class:`Account <euphorie.client.model.Account>` instance
If the credentials are valid the matching account is returned. For invalid
credentials None is returned instead.
"""
if not login or not password:
return None
if isinstance(password, six.text_type):
password = password.encode("utf8")
login = login.lower()
accounts = Session().query(model.Account).filter(model.Account.loginname == login)
for account in accounts:
if account.verify_password(password):
return account
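# Example (illustration only, credentials assumed):
#     authenticate(u"[email protected]", u"secret")
# returns the matching ``Account`` or ``None``; the loginname comparison is
# lower-cased first and the password is checked via ``verify_password``.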
classImplements(
EuphorieAccountPlugin,
IAuthenticationPlugin,
IChallengePlugin,
IExtractionPlugin,
IUserEnumerationPlugin,
IUserFactoryPlugin,
)
InitializeClass(EuphorieAccountPlugin)
| gpl-2.0 | 5,777,297,908,647,784,000 | 30.955782 | 97 | 0.604896 | false | 4.501677 | false | false | false |
larryweya/dry-pyramid | drypyramid/tests.py | 1 | 20737 | import unittest
import colander
from webob.multidict import MultiDict
from webtest import TestApp
from pyramid import testing
from pyramid.httpexceptions import (
HTTPNotFound,
HTTPFound
)
from sqlalchemy import (
create_engine,
Column,
Integer,
String,
Table,
ForeignKey,
)
from sqlalchemy.orm import (
relationship,
)
from .models import (
SASession,
Base,
ModelFactory,
BaseRootFactory,
BaseUser,
)
from .auth import pwd_context
from .views import (
model_list,
model_create,
model_show,
model_update,
model_delete,
ModelView,
)
person_hobby = Table(
'person_hobby', Base.metadata,
Column('person_id', Integer, ForeignKey('person.id')),
Column('hobby_id', Integer, ForeignKey('hobby.id')),
)
class Person(Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
name = Column(String(100), unique=True, nullable=False)
age = Column(Integer, nullable=False)
hobbies = relationship('Hobby', secondary=person_hobby, backref='people')
class Hobby(Base):
__tablename__ = 'hobby'
id = Column(Integer, primary_key=True)
name = Column(String(100), unique=True, nullable=False)
class PersonModelFactory(ModelFactory):
ModelClass = Person
def post_get_item(self, item):
self.post_get_item_called = True
class PersonForm(colander.MappingSchema):
name = colander.SchemaNode(colander.String(encoding='utf-8'))
age = colander.SchemaNode(colander.Integer())
class HobbiesSchema(colander.SequenceSchema):
name = colander.SchemaNode(
colander.String(encoding='utf-8'), title="Hobby")
class PersonUpdateForm(colander.MappingSchema):
hobbies = HobbiesSchema(values=[
('su', 'Superuser'),
('billing', 'Billing'),
('customer_care', 'Customer Care')
])
class TestBase(unittest.TestCase):
def _setup_db(self):
self.engine = create_engine('sqlite:///:memory:', echo=True)
SASession.configure(bind=self.engine)
Base.metadata.create_all(self.engine)
def setUp(self):
self.config = testing.setUp()
#self.config.add_route('login', '/login')
#self.config.add_route('root', '/*traverse')
self._setup_db()
def tearDown(self):
SASession.remove()
testing.tearDown()
class TestBaseModel(TestBase):
def test_create_from_dict(self):
data = {
'name': "Mr Smith",
'age': 23
}
model = Person.create_from_dict(data)
self.assertEqual(model.name, data['name'])
self.assertEqual(model.age, data['age'])
def test_to_dict(self):
model = Person(name="Mr Smith", age=23)
data = model.to_dict()
expected_data = {
'id': None,
'name': "Mr Smith",
'age': 23
}
self.assertEqual(data, expected_data)
def test_update_from_dict(self):
model = Person(name="Mr Smith", age=23)
update_data = {
'name': "Mrs Smith",
'age': 35
}
model.update_from_dict(update_data)
self.assertEqual(model.name, update_data['name'])
self.assertEqual(model.age, update_data['age'])
def test_to_dict_handles_relationships(self):
pass
class TestModelFactory(TestBase):
def setUp(self):
super(TestModelFactory, self).setUp()
self.request = testing.DummyRequest()
# this is done by ModelView on include
route_name = 'persons'
base_url = 'people'
PersonModelFactory.__route_name__ = route_name
self.config.add_route(route_name, '/{0}/*traverse'.format(base_url),
factory=PersonModelFactory)
self.factory = PersonModelFactory(self.request)
def test_list_url(self):
url = self.factory.list_url(self.request)
expected_url = "%s/people/" % self.request.application_url
self.assertEqual(url, expected_url)
def test_create_url(self):
self.factory = PersonModelFactory(self.request)
url = self.factory.create_url(self.request)
expected_url = "%s/people/add" % self.request.application_url
self.assertEqual(url, expected_url)
def test_show_url(self):
person = Person(id=1, name="Mr Smith", age=23)
url = self.factory.show_url(self.request, person)
expected_url = "{0}/people/{1}".format(self.request.application_url,
person.id)
self.assertEqual(url, expected_url)
def test_update_url(self):
person = Person(id=1, name="Mr Smith", age=23)
url = self.factory.update_url(self.request, person)
expected_url = "{0}/people/{1}/edit".format(
self.request.application_url, person.id)
self.assertEqual(url, expected_url)
def test_delete_url(self):
person = Person(id=1, name="Mr Smith", age=23)
url = self.factory.delete_url(self.request, person)
expected_url = "{0}/people/{1}/delete".format(
self.request.application_url, person.id)
self.assertEqual(url, expected_url)
def test_get_item_calls_post_get_item(self):
self.factory = PersonModelFactory(self.request)
# create a Person
person = Person(name="Mr Smith", age=23)
person.save()
self.factory.__getitem__('1')
self.assertTrue(self.factory.post_get_item_called)
class TestViewHelpers(TestBase):
def setUp(self):
super(TestViewHelpers, self).setUp()
self.config.add_route('persons', '/persons/*traverse',
factory=PersonModelFactory)
def test_model_list(self):
person = Person(name='Mr Smith', age=23)
person.save()
SASession.flush()
view = model_list(Person)
request = testing.DummyRequest()
response = view(request)
self.assertIn('records', response)
self.assertIsInstance(response['records'][0], Person)
def test_model_create(self):
def _post_create_response_callback(request, record):
return HTTPFound(request.route_url('persons',
traverse=(record.id,)))
def _pre_create_callback(request, record, values):
record.age = 25
view = model_create(Person, PersonForm, _post_create_response_callback,
_pre_create_callback)
request = testing.DummyRequest()
request.method = 'POST'
values = [
('csrf_token', request.session.get_csrf_token()),
('name', 'Mr Smith'),
('age', '22'),
]
request.POST = MultiDict(values)
context = PersonModelFactory(request)
response = view(context, request)
self.assertIsInstance(response, HTTPFound)
self.assertEqual(response.location,
'{0}/persons/1'.format(request.application_url))
person = Person.query().filter_by(name='Mr Smith').one()
self.assertEqual(person.age, 25)
def test_model_show(self):
person = Person(name='Mr Smith', age=23)
person.save()
SASession.flush()
view = model_show(Person)
request = testing.DummyRequest()
response = view(person, request)
self.assertIn('record', response)
self.assertIsInstance(response['record'], Person)
def test_model_update(self):
def _post_update_response_callback(request, record):
return HTTPFound(request.route_url('persons',
traverse=(record.id,)))
person = Person(name='Not Mr Smith', age=23)
person.save()
SASession.flush()
def _pre_update_callback(request, record, values):
record.age = 28
view = model_update(Person, PersonForm, _post_update_response_callback,
_pre_update_callback)
request = testing.DummyRequest()
request.method = 'POST'
values = [
('csrf_token', request.session.get_csrf_token()),
('name', 'Mr Smith'),
('age', '22'),
]
request.POST = MultiDict(values)
response = view(person, request)
self.assertIsInstance(response, HTTPFound)
self.assertEqual(response.location,
'{0}/persons/1'.format(request.application_url))
person = Person.query().filter_by(name='Mr Smith').one()
self.assertEqual(person.age, 28)
def test_model_delete(self):
def _post_del_response_callback(request, record):
return HTTPFound(request.route_url('persons', traverse=()))
person = Person(name='Mr Smith', age=23)
person.save()
SASession.flush()
view = model_delete(_post_del_response_callback)
self.config.add_view(view,
context=PersonModelFactory,
route_name='persons',
name='delete',
permission='delete',
check_csrf=True)
request = testing.DummyRequest()
request.method = 'POST'
response = view(person, request)
self.assertIsInstance(response, HTTPFound)
self.assertEqual(response.location,
'{0}/persons/'.format(request.application_url))
class TestRootFactory(BaseRootFactory):
pass
class FunctionalTestBase(TestBase):
application_url = 'http://localhost'
def setUp(self):
super(FunctionalTestBase, self).setUp()
self.config.set_root_factory(TestRootFactory)
session_factory = testing.DummySession
self.config.set_session_factory(session_factory)
class TestModelView(FunctionalTestBase):
class TestRenderer(object):
responses = {
'templates/person_list.pt': '{{"title": "People List"}}',
'templates/person_create.pt': '{{"title": "People Create"}}',
'templates/person_show.pt': '{{"title": "Person Show"}}',
'templates/person_update.pt': '{{"title": "Person Update",'
'"form_class": "{form_class}"}}',
# custom templates
'templates/person_custom_list.pt': '{{"title": "People Custom List"}}',
'templates/person_custom_create.pt': '{{"title": "People Custom Create"}},'
'"form_class": "{form_class}"}}',
'templates/person_custom_show.pt': '{{"title": "Person Custom Show"}}',
'templates/person_custom_update.pt': '{{"title": "Person Custom Update", '
'"form_class": "{form_class}"}}'
}
def __init__(self, info):
pass
def __call__(self, value, system):
renderer = system['renderer_name']
response = self.responses[renderer]
if 'form' in value:
response = response.format(
form_class=value['form'].schema.__class__.__name__)
return response
def setUp(self):
super(TestModelView, self).setUp()
self.config.add_renderer('.pt', self.TestRenderer)
person = Person(name="Mr Smith", age=23)
person.save()
SASession.flush()
def test_view_registration(self):
"""
Check that all views (list, create, show, update, delete) are
registered by default
"""
class PersonViews(ModelView):
ModelFactoryClass = PersonModelFactory
ModelFormClass = PersonForm
base_url_override = 'people'
PersonViews.include(self.config)
testapp = TestApp(self.config.make_wsgi_app())
# list
response = testapp.get('/people/')
response.mustcontain('People List')
# create
response = testapp.get('/people/add')
response.mustcontain('People Create')
# show
response = testapp.get('/people/1')
response.mustcontain('Person Show')
# update
response = testapp.get('/people/1/edit')
response.mustcontain('Person Update')
# delete
request = testing.DummyRequest()
csrf_token = request.session.get_csrf_token()
response = testapp.post('/people/1/delete', {'csrf_token': csrf_token})
self.assertEqual(response.status_code, 302)
def test_only_requested_views_are_registered(self):
"""
Test that only views within the enabled_views list are created and
exposed
"""
class PersonViews(ModelView):
ModelFactoryClass = PersonModelFactory
ModelFormClass = PersonForm
enabled_views = (ModelView.LIST, ModelView.CREATE, ModelView.UPDATE)
base_url_override = 'people'
PersonViews.include(self.config)
testapp = TestApp(self.config.make_wsgi_app())
# list
response = testapp.get('/people/')
response.mustcontain('People List')
# create
response = testapp.get('/people/add')
response.mustcontain('People Create')
# show
self.assertRaises(HTTPNotFound, testapp.get, '/people/1')
# update
response = testapp.get('/people/1/edit')
response.mustcontain('Person Update')
# delete
request = testing.DummyRequest()
csrf_token = request.session.get_csrf_token()
self.assertRaises(HTTPNotFound, testapp.post, '/people/1/delete',
{'csrf_token': csrf_token})
def test_update_view_uses_update_form_override_if_specified(self):
class PersonViews(ModelView):
ModelFactoryClass = PersonModelFactory
ModelFormClass = PersonForm
ModelUpdateFormClass = PersonUpdateForm
base_url_override = 'people'
PersonViews.include(self.config)
testapp = TestApp(self.config.make_wsgi_app())
# update
response = testapp.get('/people/1/edit')
response.mustcontain('PersonUpdateForm')
def test_renderer_overrides_work_on_all_views(self):
class PersonViews(ModelView):
ModelFactoryClass = PersonModelFactory
ModelFormClass = PersonForm
base_url_override = 'people'
list_view_renderer = 'templates/person_custom_list.pt'
create_view_renderer = 'templates/person_custom_create.pt'
show_view_renderer = 'templates/person_custom_show.pt'
update_view_renderer = 'templates/person_custom_update.pt'
PersonViews.include(self.config)
testapp = TestApp(self.config.make_wsgi_app())
# list
response = testapp.get('/people/')
response.mustcontain('People Custom List')
# create
response = testapp.get('/people/add')
response.mustcontain('People Custom Create')
# show
response = testapp.get('/people/1')
response.mustcontain('Person Custom Show')
# update
response = testapp.get('/people/1/edit')
response.mustcontain('Person Custom Update')
class TestModelViewResponseCallbacks(FunctionalTestBase):
def test_create_view_response_override_works(self):
class PersonViews(ModelView):
ModelFactoryClass = PersonModelFactory
ModelFormClass = PersonForm
base_url_override = 'people'
@classmethod
def post_save_response(cls, request, record):
return HTTPFound(request.route_url('person',
traverse=(record.id,)))
            # NOTE: just overriding the function doesn't work
post_create_response_callback = post_save_response
PersonViews.include(self.config)
testapp = TestApp(self.config.make_wsgi_app())
request = testing.DummyRequest()
params = {
'name': 'Mr Smith',
'age': '22',
'csrf_token': request.session.get_csrf_token()}
response = testapp.post('/people/add', params)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.location,
'{0}/people/1'.format(self.application_url))
def test_update_view_response_override_works(self):
class PersonViews(ModelView):
ModelFactoryClass = PersonModelFactory
ModelFormClass = PersonForm
base_url_override = 'people'
@classmethod
def post_save_response(cls, request, record):
return HTTPFound(request.route_url('person',
traverse=(record.id,)))
            # NOTE: just overriding the function doesn't work
post_update_response_callback = post_save_response
person = Person(name='Mrs Smith', age=25)
SASession.add(person)
SASession.flush()
PersonViews.include(self.config)
testapp = TestApp(self.config.make_wsgi_app())
request = testing.DummyRequest()
params = {
'name': 'Mrs Jane Smith',
'age': '22',
'csrf_token': request.session.get_csrf_token()}
url = '/people/{0}/edit'.format(person.id)
response = testapp.post(url, params)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.location,
'{0}/people/1'.format(self.application_url))
def test_delete_view_response_override_works(self):
class PersonViews(ModelView):
ModelFactoryClass = PersonModelFactory
ModelFormClass = PersonForm
base_url_override = 'people'
@classmethod
def post_save_response(cls, request, record):
return HTTPFound(request.route_url('person',
traverse=('2', 'edit')))
post_delete_response_callback = post_save_response
person = Person(name='Mr Smith', age=25)
SASession.add(person)
SASession.flush()
PersonViews.include(self.config)
testapp = TestApp(self.config.make_wsgi_app())
request = testing.DummyRequest()
params = {'csrf_token': request.session.get_csrf_token()}
url = '/people/{0}/delete'.format(person.id)
response = testapp.post(url, params)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.location,
'{0}/people/2/edit'.format(self.application_url))
class TestLogin(TestBase):
def setUp(self):
super(TestLogin, self).setUp()
self.config.add_route('login', '/login')
pwd_context.load({'schemes': ['des_crypt']})
user = BaseUser(account_id='[email protected]', password='admin')
user.save()
SASession.flush()
def test_login_GET_request(self):
from views import user_login
request = testing.DummyRequest()
request.method = 'GET'
context = BaseRootFactory(request)
response = user_login(context, request)
self.assertIn('csrf_token', response)
self.assertIn('form', response)
def test_login_returns_bad_request_if_no_csrf_token(self):
from views import user_login
request = testing.DummyRequest()
request.method = 'POST'
context = BaseRootFactory(request)
response = user_login(context, request)
self.assertEqual(response.status_code, 400)
def test_login_POST_with_valid_credentials(self):
from views import user_login
request = testing.DummyRequest()
request.method = 'POST'
values = [
('csrf_token', request.session.get_csrf_token()),
('account_id', '[email protected]'),
('password', 'admin'),
]
request.POST = MultiDict(values)
context = BaseRootFactory(request)
response = user_login(context, request)
self.assertIsInstance(response, HTTPFound)
def test_login_POST_with_invalid_credentials(self):
from views import user_login
request = testing.DummyRequest()
request.method = 'POST'
values = [
('csrf_token', request.session.get_csrf_token()),
('account_id', '[email protected]'),
('password', 'wrong'),
]
request.POST = MultiDict(values)
context = BaseRootFactory(request)
response = user_login(context, request)
self.assertIn('csrf_token', response)
self.assertIn('form', response)
| mit | -7,523,207,633,318,657,000 | 33.619366 | 87 | 0.589815 | false | 4.14989 | true | false | false |
6aika/issue-reporting | issues/models/applications.py | 1 | 1241 | from django.db import models
from django.utils.crypto import get_random_string
from issues.excs import InvalidAppError
DEFAULT_APP_DATA = { # Used by `.autodetermine()` and the migration
'identifier': 'default',
'name': 'Default',
'key': ''
}
def generate_api_key():
return get_random_string(30)
class Application(models.Model):
active = models.BooleanField(default=True, db_index=True)
identifier = models.CharField(
max_length=64,
db_index=True,
help_text='a machine-readable name for this app (a package identifier, for instance)',
)
name = models.CharField(
max_length=64,
help_text='a human-readable name for this app',
)
key = models.CharField(max_length=32, unique=True, default=generate_api_key, editable=False)
@staticmethod
def autodetermine():
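        # Resolution rules: with no applications yet, lazily create the
        # default one; with exactly one, return it only if it is the keyless
        # default; with several, the caller must supply an explicit API key.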
app_count = Application.objects.count()
if app_count == 0:
return Application.objects.create(**DEFAULT_APP_DATA)
elif app_count == 1:
return Application.objects.filter(key='').first()
raise InvalidAppError('There are %d applications, so a valid API key must be passed in' % app_count)
def __str__(self):
return self.name
| mit | 6,477,491,313,848,653,000 | 30.025 | 108 | 0.654311 | false | 3.854037 | false | false | false |
dajobe/SoCo | soco/ms_data_structures.py | 1 | 21067 | # -*- coding: utf-8 -*-
# pylint: disable = star-args, too-many-arguments, unsupported-membership-test
# pylint: disable = not-an-iterable
# Disable while we have Python 2.x compatibility
# pylint: disable=useless-object-inheritance
"""This module contains all the data structures for music service plugins."""
# This needs to be integrated with Music Library data structures
from __future__ import unicode_literals
from .exceptions import DIDLMetadataError
from .utils import camel_to_underscore
from .xml import (
NAMESPACES, XML, ns_tag
)
def get_ms_item(xml, service, parent_id):
"""Return the music service item that corresponds to xml.
The class is identified by getting the type from the 'itemType' tag
"""
cls = MS_TYPE_TO_CLASS.get(xml.findtext(ns_tag('ms', 'itemType')))
out = cls.from_xml(xml, service, parent_id)
return out
def tags_with_text(xml, tags=None):
"""Return a list of tags that contain text retrieved recursively from an
XML tree."""
if tags is None:
tags = []
for element in xml:
if element.text is not None:
tags.append(element)
elif len(element) > 0: # pylint: disable=len-as-condition
tags_with_text(element, tags)
else:
message = 'Unknown XML structure: {}'.format(element)
raise ValueError(message)
return tags
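# Example (illustration only): for XML like ``<a><b>x</b><c><d>y</d></c></a>``
# the helper returns the ``<b>`` and ``<d>`` elements, i.e. every element
# that directly carries text, collected depth-first.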
class MusicServiceItem(object):
"""Class that represents a music service item."""
# These fields must be overwritten in the sub classes
item_class = None
valid_fields = None
required_fields = None
def __init__(self, **kwargs):
super(MusicServiceItem, self).__init__()
self.content = kwargs
@classmethod
def from_xml(cls, xml, service, parent_id):
"""Return a Music Service item generated from xml.
:param xml: Object XML. All items containing text are added to the
content of the item. The class variable ``valid_fields`` of each of
the classes list the valid fields (after translating the camel
case to underscore notation). Required fields are listed in the
class variable by that name (where 'id' has been renamed to
'item_id').
:type xml: :py:class:`xml.etree.ElementTree.Element`
:param service: The music service (plugin) instance that retrieved the
element. This service must contain ``id_to_extended_id`` and
``form_uri`` methods and ``description`` and ``service_id``
attributes.
:type service: Instance of sub-class of
:class:`soco.plugins.SoCoPlugin`
:param parent_id: The parent ID of the item, will either be the
extended ID of another MusicServiceItem or of a search
:type parent_id: str
For a track the XML can e.g. be on the following form:
.. code :: xml
<mediaMetadata xmlns="http://www.sonos.com/Services/1.1">
<id>trackid_141359</id>
<itemType>track</itemType>
<mimeType>audio/aac</mimeType>
<title>Teacher</title>
<trackMetadata>
<artistId>artistid_10597</artistId>
<artist>Jethro Tull</artist>
<composerId>artistid_10597</composerId>
<composer>Jethro Tull</composer>
<albumId>albumid_141358</albumId>
<album>MU - The Best Of Jethro Tull</album>
<albumArtistId>artistid_10597</albumArtistId>
<albumArtist>Jethro Tull</albumArtist>
<duration>229</duration>
<albumArtURI>http://varnish01.music.aspiro.com/sca/
imscale?h=90&w=90&img=/content/music10/prod/wmg/
1383757201/094639008452_20131105025504431/resources/094639008452.
jpg</albumArtURI>
<canPlay>true</canPlay>
<canSkip>true</canSkip>
<canAddToFavorites>true</canAddToFavorites>
</trackMetadata>
</mediaMetadata>
"""
# Add a few extra pieces of information
content = {'description': service.description,
'service_id': service.service_id,
'parent_id': parent_id}
# Extract values from the XML
all_text_elements = tags_with_text(xml)
for item in all_text_elements:
tag = item.tag[len(NAMESPACES['ms']) + 2:] # Strip namespace
tag = camel_to_underscore(tag) # Convert to nice names
if tag not in cls.valid_fields:
message = 'The info tag \'{}\' is not allowed for this item'.\
format(tag)
raise ValueError(message)
content[tag] = item.text
# Convert values for known types
for key, value in content.items():
if key == 'duration':
content[key] = int(value)
if key in ['can_play', 'can_skip', 'can_add_to_favorites',
'can_enumerate']:
content[key] = (value == 'true')
# Rename a single item
content['item_id'] = content.pop('id')
# And get the extended id
content['extended_id'] = service.id_to_extended_id(content['item_id'],
cls)
# Add URI if there is one for the relevant class
uri = service.form_uri(content, cls)
if uri:
content['uri'] = uri
# Check for all required values
for key in cls.required_fields:
if key not in content:
                message = 'An XML field that corresponds to the key \'{}\' '\
                          'is required. See the docstring for help.'.format(key)
                raise ValueError(message)
        return cls.from_dict(content)
@classmethod
def from_dict(cls, dict_in):
"""Initialize the class from a dict.
        :param dict_in: The dictionary that contains the item content. Required
            fields are listed in the class variable by that name
:type dict_in: dict
"""
kwargs = dict_in.copy()
args = [kwargs.pop(key) for key in cls.required_fields]
return cls(*args, **kwargs)
def __eq__(self, playable_item):
"""Return the equals comparison result to another ``playable_item``."""
if not isinstance(playable_item, MusicServiceItem):
return False
return self.content == playable_item.content
def __ne__(self, playable_item):
"""Return the not equals comparison result to another
``playable_item``"""
if not isinstance(playable_item, MusicServiceItem):
return True
return self.content != playable_item.content
def __repr__(self):
"""Return the repr value for the item.
The repr is on the form::
<class_name 'middle_part[0:40]' at id_in_hex>
where middle_part is either the title item in content, if it is set,
or ``str(content)``. The output is also cleared of non-ascii
characters.
"""
# 40 originates from terminal width (78) - (15) for address part and
# (19) for the longest class name and a little left for buffer
if self.content.get('title') is not None:
middle = self.content['title'].encode('ascii', 'replace')[0:40]
else:
middle = str(self.content).encode('ascii', 'replace')[0:40]
return '<{} \'{}\' at {}>'.format(self.__class__.__name__,
middle,
hex(id(self)))
def __str__(self):
"""Return the str value for the item::
<class_name 'middle_part[0:40]' at id_in_hex>
where middle_part is either the title item in content, if it is set, or
``str(content)``. The output is also cleared of non-ascii characters.
"""
return self.__repr__()
@property
def to_dict(self):
"""Return a copy of the content dict."""
return self.content.copy()
@property
def didl_metadata(self):
"""Return the DIDL metadata for a Music Service Track.
The metadata is on the form:
.. code :: xml
<DIDL-Lite xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/"
xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">
<item id="...self.extended_id..."
parentID="...self.parent_id..."
restricted="true">
<dc:title>...self.title...</dc:title>
<upnp:class>...self.item_class...</upnp:class>
<desc id="cdudn"
nameSpace="urn:schemas-rinconnetworks-com:metadata-1-0/">
self.content['description']
</desc>
</item>
</DIDL-Lite>
"""
# Check if this item is meant to be played
if not self.can_play:
message = 'This item is not meant to be played and therefore '\
'also not to create its own didl_metadata'
raise DIDLMetadataError(message)
# Check if we have the attributes to create the didl metadata:
for key in ['extended_id', 'title', 'item_class']:
if not hasattr(self, key):
message = 'The property \'{}\' is not present on this item. '\
'This indicates that this item was not meant to create '\
'didl_metadata'.format(key)
raise DIDLMetadataError(message)
if 'description' not in self.content:
message = 'The item for \'description\' is not present in '\
'self.content. This indicates that this item was not meant '\
'to create didl_metadata'
raise DIDLMetadataError(message)
# Main element, ugly? yes! but I have given up on using namespaces
# with xml.etree.ElementTree
item_attrib = {
'xmlns:dc': 'http://purl.org/dc/elements/1.1/',
'xmlns:upnp': 'urn:schemas-upnp-org:metadata-1-0/upnp/',
'xmlns:r': 'urn:schemas-rinconnetworks-com:metadata-1-0/',
'xmlns': 'urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/'
}
xml = XML.Element('DIDL-Lite', item_attrib)
# Item sub element
item_attrib = {
'parentID': '',
'restricted': 'true',
'id': self.extended_id
}
# Only add the parent_id if we have it
if self.parent_id:
item_attrib['parentID'] = self.parent_id
item = XML.SubElement(xml, 'item', item_attrib)
# Add title and class
XML.SubElement(item, 'dc:title').text = self.title
XML.SubElement(item, 'upnp:class').text = self.item_class
# Add the desc element
desc_attrib = {
'id': 'cdudn',
'nameSpace': 'urn:schemas-rinconnetworks-com:metadata-1-0/'
}
desc = XML.SubElement(item, 'desc', desc_attrib)
desc.text = self.content['description']
return xml
@property
def item_id(self):
"""Return the item id."""
return self.content['item_id']
@property
def extended_id(self):
"""Return the extended id."""
return self.content['extended_id']
@property
def title(self):
"""Return the title."""
return self.content['title']
@property
def service_id(self):
"""Return the service ID."""
return self.content['service_id']
@property
def can_play(self):
"""Return a boolean for whether the item can be played."""
return bool(self.content.get('can_play'))
@property
def parent_id(self):
"""Return the extended parent_id, if set, otherwise return None."""
return self.content.get('parent_id')
@property
def album_art_uri(self):
"""Return the album art URI if set, otherwise return None."""
return self.content.get('album_art_uri')
class MSTrack(MusicServiceItem):
"""Class that represents a music service track."""
item_class = 'object.item.audioItem.musicTrack'
valid_fields = [
'album', 'can_add_to_favorites', 'artist', 'album_artist_id', 'title',
'album_id', 'album_art_uri', 'album_artist', 'composer_id',
'item_type', 'composer', 'duration', 'can_skip', 'artist_id',
'can_play', 'id', 'mime_type', 'description'
]
# IMPORTANT. Keep this list, __init__ args and content in __init__ in sync
required_fields = ['title', 'item_id', 'extended_id', 'uri', 'description',
'service_id']
def __init__(self, title, item_id, extended_id, uri, description,
service_id, **kwargs):
"""Initialize MSTrack item."""
content = {
'title': title, 'item_id': item_id, 'extended_id': extended_id,
'uri': uri, 'description': description, 'service_id': service_id,
}
content.update(kwargs)
super(MSTrack, self).__init__(**content)
@property
def album(self):
"""Return the album title if set, otherwise return None."""
return self.content.get('album')
@property
def artist(self):
"""Return the artist if set, otherwise return None."""
return self.content.get('artist')
@property
def duration(self):
"""Return the duration if set, otherwise return None."""
return self.content.get('duration')
@property
def uri(self):
"""Return the URI."""
# x-sonos-http:trackid_19356232.mp4?sid=20&flags=32
return self.content['uri']
class MSAlbum(MusicServiceItem):
"""Class that represents a Music Service Album."""
item_class = 'object.container.album.musicAlbum'
valid_fields = [
'username', 'can_add_to_favorites', 'artist', 'title', 'album_art_uri',
'can_play', 'item_type', 'service_id', 'id', 'description',
'can_cache', 'artist_id', 'can_skip'
]
# IMPORTANT. Keep this list, __init__ args and content in __init__ in sync
required_fields = ['title', 'item_id', 'extended_id', 'uri', 'description',
'service_id']
def __init__(self, title, item_id, extended_id, uri, description,
service_id, **kwargs):
content = {
'title': title, 'item_id': item_id, 'extended_id': extended_id,
'uri': uri, 'description': description, 'service_id': service_id,
}
content.update(kwargs)
super(MSAlbum, self).__init__(**content)
@property
def artist(self):
"""Return the artist if set, otherwise return None."""
return self.content.get('artist')
@property
def uri(self):
"""Return the URI."""
# x-rincon-cpcontainer:0004002calbumid_22757081
return self.content['uri']
class MSAlbumList(MusicServiceItem):
"""Class that represents a Music Service Album List."""
item_class = 'object.container.albumlist'
valid_fields = [
'id', 'title', 'item_type', 'artist', 'artist_id', 'can_play',
'can_enumerate', 'can_add_to_favorites', 'album_art_uri', 'can_cache'
]
# IMPORTANT. Keep this list, __init__ args and content in __init__ in sync
required_fields = ['title', 'item_id', 'extended_id', 'uri', 'description',
'service_id']
def __init__(self, title, item_id, extended_id, uri, description,
service_id, **kwargs):
content = {
'title': title, 'item_id': item_id, 'extended_id': extended_id,
'uri': uri, 'description': description, 'service_id': service_id,
}
content.update(kwargs)
super(MSAlbumList, self).__init__(**content)
@property
def uri(self):
"""Return the URI."""
# x-rincon-cpcontainer:000d006cplaylistid_26b18dbb-fd35-40bd-8d4f-
# 8669bfc9f712
return self.content['uri']
class MSPlaylist(MusicServiceItem):
"""Class that represents a Music Service Play List."""
item_class = 'object.container.albumlist'
valid_fields = ['id', 'item_type', 'title', 'can_play', 'can_cache',
'album_art_uri', 'artist', 'can_enumerate',
'can_add_to_favorites', 'artist_id']
# IMPORTANT. Keep this list, __init__ args and content in __init__ in sync
required_fields = ['title', 'item_id', 'extended_id', 'uri', 'description',
'service_id']
def __init__(self, title, item_id, extended_id, uri, description,
service_id, **kwargs):
content = {
'title': title, 'item_id': item_id, 'extended_id': extended_id,
'uri': uri, 'description': description, 'service_id': service_id,
}
content.update(kwargs)
super(MSPlaylist, self).__init__(**content)
@property
def uri(self):
"""Return the URI."""
# x-rincon-cpcontainer:000d006cplaylistid_c86ddf26-8ec5-483e-b292-
# abe18848e89e
return self.content['uri']
class MSArtistTracklist(MusicServiceItem):
"""Class that represents a Music Service Artist Track List."""
item_class = 'object.container.playlistContainer.sameArtist'
valid_fields = ['id', 'title', 'item_type', 'can_play', 'album_art_uri']
# IMPORTANT. Keep this list, __init__ args and content in __init__ in sync
required_fields = ['title', 'item_id', 'extended_id', 'uri', 'description',
'service_id']
def __init__(self, title, item_id, extended_id, uri, description,
service_id, **kwargs):
content = {
'title': title, 'item_id': item_id, 'extended_id': extended_id,
'uri': uri, 'description': description, 'service_id': service_id,
}
content.update(kwargs)
super(MSArtistTracklist, self).__init__(**content)
@property
def uri(self):
"""Return the URI."""
# x-rincon-cpcontainer:100f006cartistpopsongsid_1566
return 'x-rincon-cpcontainer:100f006c{}'.format(self.item_id)
class MSArtist(MusicServiceItem):
"""Class that represents a Music Service Artist."""
valid_fields = [
'username', 'can_add_to_favorites', 'artist', 'title', 'album_art_uri',
'item_type', 'id', 'service_id', 'description', 'can_cache'
]
# Since MSArtist cannot produce didl_metadata, they are not strictly
# required, but it makes sense to require them anyway, since they are the
# fields that that describe the item
# IMPORTANT. Keep this list, __init__ args and content in __init__ in sync
required_fields = ['title', 'item_id', 'extended_id', 'service_id']
def __init__(self, title, item_id, extended_id, service_id, **kwargs):
content = {'title': title, 'item_id': item_id,
'extended_id': extended_id, 'service_id': service_id}
content.update(kwargs)
super(MSArtist, self).__init__(**content)
class MSFavorites(MusicServiceItem):
"""Class that represents a Music Service Favorite."""
valid_fields = ['id', 'item_type', 'title', 'can_play', 'can_cache',
'album_art_uri']
# Since MSFavorites cannot produce didl_metadata, they are not strictly
# required, but it makes sense to require them anyway, since they are the
# fields that that describe the item
# IMPORTANT. Keep this list, __init__ args and content in __init__ in sync
required_fields = ['title', 'item_id', 'extended_id', 'service_id']
def __init__(self, title, item_id, extended_id, service_id, **kwargs):
content = {'title': title, 'item_id': item_id,
'extended_id': extended_id, 'service_id': service_id}
content.update(kwargs)
super(MSFavorites, self).__init__(**content)
class MSCollection(MusicServiceItem):
"""Class that represents a Music Service Collection."""
valid_fields = ['id', 'item_type', 'title', 'can_play', 'can_cache',
'album_art_uri']
# Since MSCollection cannot produce didl_metadata, they are not strictly
# required, but it makes sense to require them anyway, since they are the
# fields that that describe the item
# IMPORTANT. Keep this list, __init__ args and content in __init__ in sync
required_fields = ['title', 'item_id', 'extended_id', 'service_id']
def __init__(self, title, item_id, extended_id, service_id, **kwargs):
content = {'title': title, 'item_id': item_id,
'extended_id': extended_id, 'service_id': service_id}
content.update(kwargs)
super(MSCollection, self).__init__(**content)
MS_TYPE_TO_CLASS = {'artist': MSArtist, 'album': MSAlbum, 'track': MSTrack,
'albumList': MSAlbumList, 'favorites': MSFavorites,
'collection': MSCollection, 'playlist': MSPlaylist,
'artistTrackList': MSArtistTracklist}
| mit | 7,402,956,122,372,239,000 | 37.164855 | 79 | 0.584896 | false | 3.876886 | false | false | false |
kuke/models | fluid/PaddleRec/word2vec/preprocess.py | 1 | 5840 | # -*- coding: utf-8 -*-
import os
import random
import re
import six
import argparse
import io
import math
prog = re.compile("[^a-z ]", flags=0)
def parse_args():
parser = argparse.ArgumentParser(
description="Paddle Fluid word2 vector preprocess")
parser.add_argument(
'--build_dict_corpus_dir', type=str, help="The dir of corpus")
parser.add_argument(
'--input_corpus_dir', type=str, help="The dir of input corpus")
parser.add_argument(
'--output_corpus_dir', type=str, help="The dir of output corpus")
parser.add_argument(
'--dict_path',
type=str,
default='./dict',
help="The path of dictionary ")
parser.add_argument(
'--min_count',
type=int,
default=5,
help="If the word count is less then min_count, it will be removed from dict"
)
parser.add_argument(
'--downsample',
type=float,
default=0.001,
help="filter word by downsample")
parser.add_argument(
'--filter_corpus',
action='store_true',
default=False,
help='Filter corpus')
parser.add_argument(
'--build_dict',
action='store_true',
default=False,
help='Build dict from corpus')
return parser.parse_args()
def text_strip(text):
    # English preprocessing rule: keep only lowercase letters and spaces
return prog.sub("", text.lower())
# Shameless copy from Tensorflow https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/text_encoder.py
# Unicode utility functions that work with Python 2 and 3
def native_to_unicode(s):
if _is_unicode(s):
return s
try:
return _to_unicode(s)
except UnicodeDecodeError:
res = _to_unicode(s, ignore_errors=True)
return res
def _is_unicode(s):
if six.PY2:
if isinstance(s, unicode):
return True
else:
if isinstance(s, str):
return True
return False
def _to_unicode(s, ignore_errors=False):
if _is_unicode(s):
return s
error_mode = "ignore" if ignore_errors else "strict"
return s.decode("utf-8", errors=error_mode)
def filter_corpus(args):
"""
filter corpus and convert id.
"""
word_count = dict()
word_to_id_ = dict()
word_all_count = 0
id_counts = []
word_id = 0
#read dict
with io.open(args.dict_path, 'r', encoding='utf-8') as f:
for line in f:
word, count = line.split()[0], int(line.split()[1])
word_count[word] = count
word_to_id_[word] = word_id
word_id += 1
id_counts.append(count)
word_all_count += count
#filter corpus and convert id
if not os.path.exists(args.output_corpus_dir):
os.makedirs(args.output_corpus_dir)
for file in os.listdir(args.input_corpus_dir):
with io.open(args.output_corpus_dir + '/convert_' + file, "w") as wf:
with io.open(
args.input_corpus_dir + '/' + file, encoding='utf-8') as rf:
print(args.input_corpus_dir + '/' + file)
for line in rf:
signal = False
line = text_strip(line)
words = line.split()
for item in words:
if item in word_count:
idx = word_to_id_[item]
else:
idx = word_to_id_[native_to_unicode('<UNK>')]
count_w = id_counts[idx]
corpus_size = word_all_count
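                    # Word2vec-style subsampling: a frequent word is kept with
                    # probability (sqrt(f/t) + 1) * t/f, where f is the word's
                    # relative corpus frequency and t is the --downsample
                    # threshold.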
keep_prob = (
math.sqrt(count_w /
(args.downsample * corpus_size)) + 1
) * (args.downsample * corpus_size) / count_w
r_value = random.random()
if r_value > keep_prob:
continue
wf.write(_to_unicode(str(idx) + " "))
signal = True
if signal:
wf.write(_to_unicode("\n"))
def build_dict(args):
"""
    preprocess the data, generate dictionary and save into dict_path.
:param corpus_dir: the input data dir.
:param dict_path: the generated dict path. the data in dict is "word count"
:param min_count:
:return:
"""
# word to count
word_count = dict()
for file in os.listdir(args.build_dict_corpus_dir):
with io.open(
args.build_dict_corpus_dir + "/" + file, encoding='utf-8') as f:
print("build dict : ", args.build_dict_corpus_dir + "/" + file)
for line in f:
line = text_strip(line)
words = line.split()
for item in words:
if item in word_count:
word_count[item] = word_count[item] + 1
else:
word_count[item] = 1
item_to_remove = []
for item in word_count:
if word_count[item] <= args.min_count:
item_to_remove.append(item)
unk_sum = 0
for item in item_to_remove:
unk_sum += word_count[item]
del word_count[item]
#sort by count
word_count[native_to_unicode('<UNK>')] = unk_sum
word_count = sorted(
word_count.items(), key=lambda word_count: -word_count[1])
with io.open(args.dict_path, 'w+', encoding='utf-8') as f:
for k, v in word_count:
f.write(k + " " + str(v) + '\n')
if __name__ == "__main__":
args = parse_args()
if args.build_dict:
build_dict(args)
elif args.filter_corpus:
filter_corpus(args)
else:
print(
"error command line, please choose --build_dict or --filter_corpus")
| apache-2.0 | -1,929,002,871,442,135,600 | 30.229947 | 134 | 0.52774 | false | 3.834537 | false | false | false |
sigma-random/Triton | examples/callback_after.py | 1 | 8375 |
# Output
#
# $ ./triton ./examples/callback_after.py ./samples/crackmes/crackme_xor a
# 0x40056d: push rbp
# -> #0 = (bvsub (_ bv140735022953896 64) (_ bv8 64)) ; Aligns stack
# -> #1 = (_ bv140735022953936 64)
# -> #2 = (_ bv4195694 64) ; RIP
#
# 0x40056e: mov rbp, rsp
# -> #3 = ((_ extract 63 0) #0)
# -> #4 = (_ bv4195697 64) ; RIP
#
# 0x400571: mov qword ptr [rbp-0x18], rdi
# -> #5 = (_ bv140735022960969 64)
# -> #6 = (_ bv4195701 64) ; RIP
#
# 0x400575: mov dword ptr [rbp-0x4], 0x0
# -> #7 = (_ bv0 32)
# -> #8 = (_ bv4195708 64) ; RIP
#
# 0x40057c: jmp 0x4005bd
# -> #9 = (_ bv4195773 64) ; RIP
#
# 0x4005bd: cmp dword ptr [rbp-0x4], 0x4
# -> #10 = (bvsub #7 ((_ sign_extend 0) (_ bv4 32)))
# -> #11 = (ite (= (_ bv16 32) (bvand (_ bv16 32) (bvxor #10 (bvxor #7 ((_ sign_extend 0) (_ bv4 32)))))) (_ bv1 1) (_ bv0 1)) ; Adjust flag
# -> #12 = (ite (bvult #7 ((_ sign_extend 0) (_ bv4 32))) (_ bv1 1) (_ bv0 1)) ; Carry flag
# -> #13 = (ite (= ((_ extract 31 31) (bvand (bvxor #7 ((_ sign_extend 0) (_ bv4 32))) (bvxor #7 #10))) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Overflow flag
# -> #14 = (ite (= (parity_flag ((_ extract 7 0) #10)) (_ bv0 1)) (_ bv1 1) (_ bv0 1)) ; Parity flag
# -> #15 = (ite (= ((_ extract 31 31) #10) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Sign flag
# -> #16 = (ite (= #10 (_ bv0 32)) (_ bv1 1) (_ bv0 1)) ; Zero flag
# -> #17 = (_ bv4195777 64) ; RIP
#
# 0x40057e: mov eax, dword ptr [rbp-0x4]
# -> #19 = ((_ extract 31 0) #10)
# -> #20 = (_ bv4195713 64) ; RIP
#
# 0x400581: movsxd rdx, eax
# -> #21 = ((_ sign_extend 32) ((_ extract 31 0) #19))
# -> #22 = (_ bv4195716 64) ; RIP
#
# 0x400584: mov rax, qword ptr [rbp-0x18]
# -> #23 = ((_ extract 63 0) #5)
# -> #24 = (_ bv4195720 64) ; RIP
#
# 0x400588: add rax, rdx
# -> #25 = (bvadd ((_ extract 63 0) #23) ((_ extract 63 0) #21))
# -> #26 = (ite (= (_ bv16 64) (bvand (_ bv16 64) (bvxor #25 (bvxor ((_ extract 63 0) #23) ((_ extract 63 0) #21))))) (_ bv1 1) (_ bv0 1)) ; Adjust flag
# -> #27 = (ite (bvult #25 ((_ extract 63 0) #23)) (_ bv1 1) (_ bv0 1)) ; Carry flag
# -> #28 = (ite (= ((_ extract 63 63) (bvand (bvxor ((_ extract 63 0) #23) (bvnot ((_ extract 63 0) #21))) (bvxor ((_ extract 63 0) #23) #25))) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Overflow flag
# -> #29 = (ite (= (parity_flag ((_ extract 7 0) #25)) (_ bv0 1)) (_ bv1 1) (_ bv0 1)) ; Parity flag
# -> #30 = (ite (= ((_ extract 63 63) #25) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Sign flag
# -> #31 = (ite (= #25 (_ bv0 64)) (_ bv1 1) (_ bv0 1)) ; Zero flag
# -> #32 = (_ bv4195723 64) ; RIP
#
# 0x40058b: movzx eax, byte ptr [rax]
# -> #33 = ((_ zero_extend 24) (_ bv97 8))
# -> #34 = (_ bv4195726 64) ; RIP
#
# 0x40058e: movsx eax, al
# -> #35 = ((_ sign_extend 24) ((_ extract 7 0) #33))
# -> #36 = (_ bv4195729 64) ; RIP
#
# 0x400591: sub eax, 0x1
# -> #37 = (bvsub ((_ extract 31 0) #35) (_ bv1 32))
# -> #38 = (ite (= (_ bv16 32) (bvand (_ bv16 32) (bvxor #37 (bvxor ((_ extract 31 0) #35) (_ bv1 32))))) (_ bv1 1) (_ bv0 1)) ; Adjust flag
# -> #39 = (ite (bvult ((_ extract 31 0) #35) (_ bv1 32)) (_ bv1 1) (_ bv0 1)) ; Carry flag
# -> #40 = (ite (= ((_ extract 31 31) (bvand (bvxor ((_ extract 31 0) #35) (_ bv1 32)) (bvxor ((_ extract 31 0) #35) #37))) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Overflow flag
# -> #41 = (ite (= (parity_flag ((_ extract 7 0) #37)) (_ bv0 1)) (_ bv1 1) (_ bv0 1)) ; Parity flag
# -> #42 = (ite (= ((_ extract 31 31) #37) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Sign flag
# -> #43 = (ite (= #37 (_ bv0 32)) (_ bv1 1) (_ bv0 1)) ; Zero flag
# -> #44 = (_ bv4195732 64) ; RIP
#
# 0x400594: xor eax, 0x55
# -> #45 = (bvxor ((_ extract 31 0) #37) (_ bv85 32))
# -> #46 = (_ bv0 1) ; Clears carry flag
# -> #47 = (_ bv0 1) ; Clears overflow flag
# -> #48 = (ite (= (parity_flag ((_ extract 7 0) #45)) (_ bv0 1)) (_ bv1 1) (_ bv0 1)) ; Parity flag
# -> #49 = (ite (= ((_ extract 31 31) #45) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Sign flag
# -> #50 = (ite (= #45 (_ bv0 32)) (_ bv1 1) (_ bv0 1)) ; Zero flag
# -> #51 = (_ bv4195735 64) ; RIP
#
# 0x400597: mov ecx, eax
# -> #52 = ((_ extract 31 0) #45)
# -> #53 = (_ bv4195737 64) ; RIP
#
# 0x400599: mov rdx, qword ptr [rip+0x200aa0]
# -> #54 = (_ bv4196036 64)
# -> #55 = (_ bv4195744 64) ; RIP
#
# 0x4005a0: mov eax, dword ptr [rbp-0x4]
# -> #56 = ((_ extract 31 0) #10)
# -> #57 = (_ bv4195747 64) ; RIP
#
# 0x4005a3: cdqe
# -> #58 = ((_ sign_extend 32) ((_ extract 31 0) #56))
# -> #59 = (_ bv4195749 64) ; RIP
#
# 0x4005a5: add rax, rdx
# -> #60 = (bvadd ((_ extract 63 0) #58) ((_ extract 63 0) #54))
# -> #61 = (ite (= (_ bv16 64) (bvand (_ bv16 64) (bvxor #60 (bvxor ((_ extract 63 0) #58) ((_ extract 63 0) #54))))) (_ bv1 1) (_ bv0 1)) ; Adjust flag
# -> #62 = (ite (bvult #60 ((_ extract 63 0) #58)) (_ bv1 1) (_ bv0 1)) ; Carry flag
# -> #63 = (ite (= ((_ extract 63 63) (bvand (bvxor ((_ extract 63 0) #58) (bvnot ((_ extract 63 0) #54))) (bvxor ((_ extract 63 0) #58) #60))) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Overflow flag
# -> #64 = (ite (= (parity_flag ((_ extract 7 0) #60)) (_ bv0 1)) (_ bv1 1) (_ bv0 1)) ; Parity flag
# -> #65 = (ite (= ((_ extract 63 63) #60) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Sign flag
# -> #66 = (ite (= #60 (_ bv0 64)) (_ bv1 1) (_ bv0 1)) ; Zero flag
# -> #67 = (_ bv4195752 64) ; RIP
#
# 0x4005a8: movzx eax, byte ptr [rax]
# -> #68 = ((_ zero_extend 24) (_ bv49 8))
# -> #69 = (_ bv4195755 64) ; RIP
#
# 0x4005ab: movsx eax, al
# -> #70 = ((_ sign_extend 24) ((_ extract 7 0) #68))
# -> #71 = (_ bv4195758 64) ; RIP
#
# 0x4005ae: cmp ecx, eax
# -> #72 = (bvsub ((_ extract 31 0) #52) ((_ extract 31 0) #70))
# -> #73 = (ite (= (_ bv16 32) (bvand (_ bv16 32) (bvxor #72 (bvxor ((_ extract 31 0) #52) ((_ extract 31 0) #70))))) (_ bv1 1) (_ bv0 1)) ; Adjust flag
# -> #74 = (ite (bvult ((_ extract 31 0) #52) ((_ extract 31 0) #70)) (_ bv1 1) (_ bv0 1)) ; Carry flag
# -> #75 = (ite (= ((_ extract 31 31) (bvand (bvxor ((_ extract 31 0) #52) ((_ extract 31 0) #70)) (bvxor ((_ extract 31 0) #52) #72))) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Overflow flag
# -> #76 = (ite (= (parity_flag ((_ extract 7 0) #72)) (_ bv0 1)) (_ bv1 1) (_ bv0 1)) ; Parity flag
# -> #77 = (ite (= ((_ extract 31 31) #72) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Sign flag
# -> #78 = (ite (= #72 (_ bv0 32)) (_ bv1 1) (_ bv0 1)) ; Zero flag
# -> #79 = (_ bv4195760 64) ; RIP
#
# 0x4005b0: jz 0x4005b9
# -> #80 = (ite (= #78 (_ bv1 1)) (_ bv4195769 64) (_ bv4195762 64)) ; RIP
#
# 0x4005b2: mov eax, 0x1
# -> #81 = (_ bv1 32)
# -> #82 = (_ bv4195767 64) ; RIP
#
# 0x4005b7: jmp 0x4005c8
# -> #83 = (_ bv4195784 64) ; RIP
#
# 0x4005c8: pop rbp
# -> #84 = #1
# -> #85 = (bvadd #0 (_ bv8 64)) ; Aligns stack
# -> #86 = (_ bv4195785 64) ; RIP
#
# loose
# $
from triton import *
# A callback must be a function with one argument. This argument is
# always an Instruction object and carries all of its information
def my_callback_after(instruction):
print '%#x: %s' %(instruction.address, instruction.assembly)
for se in instruction.symbolicExpressions:
print '\t -> #%d = %s %s' %(se.getId(), se.getAst(), (('; ' + se.getComment()) if se.getComment() is not None else ''))
print
if __name__ == '__main__':
# Start the symbolic analysis from the 'check' function
startAnalysisFromSymbol('check')
# Add a callback.
# BEFORE: Add the callback before the instruction processing
# AFTER: Add the callback after the instruction processing
# FINI: Add the callback at the end of the execution
addCallback(my_callback_after, IDREF.CALLBACK.AFTER)
# Run the instrumentation - Never returns
runProgram()
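    # The other hook points documented above can be registered the same way,
    # e.g. (a sketch):
    #   addCallback(my_callback_after, IDREF.CALLBACK.BEFORE)
    #   addCallback(my_callback_after, IDREF.CALLBACK.FINI)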
| lgpl-3.0 | 4,867,103,924,392,754,000 | 47.410405 | 200 | 0.469373 | false | 2.530211 | false | false | false |
lelit/tailor | vcpx/tests/svn.py | 1 | 13407 | # -*- mode: python; coding: utf-8 -*-
# :Project: vcpx -- svn specific tests
# :Created: Thu 11 Nov 2004 19:09:06 CET
# :Author:  Lele Gaifax <[email protected]>
# :License: GNU General Public License
#
from unittest import TestCase
from datetime import datetime
from vcpx.repository.svn import changesets_from_svnlog
from vcpx.tzinfo import UTC
class FakeLogger:
def warning(self, *args):
pass
debug = warning
class FakeRepository:
def __init__(self, repo, module):
self.repository = repo
self.module = module
self.log = FakeLogger()
FR = FakeRepository
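# Short alias used by the tests below when building fake repositories.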
class SvnLogParser(TestCase):
"""Ensure the svn log parser does its job"""
def getSvnLog(self, testname):
from os.path import join, split
logname = join(split(__file__)[0], 'data', testname)+'.log'
return file(logname)
def testRenameBehaviour(self):
"""Verify svn log parser behaves correctly on renames"""
log = self.getSvnLog('svn-simple_rename_test')
csets = changesets_from_svnlog(log, FR('file:///tmp/t/repo', '/trunk'))
cset = csets.next()
self.assertEqual(cset.author, 'lele')
self.assertEqual(cset.date, datetime(2004,11,12,15,05,37,134366,UTC))
self.assertEqual(cset.log, 'create tree')
self.assertEqual(len(cset.entries), 2)
entry = cset.entries[0]
self.assertEqual(entry.name, 'dir')
self.assertEqual(entry.action_kind, entry.ADDED)
entry = cset.entries[1]
self.assertEqual(entry.name, 'dir/a.txt')
self.assertEqual(entry.action_kind, entry.ADDED)
cset = csets.next()
self.assertEqual(cset.author, 'lele')
self.assertEqual(cset.date, datetime(2004,11,12,15,06,04,193650,UTC))
self.assertEqual(cset.log, 'rename dir')
self.assertEqual(len(cset.entries), 1)
entry = cset.entries[0]
self.assertEqual(entry.name, 'new')
self.assertEqual(entry.action_kind, entry.RENAMED)
self.assertEqual(entry.old_name, 'dir')
self.assertRaises(StopIteration, csets.next)
def testRenameOutBehaviour(self):
"""Verify svn log parser behaves correctly on renames out of scope"""
log = self.getSvnLog('svn-rename_out_test')
csets = changesets_from_svnlog(log,
FR('http://srv/svn/Shtoom', '/trunk'))
cset = csets.next()
self.assertEqual(cset.author, 'anthony')
self.assertEqual(cset.date, datetime(2004,11,9,6,54,20,709243,UTC))
self.assertEqual(cset.log, 'Moving to a /sandbox\n')
self.assertEqual(len(cset.entries), 1)
entry = cset.entries[0]
self.assertEqual(entry.name, 'shtoom/tmp')
self.assertEqual(entry.action_kind, entry.DELETED)
self.assertRaises(StopIteration, csets.next)
def testCopyAndRename(self):
"""Verify svn log parser behaves correctly on copies"""
log = self.getSvnLog('svn-copy_and_rename_test')
csets = list(changesets_from_svnlog(log,
FR('file:///tmp/rep', '/test')))
self.assertEqual(len(csets), 4)
cset = csets[1]
self.assertEqual(cset.author, 'lele')
self.assertEqual(cset.date, datetime(2005,1,8, 17,36,55,174757,UTC))
self.assertEqual(cset.log, 'Copy')
self.assertEqual(len(cset.entries), 1)
entry = cset.entries[0]
self.assertEqual(entry.name, 'file2.txt')
self.assertEqual(entry.action_kind, entry.ADDED)
self.assertEqual(entry.old_name, 'file1.txt')
cset = csets[2]
self.assertEqual(cset.date, datetime(2005,1,8, 17,42,41,347315,UTC))
self.assertEqual(cset.log, 'Remove')
self.assertEqual(len(cset.entries), 1)
entry = cset.entries[0]
self.assertEqual(entry.name, 'file1.txt')
self.assertEqual(entry.action_kind, entry.DELETED)
cset = csets[3]
self.assertEqual(cset.date, datetime(2005,1,8, 17,43,9,909127,UTC))
self.assertEqual(cset.log, 'Move')
self.assertEqual(len(cset.entries), 1)
entry = cset.entries[0]
self.assertEqual(entry.name, 'file1.txt')
self.assertEqual(entry.action_kind, entry.RENAMED)
self.assertEqual(entry.old_name, 'file2.txt')
def testREvent(self):
"""Verify how tailor handle svn "R" event"""
log = self.getSvnLog('svn-svn_r_event_test')
csets = changesets_from_svnlog(log, FR('file:///tmp/rep', '/trunk'))
cset = csets.next()
cset = csets.next()
self.assertEqual(cset.author, 'cmlenz')
self.assertEqual(cset.date, datetime(2005,3,21, 8,34, 2,522947,UTC))
self.assertEqual(len(cset.entries), 7)
entry = cset.entries[0]
self.assertEqual(entry.name, 'setup.py')
self.assertEqual(entry.action_kind, entry.UPDATED)
entry = cset.entries[1]
self.assertEqual(entry.name, 'trac/scripts')
self.assertEqual(entry.action_kind, entry.ADDED)
entry = cset.entries[2]
self.assertEqual(entry.name, 'trac/scripts/__init__.py')
self.assertEqual(entry.action_kind, entry.ADDED)
entry = cset.entries[3]
self.assertEqual(entry.name, 'trac/scripts/admin.py')
self.assertEqual(entry.action_kind, entry.RENAMED)
self.assertEqual(entry.old_name, 'scripts/trac-admin')
entry = cset.entries[4]
self.assertEqual(entry.name, 'trac/tests/environment.py')
self.assertEqual(entry.action_kind, entry.UPDATED)
entry = cset.entries[6]
self.assertEqual(entry.name, 'scripts/trac-admin')
self.assertEqual(entry.action_kind, entry.ADDED)
self.assertRaises(StopIteration, csets.next)
def testRenameReplace(self):
"""Verify how tailor handle svn "R" event on renames"""
log = self.getSvnLog('svn-rename_replace')
csets = changesets_from_svnlog(log, FR('file:///tmp/rep',
'/cedar-backup2/trunk'))
cset = csets.next()
self.assertEqual(len(cset.entries), 7)
for entry, expected in map(None, cset.entries,
(('Makefile', 'UPD'),
('test', 'REN', 'unittest'),
('test/__init__.py', 'ADD'),
('test/filesystemtests.py', 'ADD'),
('test/knapsacktests.py', 'ADD'),
('util/createtree.py', 'UPD'),
('test/data', 'REN', 'unittest/data'))):
self.assertEqual(entry.name, expected[0])
self.assertEqual(entry.action_kind, expected[1],
msg=entry.name+': got %r, expected %r' %
(entry.action_kind, expected[1]))
if expected[1]=='REN':
self.assertEqual(entry.old_name, expected[2],
msg=entry.name+': got %r, expected %r' %
(entry.old_name, expected[2]))
def testTrackingRoot(self):
"""Verify we are able to track the root of the repository"""
log = self.getSvnLog('svn-svn_repos_root_test')
csets = list(changesets_from_svnlog(log,
FR('svn+ssh://caia/tmp/svn', '/')))
self.assertEqual(len(csets), 4)
cset = csets[1]
self.assertEqual(len(cset.entries), 3)
entry = cset.entries[0]
self.assertEqual(entry.name, 'branches/branch-a')
self.assertEqual(entry.action_kind, entry.ADDED)
entry = cset.entries[1]
self.assertEqual(entry.name, 'branches/branch-a/a.txt')
self.assertEqual(entry.action_kind, entry.ADDED)
entry = cset.entries[2]
self.assertEqual(entry.name, 'branches/branch-a/b.txt')
self.assertEqual(entry.action_kind, entry.ADDED)
def testPydistStrangeCase(self):
"""Verify we are able to groke with svn 'R' strangeness"""
log = self.getSvnLog('svn-pydist_strange_case')
csets = changesets_from_svnlog(log, FR('http://srv/svn', '/py/dist'))
cset = csets.next()
self.assertEqual(len(cset.entries), 3)
entry = cset.entries[0]
self.assertEqual(entry.name, 'py/documentation/example')
self.assertEqual(entry.action_kind, entry.RENAMED)
self.assertEqual(entry.old_name, 'example')
entry = cset.entries[1]
self.assertEqual(entry.name, 'py/documentation/test.txt')
self.assertEqual(entry.action_kind, entry.UPDATED)
entry = cset.entries[2]
self.assertEqual(entry.name, 'py/documentation/example/test')
self.assertEqual(entry.action_kind, entry.RENAMED)
self.assertEqual(entry.old_name, 'example/test')
self.assertRaises(StopIteration, csets.next)
def testUnicode(self):
"""Verify svn parser returns unicode strings"""
log = self.getSvnLog('svn-encoding_test')
csets = changesets_from_svnlog(log, FR('http://srv/plone/CMFPlone',
'/branches/2.1'))
log = csets.next().log
self.assertEqual(type(log), type(u'€'))
self.assertEqual(len(log), 91)
self.assertRaises(UnicodeEncodeError, log.encode, 'iso-8859-1')
self.assertEqual(len(log.encode('ascii', 'ignore')), 90)
self.assertRaises(StopIteration, csets.next)
def testCopyAndReplace(self):
"""Verify the svn parser handle copy+replace"""
log = self.getSvnLog('svn-copy_and_replace_test')
csets = changesets_from_svnlog(log,
FR('http://srv/repos/trac', '/trunk'))
cset = csets.next()
self.assertEqual(len(cset.entries), 7)
entry = cset.entries[0]
self.assertEqual(entry.name, 'setup.py')
self.assertEqual(entry.action_kind, entry.UPDATED)
entry = cset.entries[1]
self.assertEqual(entry.name, 'trac/scripts')
self.assertEqual(entry.action_kind, entry.ADDED)
entry = cset.entries[2]
self.assertEqual(entry.name, 'trac/scripts/__init__.py')
self.assertEqual(entry.action_kind, entry.ADDED)
entry = cset.entries[3]
self.assertEqual(entry.name, 'trac/scripts/admin.py')
self.assertEqual(entry.action_kind, entry.RENAMED)
self.assertEqual(entry.old_name, 'scripts/trac-admin')
entry = cset.entries[4]
self.assertEqual(entry.name, 'trac/tests/environment.py')
self.assertEqual(entry.action_kind, entry.UPDATED)
entry = cset.entries[5]
self.assertEqual(entry.name, 'trac/tests/tracadmin.py')
self.assertEqual(entry.action_kind, entry.UPDATED)
entry = cset.entries[6]
self.assertEqual(entry.name, 'scripts/trac-admin')
self.assertEqual(entry.action_kind, entry.ADDED)
def testCopyFromAndRemove(self):
"""Verify the svn parser handle copyfrom+remove"""
log = self.getSvnLog('svn-copyfrom_and_remove_test')
csets = changesets_from_svnlog(log, FR('http://srv/samba',
'/branches/SAMBA_4_0'))
cset = csets.next()
self.assertEqual(len(cset.entries), 4)
entry = cset.entries[0]
self.assertEqual(entry.name, 'source/nsswitch')
self.assertEqual(entry.action_kind, entry.ADDED)
entry = cset.entries[1]
self.assertEqual(entry.name, 'source/nsswitch/config.m4')
self.assertEqual(entry.action_kind, entry.ADDED)
entry = cset.entries[2]
self.assertEqual(entry.name, 'source/nsswitch/wb_common.c')
self.assertEqual(entry.action_kind, entry.ADDED)
entry = cset.entries[3]
self.assertEqual(entry.name, 'source/nsswitch/wins.c')
self.assertEqual(entry.action_kind, entry.DELETED)
def testIncrementalParser(self):
"""Verify that the svn log parser is effectively incremental"""
log = self.getSvnLog('svn-svn_repos_root_test')
csets = list(changesets_from_svnlog(log,
FR('svn+ssh://caia/tmp/svn', '/'),
chunksize=100))
self.assertEqual(len(csets), 4)
def testExternalCopies(self):
"""Verify that external copies+deletions are handled ok"""
log = self.getSvnLog('svn-external_copies_test')
csets = changesets_from_svnlog(log,
FR('svn+ssh://caia/tmp/svn', '/trunk'))
cset = csets.next()
cset = csets.next()
self.assertEqual(len(cset.entries), 5)
entry = cset.removedEntries()[0]
self.assertEqual(entry.name, 'README_LOGIN')
cset = csets.next()
self.assertEqual(len(cset.entries), 5)
def testCollidingNames(self):
"""Verify svn log parser behaves correctly with colliding names"""
# Sorry, couldn't find a better name
log = self.getSvnLog('svn-colliding_names_test')
csets = changesets_from_svnlog(log,
FR('svn://ixion.tartarus.org/main', '/putty'))
cset = csets.next()
self.assertEqual(len(cset.entries), 1)
| gpl-3.0 | 3,799,975,612,509,294,000 | 36.54902 | 85 | 0.596121 | false | 3.578484 | true | false | false |
peterhinch/micropython-epaper | epaper.py | 1 | 19329 | # epaper.py main module for Embedded Artists' 2.7 inch E-paper Display.
# Peter Hinch
# version 0.9
# 17 Jun 2018 Adapted for VFS mount/unmount.
# 18 Mar 2016 Adafruit module and fast (partial) updates.
# 2 Mar 2016 Power control support removed. Support for fonts as persistent byte code
# 29th Jan 2016 Monospaced fonts supported.
# Copyright 2015 Peter Hinch
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
# Code translated and developed from https://developer.mbed.org/users/dreschpe/code/EaEpaper/
import pyb, gc, uos
from panel import NORMAL, FAST, EMBEDDED_ARTISTS, ADAFRUIT
LINES_PER_DISPLAY = const(176) # 2.7 inch panel only!
BYTES_PER_LINE = const(33)
BITS_PER_LINE = const(264)
gc.collect()
NEWLINE = const(10) # ord('\n')
class EPDError(OSError):
pass
def checkstate(state, msg):
if not state:
raise EPDError(msg)
# Generator parses an XBM file returning width, height, followed by data bytes
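# A minimal XBM file looks like this (illustrative):
#   #define img_width 8
#   #define img_height 2
#   static char img_bits[] = {
#     0xaa, 0x55 };
# for which the generator yields 8, 2, 0xaa, 0x55.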
def get_xbm_data(sourcefile):
errmsg = ''.join(("File: '", sourcefile, "' is not a valid XBM file"))
try:
with open(sourcefile, 'r') as f:
phase = 0
for line in f:
if phase < 2:
if line.startswith('#define'):
yield int(line.split(' ')[-1])
phase += 1
if phase == 2:
start = line.find('{')
if start >= 0:
line = line[start +1:]
phase += 1
if phase == 3:
if not line.isspace():
phase += 1
if phase == 4:
end = line.find('}')
if end >=0 :
line = line[:end]
phase += 1
hexnums = line.split(',')
if hexnums[0] != '':
for hexnum in [q for q in hexnums if not q.isspace()]:
yield int(hexnum, 16)
if phase != 5 :
print(errmsg)
except OSError:
print("Can't open " + sourcefile + " for reading")
class FontFileError(Exception):
pass
class Font(object):
def __init__(self):
self.bytes_per_ch = 0 # Number of bytes to define a character
self.bytes_horiz = 0 # No. of bytes per character row
        self.bits_horiz = 0                 # Horizontal bits in character matrix
self.bits_vert = 0 # Vertical bits in character matrix
self.monospaced = False # Default is variable width
self.exists = False
self.modfont = None
self.fontfilename = None
self.fontfile = None
    # monospaced only applies to binary files. Since these lack an index
    # (FIXME), characters are saved in fixed pitch with width data, hence can
    # be rendered as fixed or variable pitch.
# Python fonts are saved as variable or fixed pitch depending on the -f arg.
# The monospaced flag saved with the file enables the renderer to
# determine the correct x advance.
def __call__(self, fontfilename, monospaced = False):
self.fontfilename = fontfilename
self.monospaced = monospaced
return self
    def __enter__(self):
if isinstance(self.fontfilename, type(uos)): # Using a Python font
self.fontfile = None
f = self.fontfilename
ok = False
try:
ok = f.hmap() and f.reverse()
except AttributeError:
pass
if not ok:
raise FontFileError('Font module {} is invalid'.format(f.__name__))
self.monospaced = f.monospaced()
self.modfont = f
self.bits_horiz = f.max_width()
self.bits_vert = f.height()
else:
self.modfont = None
try:
f = open(self.fontfilename, 'rb')
except OSError as err:
raise FontFileError(err)
self.fontfile = f
header = f.read(4)
if header[0] == 0x42 and header[1] == 0xe7:
self.bits_horiz = header[2] # font[1]
self.bits_vert = header[3] # font[2]
else:
raise FontFileError('Font file {} is invalid'.format(self.fontfilename))
self.bytes_horiz = (self.bits_horiz + 7) // 8
self.bytes_per_ch = self.bytes_horiz * self.bits_vert
self.exists = True
return self
def __exit__(self, *_):
self.exists = False
if self.fontfile is not None:
self.fontfile.close()
class Display(object):
FONT_HEADER_LENGTH = 4
def __init__(self, side='L',*, mode=NORMAL, model=EMBEDDED_ARTISTS, use_flash=False, up_time=None):
self.flash = None # Assume flash is unused
self.in_context = False
try:
intside = {'l':0, 'r':1}[side.lower()]
except (KeyError, AttributeError):
raise ValueError("Side must be 'L' or 'R'")
if model not in (EMBEDDED_ARTISTS, ADAFRUIT):
raise ValueError('Unsupported model')
if mode == FAST and use_flash:
raise ValueError('Flash memory unavailable in fast mode')
if mode == NORMAL and up_time is not None:
raise ValueError('Cannot set up_time in normal mode')
if mode == NORMAL:
from epd import EPD
self.epd = EPD(intside, model)
elif mode == FAST:
from epdpart import EPD
self.epd = EPD(intside, model, up_time)
else:
raise ValueError('Unsupported mode {}'.format(mode))
self.mode = mode
self.font = Font()
gc.collect()
self.locate(0, 0) # Text cursor: default top left
self.mounted = False # umountflash() not to sync
if use_flash:
from flash import FlashClass
gc.collect()
self.flash = FlashClass(intside)
self.umountflash() # In case mounted by prior tests.
self.mountflash()
gc.collect()
def checkcm(self):
if not (self.mode == NORMAL or self.in_context):
raise EPDError('Fast mode must be run using a context manager')
def __enter__(self): # Power up
checkstate(self.mode == FAST, "In normal mode, can't use context manager")
self.in_context = True
self.epd.enter()
return self
def __exit__(self, *_): # shut down
self.in_context = False
self.epd.exit()
pass
def mountflash(self):
if self.flash is None: # Not being used
return
self.flash.begin() # Initialise.
vfs = uos.VfsFat(self.flash) # Instantiate FAT filesystem
uos.mount(vfs, self.flash.mountpoint)
self.mounted = True
def umountflash(self): # Unmount flash
if self.flash is None:
return
if self.mounted:
self.flash.synchronise()
try:
uos.umount(self.flash.mountpoint)
except OSError:
pass # Don't care if it wasn't mounted
self.flash.end() # Shut down
self.mounted = False # flag unmounted to prevent spurious syncs
def show(self):
self.checkcm()
self.umountflash() # sync, umount flash, shut it down and disable SPI
if self.mode == NORMAL: # EPD functions which access the display electronics must be
with self.epd as epd: # called from a with block to ensure proper startup & shutdown
epd.showdata()
else: # Fast mode: already in context manager
self.epd.showdata()
self.mountflash()
def clear_screen(self, show=True, both=False):
self.checkcm()
self.locate(0, 0) # Reset text cursor
self.epd.clear_data(both)
if show:
if self.mode == NORMAL:
self.show()
else:
self.epd.EPD_clear()
def refresh(self, fast =True): # Fast mode only functions
checkstate(self.mode == FAST, 'refresh() invalid in normal mode')
self.checkcm()
self.epd.refresh(fast)
def exchange(self, clear_data):
checkstate(self.mode == FAST, 'exchange() invalid in normal mode')
self.checkcm()
self.epd.exchange(clear_data)
@property
def temperature(self): # return temperature as integer in Celsius
return self.epd.temperature
@property
def location(self):
return self.char_x, self.char_y
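    # Note on the emitters used below: @micropython.native compiles a method
    # to machine code with normal Python semantics, while @micropython.viper
    # additionally uses machine-word integers and raw pointers (ptr8), which
    # is why setpixelfast() has typed arguments and leaves bounds checking to
    # the caller.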
@micropython.native
def setpixel(self, x, y, black): # 41uS. Clips to borders. x, y must be integer
if y < 0 or y >= LINES_PER_DISPLAY or x < 0 or x >= BITS_PER_LINE :
return
image = self.epd.image
omask = 1 << (x & 0x07)
index = (x >> 3) + y *BYTES_PER_LINE
if black:
image[index] |= omask
else:
image[index] &= (omask ^ 0xff)
@micropython.viper
def setpixelfast(self, x: int, y: int, black: int): # 27uS. Caller checks bounds
image = ptr8(self.epd.image)
omask = 1 << (x & 0x07)
index = (x >> 3) + y * 33 #BYTES_PER_LINE
if black:
image[index] |= omask
else:
image[index] &= (omask ^ 0xff)
# ****** Simple graphics support ******
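    # The primitives below are integer-only: _line() implements Bresenham's
    # line algorithm and _circle() the midpoint circle algorithm, so no
    # floating point is needed per pixel.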
    def _line(self, x0, y0, x1, y1, black = True): # Single-pixel line
dx = x1 -x0
dy = y1 -y0
dx_sym = 1 if dx > 0 else -1
dy_sym = 1 if dy > 0 else -1
dx = dx_sym*dx
dy = dy_sym*dy
dx_x2 = dx*2
dy_x2 = dy*2
if (dx >= dy):
di = dy_x2 - dx
while (x0 != x1):
self.setpixel(x0, y0, black)
x0 += dx_sym
if (di<0):
di += dy_x2
else :
di += dy_x2 - dx_x2
y0 += dy_sym
self.setpixel(x0, y0, black)
else:
di = dx_x2 - dy
while (y0 != y1):
self.setpixel(x0, y0, black)
y0 += dy_sym
if (di < 0):
di += dx_x2
else:
di += dx_x2 - dy_x2
x0 += dx_sym
self.setpixel(x0, y0, black)
def line(self, x0, y0, x1, y1, width =1, black = True): # Draw line
x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
if abs(x1 - x0) > abs(y1 - y0): # < 45 degrees
for w in range(-width//2 +1, width//2 +1):
self._line(x0, y0 +w, x1, y1 +w, black)
else:
for w in range(-width//2 +1, width//2 +1):
self._line(x0 +w, y0, x1 +w, y1, black)
def _rect(self, x0, y0, x1, y1, black): # Draw rectangle
self.line(x0, y0, x1, y0, 1, black)
self.line(x0, y0, x0, y1, 1, black)
self.line(x0, y1, x1, y1, 1, black)
self.line(x1, y0, x1, y1, 1, black)
def rect(self, x0, y0, x1, y1, width =1, black = True): # Draw rectangle
x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
x0, x1 = (x0, x1) if x1 > x0 else (x1, x0) # x0, y0 is top left, x1, y1 is bottom right
y0, y1 = (y0, y1) if y1 > y0 else (y1, y0)
for w in range(width):
self._rect(x0 +w, y0 +w, x1 -w, y1 -w, black)
def fillrect(self, x0, y0, x1, y1, black = True): # Draw filled rectangle
x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
x0, x1 = (x0, x1) if x1 > x0 else (x1, x0)
y0, y1 = (y0, y1) if y1 > y0 else (y1, y0)
for x in range(x0, x1):
for y in range(y0, y1):
self.setpixel(x, y, black)
def _circle(self, x0, y0, r, black = True): # Single pixel circle
x = -r
y = 0
err = 2 -2*r
while x <= 0:
self.setpixel(x0 -x, y0 +y, black)
self.setpixel(x0 +x, y0 +y, black)
self.setpixel(x0 +x, y0 -y, black)
self.setpixel(x0 -x, y0 -y, black)
e2 = err
if (e2 <= y):
y += 1
err += y*2 +1
if (-x == y and e2 <= x):
e2 = 0
if (e2 > x):
x += 1
err += x*2 +1
def circle(self, x0, y0, r, width =1, black = True): # Draw circle
x0, y0, r = int(x0), int(y0), int(r)
for r in range(r, r -width, -1):
self._circle(x0, y0, r, black)
def fillcircle(self, x0, y0, r, black = True): # Draw filled circle
x0, y0, r = int(x0), int(y0), int(r)
x = -r
y = 0
err = 2 -2*r
while x <= 0:
self._line(x0 -x, y0 -y, x0 -x, y0 +y, black)
self._line(x0 +x, y0 -y, x0 +x, y0 +y, black)
e2 = err
if (e2 <= y):
y +=1
err += y*2 +1
if (-x == y and e2 <= x):
e2 = 0
if (e2 > x):
x += 1
err += x*2 +1
# ****** Image display ******
def load_xbm(self, sourcefile, x = 0, y = 0):
g = get_xbm_data(sourcefile)
width = next(g)
height = next(g)
self.loadgfx(g, width, height, x, y)
# Load a rectangular region with a bitmap supplied by a generator.
def loadgfx(self, gen, width, height, x0, y0):
byteoffset = x0 >> 3
bitshift = x0 & 7 # Offset of image relative to byte boundary
bytes_per_line = width >> 3
if width & 7 > 0:
bytes_per_line += 1
for line in range(height):
y = y0 + line
if y >= LINES_PER_DISPLAY:
break
index = y * BYTES_PER_LINE + byteoffset
bitsleft = width
x = x0
for byte in range(bytes_per_line):
val = next(gen)
bits_to_write = min(bitsleft, 8)
x += bits_to_write
if x <= BITS_PER_LINE:
if bitshift == 0 and bits_to_write == 8:
self.epd.image[index] = val
index += 1
else:
mask = ((1 << bitshift) -1) # Bits in current byte to preserve
bitsused = bitshift + bits_to_write
overflow = max(0, bitsused -8)
underflow = max(0, 8 -bitsused)
if underflow: # Underflow in current byte
mask = (mask | ~((1 << bitsused) -1)) & 0xff
nmask = ~mask & 0xff # Bits to overwrite
self.epd.image[index] = (self.epd.image[index] & mask) | ((val << bitshift) & nmask)
index += 1
if overflow : # Bits to write to next byte
mask = ~((1 << overflow) -1) & 0xff # Preserve
self.epd.image[index] = (self.epd.image[index] & mask) | (val >> (8 - bitshift))
bitsleft -= bits_to_write
# ****** Text support ******
def locate(self, x, y): # set cursor position
self.char_x = x # Text input cursor to (x, y)
self.char_y = y
# font.bytes_horiz
# In cse of font file it's the pysical width of every character as stored in file
# In case of Python font it's the value of max_width converted to bytes
def _character(self, c, usefile):
font = self.font # Cache for speed
bits_vert = font.bits_vert
if usefile:
ff = font.fontfile
ff.seek(self.FONT_HEADER_LENGTH + (c -32) * (font.bytes_per_ch + 1))
buf = ff.read(font.bytes_per_ch + 1)
# Characters are stored as constant width.
bytes_horiz = font.bytes_horiz # No. of bytes before next row
# Advance = bits_horiz if variable pitch else font.bits_horiz
bits_horiz = buf[0]
offset = 1
else:
modfont = font.modfont
buf, height, bits_horiz = modfont.get_ch(chr(c))
# Width varies between characters
bytes_horiz = (bits_horiz + 7) // 8
offset = 0
# Sanity checks: prevent index errors. Wrapping should be done at string/word level.
if (self.char_x + bytes_horiz * 8) > BITS_PER_LINE :
self.char_x = 0
self.char_y += bits_vert
if self.char_y >= (LINES_PER_DISPLAY - bits_vert):
self.char_y = 0
image = self.epd.image
y = self.char_y # x, y are pixel coordinates
for bit_vert in range(bits_vert): # for each vertical line
x = self.char_x
for byte_horiz in range(bytes_horiz):
fontbyte = buf[bit_vert * bytes_horiz + byte_horiz + offset]
index = (x >> 3) + y * BYTES_PER_LINE
nbits = x & 0x07
if nbits == 0:
image[index] = fontbyte
else:
image[index] &= (0xff >> (8 - nbits))
image[index] |= (fontbyte << nbits)
image[index + 1] &= (0xff << nbits)
image[index + 1] |= (fontbyte >> (8 - nbits))
x += 8
y += 1
self.char_x += font.bits_horiz if font.monospaced else bits_horiz
def _putc(self, value, usefile): # print char
if (value == NEWLINE):
self.char_x = 0
self.char_y += self.font.bits_vert
if (self.char_y >= LINES_PER_DISPLAY - self.font.bits_vert):
self.char_y = 0
else:
self._character(value, usefile)
return value
def puts(self, s): # Output a string at cursor
if self.font.exists:
if self.font.modfont is None: # No font module: using binary file
for char in s:
c = ord(char)
if (c > 31 and c < 127) or c == NEWLINE:
self._putc(c, True)
else: # Python font file is self-checking
for char in s:
self._putc(ord(char), False)
else:
raise FontFileError("There is no current font")
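# Illustrative usage in normal mode (a sketch; the side and the font filename
# are assumptions):
#   d = Display('L')
#   with d.font('sans.fnt'):
#       d.puts('Hello world\n')
#   d.show()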
| apache-2.0 | 1,606,973,317,774,428,700 | 37.735471 | 110 | 0.496197 | false | 3.690854 | false | false | false |
ecreall/nova-ideo | novaideo/utilities/alerts_utility.py | 1 | 9428 | # -*- coding: utf8 -*-
# Copyright (c) 2015 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import json
import requests
import re
from persistent.list import PersistentList
# from urllib.request import urlopen
from pyramid.threadlocal import get_current_request
from substanced.util import get_oid
from pyramid_sms.utils import normalize_us_phone_number
from pyramid_sms.outgoing import send_sms
from dace.objectofcollaboration.principal.util import get_current
import html_diff_wrapper
from novaideo.ips.mailer import mailer_send
# from novaideo.content.resources import (
# arango_server, create_collection)
from novaideo.content.alert import INTERNAL_ALERTS
from novaideo.utilities.util import connect
from novaideo.content.comment import Comment, Commentable
from novaideo import log, _
# SLACK_CHANNELS = {
# 'id': {'url': 'url',
# 'name': 'name'}
# }
# def alert_slack(senders=[], recipients=[], **kwargs):
# """
# recipients: ['improve', 'questionnaire']
# """
# for recipient in recipients:
# channel_data = SLACK_CHANNELS[recipient]
# kwargs['channel'] = "#" + channel_data['name']
# kwargs['username'] = 'webhookbot'
# kwargs = 'payload=' + json.dumps(kwargs)
# url = channel_data['url']
# urlopen(url, kwargs.encode())
# def alert_arango(senders=[], recipients=[], **kwargs):
# """
# recipients: ['creationculturelle.improve']
# """
# for recipient in recipients:
# recipient_parts = recipient.split('.')
# db_id = recipient_parts[0]
# collection_id = recipient_parts[1]
# db = arango_server.db(db_id)
# if db:
# collection = create_collection(db, collection_id)
# collection.create_document(kwargs)
def get_user_data(user, id, request=None):
if not isinstance(user, str):
if not request:
request = get_current_request()
localizer = request.localizer
user_title = getattr(user, 'user_title', '')
user_title = localizer.translate(_(user_title)) \
if user_title else ''
return {
id+'_title': user_title,
id+'_last_name': getattr(user, 'last_name', ''),
id+'_first_name': getattr(user, 'first_name', ''),
}
return {
id+'_title': '',
id+'_last_name': '',
id+'_first_name': '',
}
def get_entity_data(entity, id, request=None):
if not request:
request = get_current_request()
def default_presentation_text(nb_characters=400):
return getattr(entity, 'description', "")[:nb_characters]+'...'
def default_get_url(request):
        return request.resource_url(entity, '@@index')
title = "The " + entity.__class__.__name__.lower()
entity_type = request.localizer.translate(_(title))
return {
id+'_title': getattr(entity, 'title', ''),
id+'_content': getattr(
entity, 'presentation_text', default_presentation_text)(),
id+'_url': getattr(
entity, 'get_url', default_get_url)(request),
id+'_oid': get_oid(entity, 'None'),
id+'_type': entity_type,
id+'_icon': getattr(entity, 'icon', ''),
}
def alert_comment_nia(context, request, root, **kwargs):
nia = root['principals']['users'].get('nia', None)
channel = context.channel
kind = kwargs.pop('internal_kind', None)
alert_class = INTERNAL_ALERTS.get(kind, None)
if nia and channel and alert_class:
# For Nia the alert is volatil
alert = alert_class(**kwargs)
alert.subject = context
comment_text = alert.render('nia', None, request).strip()
# remove spaces and new lines between tags
comment_text = re.sub('>[\n|\r|\s]*<', '><', comment_text)
comment = Comment(
intention=_('Remark'),
comment=comment_text
)
if isinstance(context, Commentable):
context.addtoproperty('comments', comment)
else:
channel.addtoproperty('comments', comment)
channel.add_comment(comment)
comment.format(request, True)
comment.formatted_comment = '<div class="bot-message">' + \
comment.formatted_comment +\
'</div>'
comment.state = PersistentList(['published'])
comment.reindex()
comment.setproperty('author', nia)
if kwargs.get('related_contents', []):
related_contents = kwargs.get('related_contents')
correlation = connect(
context,
list(related_contents),
{'comment': comment.comment,
'type': comment.intention},
nia,
unique=True)
comment.setproperty('related_correlation', correlation[0])
context.reindex()
def alert_email(senders=[], recipients=[], exclude=[], **kwargs):
"""
recipients: ['[email protected]']
"""
admin_example_mail = '[email protected]'
sender = senders[0]
subject = kwargs.get('subject', '')
mail = kwargs.get('body', None)
html = kwargs.get('html', None)
attachments = kwargs.get('attachments', [])
if admin_example_mail in recipients:
recipients.remove(admin_example_mail)
if recipients and (mail or html):
mailer_send(
subject=subject, body=mail,
html=html, attachments=attachments,
recipients=recipients, sender=sender)
def alert_sms(senders=[], recipients=[], exclude=[], **kwargs):
"""
recipients: ['+33....']
"""
message = kwargs.get('message', None)
request = kwargs.get('request', get_current_request())
for recipient in recipients:
to = normalize_us_phone_number(recipient)
send_sms(request, to, message)
def alert_internal(senders=[], recipients=[], exclude=[], **kwargs):
"""
recipients: [user1, user2],
kwargs: {'internal_kind': 'content_alert',...}
"""
kind = kwargs.pop('internal_kind', None)
alert_class = INTERNAL_ALERTS.get(kind, None)
if alert_class and recipients:
subjects = kwargs.pop('subjects', [])
sender = senders[0]
alert = alert_class(**kwargs)
sender.addtoproperty('alerts', alert)
alert.init_alert(recipients, subjects, exclude)
if getattr(sender, 'activate_push_notification', False):
app_id = getattr(sender, 'app_id')
app_key = getattr(sender, 'app_key')
def send_notification(players_ids, excluded_ids=[]):
subject = subjects[0] if subjects else sender
request = get_current_request()
user = get_current(request)
notification_data = alert_class.get_notification_data(
subject, user, request, alert)
header = {
"Content-Type": "application/json",
"authorization": "Basic " + app_key}
payload = {"app_id": app_id,
"headings": {"en": notification_data['title'],
"fr": notification_data['title']},
"contents": {"en": notification_data['message'],
"fr": notification_data['message']},
"url": notification_data['url']
}
if players_ids != 'all':
payload["include_player_ids"] = players_ids
else:
payload["included_segments"] = ['All']
# if excluded_ids:
# payload["excluded_player_ids"] = excluded_ids
try:
requests.post(
"https://onesignal.com/api/v1/notifications",
headers=header, data=json.dumps(payload), timeout=0.1)
except Exception as error:
log.warning(error)
if recipients != 'all':
players_ids = [getattr(user, 'notification_ids', [])
for user in recipients]
players_ids = [item for sublist in players_ids
for item in sublist]
if players_ids:
excluded_ids = [getattr(user, 'notification_ids', [])
for user in exclude]
excluded_ids = [item for sublist in excluded_ids
for item in sublist]
send_notification(players_ids, excluded_ids)
else:
send_notification('all')
def alert(kind="", senders=[], recipients=[], exclude=[], **kwargs):
alert_op = ALERTS.get(kind, None)
if alert_op:
try:
recipients = list(set(recipients)) if isinstance(recipients, (list, set)) else recipients
return alert_op(senders, recipients, exclude, **kwargs)
except Exception as error:
log.warning(error)
return None
log.warning("Alert kind {kind} not implemented".format(kind=kind))
return None
ALERTS = {
'internal': alert_internal,
# 'slack': alert_slack,
# 'arango': alert_arango,
'email': alert_email,
'sms': alert_sms
}
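# Illustrative dispatch (a sketch; the addresses are placeholders):
#   alert('email', senders=['[email protected]'], recipients=['[email protected]'],
#         subject='Hello', body='Plain text body', html='<p>Hello</p>')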
| agpl-3.0 | -1,736,134,962,268,818,200 | 34.310861 | 101 | 0.559504 | false | 4.136902 | false | false | false |
zmarvel/playground | project6-hy/tests.py | 1 | 4016 | from hashtable import Hashtable
import time
###########################
########## Tests ##########
###########################
some_words = [u'lewes', # => 5
u'mistranscribe', # => 13
u'outbleed', # => 8
u'abstemiously', # => 12
u'antifeudal', # => 10
u'tableaux', # => 8
u'whine', # => 5
u'ytterbite', # => 9
u'redeemer'] # => 8
filename = "words.txt"
print(u'Reading words from file {}.'.format(filename))
most_words = []
start_time = time.time()
with open(filename) as f:
for line in f.readlines():
most_words.append(line.strip())
print(u'Read in {} words in {}s.'.format(len(most_words), time.time()-start_time))
def do_tests(T):
"""Run the tests for the Hashtable class.
For the example hashtable, we're mapping strings to integers. More
specifically, we're mapping words to the number of characters they have,
just for fun. The test function takes a Hashtable of words mapped to their
length, and at the end it adds a lot more of them to it.
"""
print(u'Starting hashtable tests!')
print(u'#####################')
print(u'')
print(u'Initial word list: {}'.format(some_words))
# test the constructor (which also uses __setitem__ and thereby __getitem__)
for word in some_words:
print(u'{} should map to {}.'.format(word, len(word)))
assert T[word] == len(word)
print(u'#####################')
print(u'')
print(u'Testing __setitem__ and __getitem__')
# test __setitem__ and __getitem__ some more
more_words = [u'nummulitic', u'proconviction', u'inscriber']
print(u'Adding more things to the table: {}'.format(more_words))
for word in more_words:
T[word] = len(word)
# make sure the original words are still there
for word in some_words:
print(u'{} should map to {}.'.format(word, len(word)))
assert T[word] == len(word)
# make sure the insertion actually worked
for word in more_words:
print(u'{} should map to {}.'.format(word, len(word)))
assert T[word] == len(word)
print(u'#####################')
print(u'')
# now delete the second list of words
print(u'Testing delete')
for word in more_words:
print(u'Delete key {}'.format(word))
del T[word]
# make sure the words in more_words aren't keys anymore
keys = T.keys()
print(u'Current list of keys: {}'.format(keys))
for word in more_words:
assert word not in keys
print(u'#####################')
print(u'')
# let's put them back in
for word in more_words:
print(u'Re-adding {}.'.format(word))
T[word] = len(word)
# make sure the list of keys contains all the words from both lists
keys = T.keys()
print(u'Current list of keys: {}'.format(keys))
for word in some_words:
assert word in keys
for word in more_words:
assert word in keys
print(u'#####################')
print(u'')
print(u'Now, let\'s make the table REALLY big!')
print(u'(In other words, let\'s test double() and quarter().)')
print(u'#####################')
print(u'')
print(u'Putting a bunch of words in the hashtable.')
start_time = time.time()
for word in most_words:
T[word] = len(word)
print(u'{} words inserted successfully in {}s.'.format(\
len(most_words),
time.time()-start_time))
print(u'Checking that the words and their values are actually there.')
for word in most_words:
l = len(word)
print(u'{}: {}'.format(word, l))
assert T[word] == l
print(u'Deleting a lot of items.')
for i, key in enumerate(T.keys()):
if i > 800:
break
else:
del T[key]
print(u'All tests passed!')
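# A driver might look like this (a sketch -- the Hashtable constructor
# arguments are an assumption, since hashtable.py is not shown here):
#   T = Hashtable([(word, len(word)) for word in some_words])
#   do_tests(T)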
| mit | 9,035,777,160,325,980,000 | 33.324786 | 82 | 0.534363 | false | 3.753271 | true | false | false |
joshua-cogliati-inl/raven | rook/XMLDiff.py | 1 | 19356 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This implements a test to compare two XML files.
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import sys
import os
import xml.etree.ElementTree as ET
from Tester import Differ
import DiffUtils as DU
#cswf Defined because otherwise lines of code get too long.
cswf = DU.compare_strings_with_floats
numTol = 1e-10 #effectively zero for our purposes
def find_branches(node, path, finished):
"""
    Recursive process to convert an XML tree into a list of entries
@ In, node, ET.Element, whose children need sorting
@ In, path, list(ET.Element), leading to node
@ In, finished, list(list(ET.Element)), full entries
@ Out, finished, list(list(ET.Element)), of full entries
"""
for child in node:
npath = path[:]+[child]
if len(child) == 0:
finished.append(npath)
else:
finished = find_branches(child, npath, finished)
return finished
def tree_to_list(node):
"""
Converts XML tree to list of entries. Useful to start recursive search.
@ In, node, ET.Element, the xml tree root node to convert
@ Out, tree_to_list, list(list(ET.Element)), of full paths to entries in xml tree
"""
flattened = find_branches(node, [node], [])
return list(tuple(f) for f in flattened)
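# Worked example: for <a><b/><c><d/></c></a>, tree_to_list(a) returns
# [(a, b), (a, c, d)] -- one tuple per root-to-leaf path, root included.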
def compare_list_entry(a_list, b_list, **kwargs):
"""
    Compares flattened XML entries for equality
    @ In, a_list, list(ET.Element), first set
    @ In, b_list, list(ET.Element), second set
    @ Out, compare_list_entry, (bool, float, list), (match, qual, diff) where
      match is True if all tags, text, and attributes agree, qual is the
      fraction of matching terms, and diff lists the mismatches found
"""
num_match = 0 #number of matching points between entries
total_matchable = 0 #total tag, text, and attributes available to match
match = True #True if entries match
diff = [] #tuple of (element, diff code, correct (a) value, test (b) value)
options = kwargs
for i in range(len(a_list)):
if i > len(b_list) - 1:
match = False
diff.append((b_list[-1], XMLDiff.missingChildNode, a_list[i].tag, None))
#could have matched the tag and attributes
total_matchable += 1 + len(a_list[i].attrib.keys())
#if text isn't empty, could have matched text, too
if a_list[i].text is not None and len(a_list[i].text.strip()) > 0:
total_matchable += 1
continue
a_item = a_list[i]
b_item = b_list[i]
#match tag
same, _ = cswf(a_item.tag, b_item.tag,
rel_err=options["rel_err"],
zero_threshold=options["zero_threshold"],
remove_whitespace=options["remove_whitespace"],
remove_unicode_identifier=options["remove_unicode_identifier"])
total_matchable += 1
if not same:
match = False
diff.append((b_item, XMLDiff.notMatchTag, a_item.tag, b_item.tag))
else:
num_match += 1
#match text
#if (a_item.text is None or len(a_item.text)>0) and (b_item.text is None or len(b_item.text)>0):
same, _ = cswf(a_item.text,
b_item.text,
rel_err=options["rel_err"],
zero_threshold=options["zero_threshold"],
remove_whitespace=options["remove_whitespace"],
remove_unicode_identifier=options["remove_unicode_identifier"])
if not same:
match = False
diff.append((b_item, XMLDiff.notMatchText, str(a_item.text), str(b_item.text)))
total_matchable += 1
else:
      if a_item.text is not None and a_item.text.strip() != '':  # count non-empty matched text
num_match += 1
total_matchable += 1
#match attributes
for attrib in a_item.attrib.keys():
total_matchable += 1
if attrib not in b_item.attrib.keys():
match = False
diff.append((b_item, XMLDiff.missingAttribute, attrib, None))
continue
same, _ = cswf(a_item.attrib[attrib],
b_item.attrib[attrib],
rel_err=options["rel_err"],
zero_threshold=options["zero_threshold"],
remove_whitespace=options["remove_whitespace"],
remove_unicode_identifier=options["remove_unicode_identifier"])
if not same:
match = False
diff.append((b_item, XMLDiff.notMatchAttribute, (a_item, attrib), (b_item, attrib)))
else:
num_match += 1
#note attributes in b_item not in a_item
for attrib in b_item.attrib.keys():
if attrib not in a_item.attrib.keys():
match = False
diff.append((b_item, XMLDiff.extraAttribute, attrib, None))
total_matchable += 1
# note elements in b not in a
if len(b_list) > len(a_list):
match = False
    for j in range(len(a_list), len(b_list)):
diff.append((a_list[-1], XMLDiff.extraChildNode, b_list[j].tag, None))
#count tag and attributes
total_matchable += 1 + len(b_list[j].attrib.keys())
#if text isn't empty, count text, too
      if b_list[j].text is not None and len(b_list[j].text.strip()) > 0:
total_matchable += 1
return (match, float(num_match)/float(total_matchable), diff)
def compare_unordered_element(a_element, b_element, **kwargs):
"""
Compares two element trees and returns (same,message)
where same is true if they are the same,
and message is a list of the differences.
Uses list of tree entries to find best match, instead of climbing the tree
@ In, a_element, ET.Element, the first element
@ In, b_element, ET.Element, the second element
@ Out, compare_unordered_element, (bool,[string]), results of comparison
"""
same = True
message = []
options = kwargs
matchvals = {}
diffs = {}
DU.set_default_options(options)
def fail_message(*args):
"""
adds the fail message to the list
@ In, args, list, The arguments to the fail message (will be converted with str())
@ Out, fail_message, (bool,string), results
"""
print_args = []
print_args.extend(args)
args_expanded = " ".join([str(x) for x in print_args])
message.append(args_expanded)
if a_element.text != b_element.text:
succeeded, note = cswf(a_element.text,
b_element.text,
rel_err=options["rel_err"],
zero_threshold=options["zero_threshold"],
remove_whitespace=options["remove_whitespace"],
remove_unicode_identifier=options["remove_unicode_identifier"])
if not succeeded:
same = False
fail_message(note)
return (same, message)
a_list = tree_to_list(a_element)
b_list = tree_to_list(b_element)
#search a for matches in b
for a_entry in a_list:
matchvals[a_entry] = {}
diffs[a_entry] = {}
for b_entry in b_list:
same, matchval, diff = compare_list_entry(a_entry, b_entry, **options)
if same:
b_list.remove(b_entry)
del matchvals[a_entry]
del diffs[a_entry]
#since we found the match, remove from other near matches
for close_key in diffs:
if b_entry in diffs[close_key].keys():
del diffs[close_key][b_entry]
del matchvals[close_key][b_entry]
break
matchvals[a_entry][b_entry] = matchval
diffs[a_entry][b_entry] = diff
if len(matchvals) == 0: #all matches found
return (True, '')
note = ''
for unmatched, close in matchvals.items():
#print the path without a match
path = '/'.join(list(m.tag for m in unmatched))
note += 'No match for gold node {}\n'.format(path)
note += ' tag: {}\n'.format(unmatched[-1].tag)
note += ' attr: {}\n'.format(unmatched[-1].attrib)
note += ' text: {}\n'.format(unmatched[-1].text)
#print the tree of the nearest match
note += ' Nearest unused match: '
close = sorted(list(close.items()), key=lambda x: x[1], reverse=True)
if close:
closest = '/'.join(list(c.tag for c in close[0][0]))
else:
closest = '-none found-'
note += ' '+ closest +'\n'
#print what was different between them
if len(close):
diff = diffs[unmatched][close[0][0]]
for b_diff, code, right, miss in diff:
if b_diff is None:
b_diff = str(b_diff)
if code is None:
code = str(code)
if right is None:
right = str(right)
if miss is None:
miss = str(miss)
if code == XMLDiff.missingChildNode:
note += ' <'+b_diff.tag+'> is missing child node: <'+right+'> vs <'+miss+'>\n'
elif code == XMLDiff.missingAttribute:
note += ' <'+b_diff.tag+'> is missing attribute: "'+right+'"\n'
elif code == XMLDiff.extraChildNode:
note += ' <'+b_diff.tag+'> has extra child node: <'+right+'>\n'
elif code == XMLDiff.extraAttribute:
note += ' <'+b_diff.tag+'> has extra attribute: "'+right+\
'" = "'+b_diff.attrib[right]+'"\n'
elif code == XMLDiff.notMatchTag:
note += ' <'+b_diff.tag+'> tag does not match: <'+right+'> vs <'+miss+'>\n'
elif code == XMLDiff.notMatchAttribute:
note += ' <'+b_diff.tag+'> attribute does not match: "'+right[1]+\
'" = "'+right[0].attrib[right[1]]+'" vs "'+miss[0].attrib[miss[1]]+'"\n'
elif code == XMLDiff.notMatchText:
note += ' <'+b_diff.tag+'> text does not match: "'+right+'" vs "'+miss+'"\n'
else:
note += ' UNRECOGNIZED OPTION: "'+b_diff.tag+'" "'+str(code)+\
'": "'+str(right)+'" vs "'+str(miss)+'"\n'
return (False, [note])
def compare_ordered_element(a_element, b_element, *args, **kwargs):
"""
Compares two element trees and returns (same,message) where same is true
if they are the same, and message is a list of the differences
@ In, a_element, ET.Element, the first element tree
@ In, b_element, ET.Element, the second element tree
@ In, args, dict, arguments
@ In, kwargs, dict, keyword arguments
accepted args:
- none -
accepted kwargs:
path: a string to describe where the element trees are located (mainly
used recursively)
@ Out, compare_ordered_element, (bool,[string]), results of comparison
"""
same = True
message = []
options = kwargs
path = kwargs.get('path', '')
counter = kwargs.get('counter', 0)
DU.set_default_options(options)
def fail_message(*args):
"""
adds the fail message to the list
@ In, args, list, The arguments to the fail message (will be converted with str())
@ Out, fail_message, (bool,string), results
"""
print_args = [path]
print_args.extend(args)
args_expanded = " ".join([str(x) for x in print_args])
message.append(args_expanded)
if a_element.tag != b_element.tag:
same = False
fail_message("mismatch tags ", a_element.tag, b_element.tag)
else:
path += a_element.tag + "/"
if a_element.text != b_element.text:
succeeded, note = cswf(a_element.text,
b_element.text,
rel_err=options["rel_err"],
zero_threshold=options["zero_threshold"],
remove_whitespace=options["remove_whitespace"],
remove_unicode_identifier=options["remove_unicode_identifier"])
if not succeeded:
same = False
fail_message(note)
return (same, message)
different_keys = set(a_element.keys()).symmetric_difference(set(b_element.keys()))
same_keys = set(a_element.keys()).intersection(set(b_element.keys()))
if len(different_keys) != 0:
same = False
fail_message("mismatch attribute keys ", different_keys)
for key in same_keys:
if a_element.attrib[key] != b_element.attrib[key]:
same = False
fail_message("mismatch attribute ", key, a_element.attrib[key], b_element.attrib[key])
if len(a_element) != len(b_element):
same = False
fail_message("mismatch number of children ", len(a_element), len(b_element))
else:
if a_element.tag == b_element.tag:
#find all matching XML paths
#WARNING: this will mangle the XML, so other testing should happen above this!
found = []
for i in range(len(a_element)):
sub_options = dict(options)
sub_options["path"] = path
(same_child, _) = compare_ordered_element(a_element[i], b_element[i], *args, **sub_options)
if same_child:
found.append((a_element[i], b_element[i]))
same = same and same_child
#prune matches from trees
for children in found:
a_element.remove(children[0])
b_element.remove(children[1])
#once all pruning done, error on any remaining structure
if counter == 0: #on head now, recursion is finished
if len(a_element) > 0:
a_string = ET.tostring(a_element)
if len(a_string) > 80:
message.append('Branches in gold not matching test...\n{}'.format(path))
else:
message.append('Branches in gold not matching test...\n{} {}'.format(path, a_string))
if len(b_element) > 0:
b_string = ET.tostring(b_element)
if len(b_string) > 80:
message.append('Branches in test not matching gold...\n{}'.format(path))
else:
message.append('Branches in test not matching gold...\n{} {}'.format(path, b_string))
return (same, message)
class XMLDiff:
"""
XMLDiff is used for comparing xml files.
"""
#static codes for differences
missingChildNode = 0
missingAttribute = 1
extraChildNode = 2
extraAttribute = 3
notMatchTag = 4
notMatchAttribute = 5
notMatchText = 6
def __init__(self, out_files, gold_files, **kwargs):
"""
Create an XMLDiff class
      @ In, out_files, List(string), the files to be compared.
      @ In, gold_files, List(String), the gold files to be compared.
      @ In, kwargs, dict, other arguments that may be included:
        - 'unordered': if True, compare XML elements without regard to order
@ Out, None
"""
assert len(out_files) == len(gold_files)
self.__out_files = out_files
self.__gold_files = gold_files
self.__messages = ""
self.__same = True
self.__options = kwargs
def diff(self):
"""
Run the comparison.
@ In, None
@ Out, diff, (bool,string), (same,messages) where same is true if all
the xml files are the same, and messages is a string with all the
differences.
"""
# read in files
for test_filename, gold_filename in zip(self.__out_files, self.__gold_files):
if not os.path.exists(test_filename):
self.__same = False
self.__messages += 'Test file does not exist: '+test_filename
elif not os.path.exists(gold_filename):
self.__same = False
self.__messages += 'Gold file does not exist: '+gold_filename
else:
files_read = True
try:
test_root = ET.parse(test_filename).getroot()
except Exception as exp:
files_read = False
self.__messages += 'Exception reading file '+test_filename+': '+str(exp.args)
try:
gold_root = ET.parse(gold_filename).getroot()
except Exception as exp:
files_read = False
self.__messages += 'Exception reading file '+gold_filename+': '+str(exp.args)
if files_read:
if 'unordered' in self.__options.keys() and self.__options['unordered']:
same, messages = compare_unordered_element(gold_root, test_root, **self.__options)
else:
same, messages = compare_ordered_element(test_root, gold_root, **self.__options)
if not same:
self.__same = False
separator = "\n"+" "*4
self.__messages += "Mismatch between "+test_filename+" and "+gold_filename+separator
self.__messages += separator.join(messages) + "\n"
else:
self.__same = False
if '[' in self.__messages or ']' in self.__messages:
self.__messages = self.__messages.replace('[', '(')
self.__messages = self.__messages.replace(']', ')')
return (self.__same, self.__messages)
class XML(Differ):
"""
This is the class to use for handling the XML block.
"""
@staticmethod
def get_valid_params():
"""
Return the valid parameters for this class.
@ In, None
@ Out, params, _ValidParameters, return the parameters.
"""
params = Differ.get_valid_params()
params.add_param('unordered', False, 'if true allow the tags in any order')
params.add_param('zero_threshold', sys.float_info.min*4.0, 'it represents '
+'the value below which a float is considered zero (XML comparison only)')
params.add_param('remove_whitespace', False,
'Removes whitespace before comparing xml node text if True')
params.add_param('remove_unicode_identifier', False,
                     'if true, then remove u in front of a single quote')
params.add_param('xmlopts', '', "Options for xml checking")
params.add_param('rel_err', '', 'Relative Error for csv files or floats in xml ones')
return params
def __init__(self, name, params, test_dir):
"""
Initializer for the class. Takes a String name and a dictionary params
@ In, name, string, name of the test.
@ In, params, dictionary, parameters for the class
@ In, test_dir, string, path to the test.
@ Out, None.
"""
Differ.__init__(self, name, params, test_dir)
self.__xmlopts = {}
if len(self.specs["rel_err"]) > 0:
self.__xmlopts['rel_err'] = float(self.specs["rel_err"])
self.__xmlopts['zero_threshold'] = float(self.specs["zero_threshold"])
self.__xmlopts['unordered'] = bool(self.specs["unordered"])
self.__xmlopts['remove_whitespace'] = bool(self.specs['remove_whitespace'])
self.__xmlopts['remove_unicode_identifier'] = self.specs['remove_unicode_identifier']
if len(self.specs['xmlopts']) > 0:
self.__xmlopts['xmlopts'] = self.specs['xmlopts'].split(' ')
def check_output(self):
"""
Checks that the output matches the gold.
returns (same, message) where same is true if the
      test passes, or false if the test fails. message should
      give a human readable explanation of the differences.
@ In, None
@ Out, (same, message), same is true if the tests passes.
"""
xml_files = self._get_test_files()
gold_files = self._get_gold_files()
xml_diff = XMLDiff(xml_files, gold_files, **self.__xmlopts)
return xml_diff.diff()
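# --- Illustrative usage sketch (added for clarity; not part of the original
# module). The file names and tolerance values below are assumptions; inside
# the test framework they normally arrive through the Differ parameters.
def _demo_xmldiff():
  """
    Minimal direct driver for XMLDiff, assuming both files exist on disk.
    @ In, None
    @ Out, _demo_xmldiff, (bool, string), same flag and aggregated messages
  """
  diff = XMLDiff(['output/run.xml'], ['gold/run.xml'],
                 rel_err=1e-8, zero_threshold=1e-14, unordered=False)
  return diff.diff()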
| apache-2.0 | 3,820,883,646,590,924,000 | 39.493724 | 100 | 0.606685 | false | 3.691076 | true | false | false |
openatv/enigma2 | lib/python/Plugins/Extensions/FileCommander/addons/dmnapi.py | 3 | 1533 | #!/usr/bin/python -u
# -*- coding: UTF-8 -*-
# napiprojekt.pl API is used with napiproject administration consent
import re
import os
import os.path
import sys
import dmnapim
def get_all(file, supplement):
rex = re.compile('.*\\.%s$' % file[-3:], re.I)
(dir, fname) = os.path.split(file)
for f in os.listdir(dir):
if os.path.exists(os.path.join(dir, f[:-4] + '.srt')) and supplement:
pass
else:
if rex.match(f):
try:
dmnapim.get_sub_from_napi(os.path.join(dir, f))
except:
print " Error: %s" % (sys.exc_info()[1])
try:
# opt fps videofile [subtitlefile]
opt = sys.argv[1]
try:
fps = float(sys.argv[2]) / 1000
except:
fps = 0
if opt == "get":
file = os.path.abspath(sys.argv[3])
dmnapim.get_sub_from_napi(file, fps=fps)
elif opt == "all" or opt == 'allnew':
file = os.path.abspath(sys.argv[3])
get_all(file, opt == "allnew")
elif opt == "convert":
file = os.path.abspath(sys.argv[3])
dmnapim.convert(file, sys.argv[4], fps=fps)
elif opt == "upgrade":
file = sys.argv[2]
x, ipk = os.path.split(file)
if os.path.exists("/usr/bin/opkg"):
do = "opkg install " + ipk
else:
do = "ipkg install " + ipk
print "Upgrade to:\n", file, "\n"
os.system("cd /tmp ; rm -f enigma2-plugin-extensions-dmnapi*.ipk ; opkg update && wget -c %s && ls -al enigma2-plugin-extensions-dmnapi*.ipk && %s" % (file, do))
elif opt == "n24":
file = os.path.abspath(sys.argv[3])
dmnapim.get_sub_from_n24(file, sys.argv[4], fps=fps)
except:
print " Error: %s" % (sys.exc_info()[1])
| gpl-2.0 | -2,545,383,978,789,383,700 | 26.872727 | 163 | 0.621657 | false | 2.410377 | false | false | false |
itsvetkov/pyqtgraph | pyqtgraph/debug.py | 1 | 36061 | # -*- coding: utf-8 -*-
"""
debug.py - Functions to aid in debugging
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
"""
from __future__ import print_function
import sys, traceback, time, gc, re, types, weakref, inspect, os, cProfile, pstats, threading
from . import ptime
from numpy import ndarray
from .Qt import QtCore, QtGui
from .util.mutex import Mutex
from .util import cprint
__ftraceDepth = 0
def ftrace(func):
"""Decorator used for marking the beginning and end of function calls.
Automatically indents nested calls.
"""
def w(*args, **kargs):
global __ftraceDepth
pfx = " " * __ftraceDepth
print(pfx + func.__name__ + " start")
__ftraceDepth += 1
try:
rv = func(*args, **kargs)
finally:
__ftraceDepth -= 1
print(pfx + func.__name__ + " done")
return rv
return w
def warnOnException(func):
"""Decorator which catches/ignores exceptions and prints a stack trace."""
def w(*args, **kwds):
try:
func(*args, **kwds)
except:
printExc('Ignored exception:')
return w
def getExc(indent=4, prefix='| '):
tb = traceback.format_exc()
lines = []
for l in tb.split('\n'):
lines.append(" "*indent + prefix + l)
return '\n'.join(lines)
def printExc(msg='', indent=4, prefix='|'):
"""Print an error message followed by an indented exception backtrace
(This function is intended to be called within except: blocks)"""
exc = getExc(indent, prefix + ' ')
print("[%s] %s\n" % (time.strftime("%H:%M:%S"), msg))
print(" "*indent + prefix + '='*30 + '>>')
print(exc)
print(" "*indent + prefix + '='*30 + '<<')
def printTrace(msg='', indent=4, prefix='|'):
"""Print an error message followed by an indented stack trace"""
trace = backtrace(1)
#exc = getExc(indent, prefix + ' ')
print("[%s] %s\n" % (time.strftime("%H:%M:%S"), msg))
print(" "*indent + prefix + '='*30 + '>>')
for line in trace.split('\n'):
print(" "*indent + prefix + " " + line)
print(" "*indent + prefix + '='*30 + '<<')
def backtrace(skip=0):
return ''.join(traceback.format_stack()[:-(skip+1)])
def listObjs(regex='Q', typ=None):
"""List all objects managed by python gc with class name matching regex.
Finds 'Q...' classes by default."""
if typ is not None:
return [x for x in gc.get_objects() if isinstance(x, typ)]
else:
return [x for x in gc.get_objects() if re.match(regex, type(x).__name__)]
def findRefPath(startObj, endObj, maxLen=8, restart=True, seen={}, path=None, ignore=None):
"""Determine all paths of object references from startObj to endObj"""
refs = []
if path is None:
path = [endObj]
if ignore is None:
ignore = {}
ignore[id(sys._getframe())] = None
ignore[id(path)] = None
ignore[id(seen)] = None
prefix = " "*(8-maxLen)
#print prefix + str(map(type, path))
prefix += " "
if restart:
#gc.collect()
seen.clear()
gc.collect()
newRefs = [r for r in gc.get_referrers(endObj) if id(r) not in ignore]
ignore[id(newRefs)] = None
#fo = allFrameObjs()
#newRefs = []
#for r in gc.get_referrers(endObj):
#try:
#if r not in fo:
#newRefs.append(r)
#except:
#newRefs.append(r)
for r in newRefs:
#print prefix+"->"+str(type(r))
if type(r).__name__ in ['frame', 'function', 'listiterator']:
#print prefix+" FRAME"
continue
try:
if any([r is x for x in path]):
#print prefix+" LOOP", objChainString([r]+path)
continue
except:
print(r)
print(path)
raise
if r is startObj:
refs.append([r])
print(refPathString([startObj]+path))
continue
if maxLen == 0:
#print prefix+" END:", objChainString([r]+path)
continue
## See if we have already searched this node.
## If not, recurse.
tree = None
try:
cache = seen[id(r)]
if cache[0] >= maxLen:
tree = cache[1]
for p in tree:
print(refPathString(p+path))
except KeyError:
pass
ignore[id(tree)] = None
if tree is None:
tree = findRefPath(startObj, r, maxLen-1, restart=False, path=[r]+path, ignore=ignore)
seen[id(r)] = [maxLen, tree]
## integrate any returned results
if len(tree) == 0:
#print prefix+" EMPTY TREE"
continue
else:
for p in tree:
refs.append(p+[r])
#seen[id(r)] = [maxLen, refs]
return refs
def objString(obj):
"""Return a short but descriptive string for any object"""
try:
if type(obj) in [int, float]:
return str(obj)
elif isinstance(obj, dict):
if len(obj) > 5:
return "<dict {%s,...}>" % (",".join(list(obj.keys())[:5]))
else:
return "<dict {%s}>" % (",".join(list(obj.keys())))
elif isinstance(obj, str):
if len(obj) > 50:
return '"%s..."' % obj[:50]
else:
return obj[:]
elif isinstance(obj, ndarray):
return "<ndarray %s %s>" % (str(obj.dtype), str(obj.shape))
elif hasattr(obj, '__len__'):
if len(obj) > 5:
return "<%s [%s,...]>" % (type(obj).__name__, ",".join([type(o).__name__ for o in obj[:5]]))
else:
return "<%s [%s]>" % (type(obj).__name__, ",".join([type(o).__name__ for o in obj]))
else:
return "<%s %s>" % (type(obj).__name__, obj.__class__.__name__)
except:
return str(type(obj))
def refPathString(chain):
"""Given a list of adjacent objects in a reference path, print the 'natural' path
names (ie, attribute names, keys, and indexes) that follow from one object to the next ."""
s = objString(chain[0])
i = 0
while i < len(chain)-1:
#print " -> ", i
i += 1
o1 = chain[i-1]
o2 = chain[i]
cont = False
if isinstance(o1, list) or isinstance(o1, tuple):
if any([o2 is x for x in o1]):
s += "[%d]" % o1.index(o2)
continue
#print " not list"
if isinstance(o2, dict) and hasattr(o1, '__dict__') and o2 == o1.__dict__:
i += 1
if i >= len(chain):
s += ".__dict__"
continue
o3 = chain[i]
for k in o2:
if o2[k] is o3:
s += '.%s' % k
cont = True
continue
#print " not __dict__"
if isinstance(o1, dict):
try:
if o2 in o1:
s += "[key:%s]" % objString(o2)
continue
except TypeError:
pass
for k in o1:
if o1[k] is o2:
s += "[%s]" % objString(k)
cont = True
continue
#print " not dict"
#for k in dir(o1): ## Not safe to request attributes like this.
#if getattr(o1, k) is o2:
#s += ".%s" % k
#cont = True
#continue
#print " not attr"
if cont:
continue
s += " ? "
sys.stdout.flush()
return s
def objectSize(obj, ignore=None, verbose=False, depth=0, recursive=False):
"""Guess how much memory an object is using"""
ignoreTypes = [types.MethodType, types.UnboundMethodType, types.BuiltinMethodType, types.FunctionType, types.BuiltinFunctionType]
ignoreRegex = re.compile('(method-wrapper|Flag|ItemChange|Option|Mode)')
if ignore is None:
ignore = {}
indent = ' '*depth
try:
hash(obj)
hsh = obj
except:
hsh = "%s:%d" % (str(type(obj)), id(obj))
if hsh in ignore:
return 0
ignore[hsh] = 1
try:
size = sys.getsizeof(obj)
except TypeError:
size = 0
if isinstance(obj, ndarray):
try:
size += len(obj.data)
except:
pass
if recursive:
if type(obj) in [list, tuple]:
if verbose:
print(indent+"list:")
for o in obj:
s = objectSize(o, ignore=ignore, verbose=verbose, depth=depth+1)
if verbose:
print(indent+' +', s)
size += s
elif isinstance(obj, dict):
if verbose:
print(indent+"list:")
for k in obj:
s = objectSize(obj[k], ignore=ignore, verbose=verbose, depth=depth+1)
if verbose:
print(indent+' +', k, s)
size += s
#elif isinstance(obj, QtCore.QObject):
#try:
#childs = obj.children()
#if verbose:
#print indent+"Qt children:"
#for ch in childs:
#s = objectSize(obj, ignore=ignore, verbose=verbose, depth=depth+1)
#size += s
#if verbose:
#print indent + ' +', ch.objectName(), s
#except:
#pass
#if isinstance(obj, types.InstanceType):
gc.collect()
if verbose:
print(indent+'attrs:')
for k in dir(obj):
if k in ['__dict__']:
continue
o = getattr(obj, k)
if type(o) in ignoreTypes:
continue
strtyp = str(type(o))
if ignoreRegex.search(strtyp):
continue
#if isinstance(o, types.ObjectType) and strtyp == "<type 'method-wrapper'>":
#continue
#if verbose:
#print indent, k, '?'
refs = [r for r in gc.get_referrers(o) if type(r) != types.FrameType]
if len(refs) == 1:
s = objectSize(o, ignore=ignore, verbose=verbose, depth=depth+1)
size += s
if verbose:
print(indent + " +", k, s)
#else:
#if verbose:
#print indent + ' -', k, len(refs)
return size
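# Illustrative sketch (added; not in the original pyqtgraph source). A rough
# memory estimate for a small container; the reported number varies between
# Python builds, so treat it as the guess the docstring promises.
def demoObjectSize():
    data = {'values': list(range(1000)), 'name': 'example'}
    print("approx bytes:", objectSize(data, recursive=True))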
class GarbageWatcher(object):
"""
Convenient dictionary for holding weak references to objects.
Mainly used to check whether the objects have been collect yet or not.
Example:
gw = GarbageWatcher()
gw['objName'] = obj
gw['objName2'] = obj2
gw.check()
"""
def __init__(self):
self.objs = weakref.WeakValueDictionary()
self.allNames = []
def add(self, obj, name):
self.objs[name] = obj
self.allNames.append(name)
def __setitem__(self, name, obj):
self.add(obj, name)
def check(self):
"""Print a list of all watched objects and whether they have been collected."""
gc.collect()
dead = self.allNames[:]
alive = []
for k in self.objs:
dead.remove(k)
alive.append(k)
print("Deleted objects:", dead)
print("Live objects:", alive)
def __getitem__(self, item):
return self.objs[item]
class Profiler(object):
"""Simple profiler allowing measurement of multiple time intervals.
By default, profilers are disabled. To enable profiling, set the
environment variable `PYQTGRAPHPROFILE` to a comma-separated list of
fully-qualified names of profiled functions.
Calling a profiler registers a message (defaulting to an increasing
counter) that contains the time elapsed since the last call. When the
profiler is about to be garbage-collected, the messages are passed to the
outer profiler if one is running, or printed to stdout otherwise.
If `delayed` is set to False, messages are immediately printed instead.
Example:
def function(...):
profiler = Profiler()
... do stuff ...
profiler('did stuff')
... do other stuff ...
profiler('did other stuff')
# profiler is garbage-collected and flushed at function end
If this function is a method of class C, setting `PYQTGRAPHPROFILE` to
"C.function" (without the module name) will enable this profiler.
For regular functions, use the qualified name of the function, stripping
only the initial "pyqtgraph." prefix from the module.
"""
_profilers = os.environ.get("PYQTGRAPHPROFILE", None)
_profilers = _profilers.split(",") if _profilers is not None else []
_depth = 0
_msgs = []
class DisabledProfiler(object):
def __init__(self, *args, **kwds):
pass
def __call__(self, *args):
pass
def finish(self):
pass
def mark(self, msg=None):
pass
_disabledProfiler = DisabledProfiler()
def __new__(cls, msg=None, disabled='env', delayed=True):
"""Optionally create a new profiler based on caller's qualname.
"""
if disabled is True or (disabled=='env' and len(cls._profilers) == 0):
return cls._disabledProfiler
# determine the qualified name of the caller function
caller_frame = sys._getframe(1)
try:
caller_object_type = type(caller_frame.f_locals["self"])
except KeyError: # we are in a regular function
qualifier = caller_frame.f_globals["__name__"].split(".", 1)[-1]
else: # we are in a method
qualifier = caller_object_type.__name__
func_qualname = qualifier + "." + caller_frame.f_code.co_name
if disabled=='env' and func_qualname not in cls._profilers: # don't do anything
return cls._disabledProfiler
# create an actual profiling object
cls._depth += 1
obj = super(Profiler, cls).__new__(cls)
obj._name = msg or func_qualname
obj._delayed = delayed
obj._markCount = 0
obj._finished = False
obj._firstTime = obj._lastTime = ptime.time()
obj._newMsg("> Entering " + obj._name)
return obj
#else:
#def __new__(cls, delayed=True):
#return lambda msg=None: None
def __call__(self, msg=None):
"""Register or print a new message with timing information.
"""
if msg is None:
msg = str(self._markCount)
self._markCount += 1
newTime = ptime.time()
self._newMsg(" %s: %0.4f ms",
msg, (newTime - self._lastTime) * 1000)
self._lastTime = newTime
def mark(self, msg=None):
self(msg)
def _newMsg(self, msg, *args):
msg = " " * (self._depth - 1) + msg
if self._delayed:
self._msgs.append((msg, args))
else:
self.flush()
print(msg % args)
def __del__(self):
self.finish()
def finish(self, msg=None):
"""Add a final message; flush the message list if no parent profiler.
"""
if self._finished:
return
self._finished = True
if msg is not None:
self(msg)
self._newMsg("< Exiting %s, total time: %0.4f ms",
self._name, (ptime.time() - self._firstTime) * 1000)
type(self)._depth -= 1
if self._depth < 1:
self.flush()
def flush(self):
if self._msgs:
print("\n".join([m[0]%m[1] for m in self._msgs]))
type(self)._msgs = []
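# Illustrative sketch (added; not in the original pyqtgraph source). The
# qualified name "debug.demoProfiledTask" is an assumption -- the timing
# calls below stay no-ops unless that name is listed in PYQTGRAPHPROFILE.
def demoProfiledTask():
    profiler = Profiler()              # disabled unless registered via env var
    total = sum(range(100000))         # stand-in for real work
    profiler('summed range')           # time since profiler creation
    squares = [i * i for i in range(1000)]
    profiler('built squares')          # time since the previous mark
    return total, len(squares)         # messages flush when profiler is collected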
def profile(code, name='profile_run', sort='cumulative', num=30):
"""Common-use for cProfile"""
cProfile.run(code, name)
stats = pstats.Stats(name)
stats.sort_stats(sort)
stats.print_stats(num)
return stats
#### Code for listing (nearly) all objects in the known universe
#### http://utcc.utoronto.ca/~cks/space/blog/python/GetAllObjects
# Recursively expand slist's objects
# into olist, using seen to track
# already processed objects.
def _getr(slist, olist, first=True):
i = 0
for e in slist:
oid = id(e)
typ = type(e)
if oid in olist or typ is int: ## or e in olist: ## since we're excluding all ints, there is no longer a need to check for olist keys
continue
olist[oid] = e
if first and (i%1000) == 0:
gc.collect()
tl = gc.get_referents(e)
if tl:
_getr(tl, olist, first=False)
i += 1
# The public function.
def get_all_objects():
"""Return a list of all live Python objects (excluding int and long), not including the list itself."""
gc.collect()
gcl = gc.get_objects()
olist = {}
_getr(gcl, olist)
del olist[id(olist)]
del olist[id(gcl)]
del olist[id(sys._getframe())]
return olist
def lookup(oid, objects=None):
"""Return an object given its ID, if it exists."""
if objects is None:
objects = get_all_objects()
return objects[oid]
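# Illustrative sketch (added; not in the original pyqtgraph source): the
# get_all_objects()/lookup() pair can recover a live object from its id().
def demoObjectLookup():
    target = ['findme']                # any gc-tracked object works here
    found = lookup(id(target))         # scans all live objects by default
    assert found is target             # same object, recovered by id
    return found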
class ObjTracker(object):
"""
Tracks all objects under the sun, reporting the changes between snapshots: what objects are created, deleted, and persistent.
This class is very useful for tracking memory leaks. The class goes to great (but not heroic) lengths to avoid tracking
its own internal objects.
Example:
ot = ObjTracker() # takes snapshot of currently existing objects
... do stuff ...
ot.diff() # prints lists of objects created and deleted since ot was initialized
... do stuff ...
ot.diff() # prints lists of objects created and deleted since last call to ot.diff()
# also prints list of items that were created since initialization AND have not been deleted yet
# (if done correctly, this list can tell you about objects that were leaked)
arrays = ot.findPersistent('ndarray') ## returns all objects matching 'ndarray' (string match, not instance checking)
## that were considered persistent when the last diff() was run
describeObj(arrays[0]) ## See if we can determine who has references to this array
"""
allObjs = {} ## keep track of all objects created and stored within class instances
allObjs[id(allObjs)] = None
def __init__(self):
self.startRefs = {} ## list of objects that exist when the tracker is initialized {oid: weakref}
## (If it is not possible to weakref the object, then the value is None)
self.startCount = {}
self.newRefs = {} ## list of objects that have been created since initialization
self.persistentRefs = {} ## list of objects considered 'persistent' when the last diff() was called
self.objTypes = {}
ObjTracker.allObjs[id(self)] = None
self.objs = [self.__dict__, self.startRefs, self.startCount, self.newRefs, self.persistentRefs, self.objTypes]
self.objs.append(self.objs)
for v in self.objs:
ObjTracker.allObjs[id(v)] = None
self.start()
def findNew(self, regex):
"""Return all objects matching regex that were considered 'new' when the last diff() was run."""
return self.findTypes(self.newRefs, regex)
def findPersistent(self, regex):
"""Return all objects matching regex that were considered 'persistent' when the last diff() was run."""
return self.findTypes(self.persistentRefs, regex)
def start(self):
"""
Remember the current set of objects as the comparison for all future calls to diff()
Called automatically on init, but can be called manually as well.
"""
refs, count, objs = self.collect()
for r in self.startRefs:
self.forgetRef(self.startRefs[r])
self.startRefs.clear()
self.startRefs.update(refs)
for r in refs:
self.rememberRef(r)
self.startCount.clear()
self.startCount.update(count)
#self.newRefs.clear()
#self.newRefs.update(refs)
def diff(self, **kargs):
"""
Compute all differences between the current object set and the reference set.
Print a set of reports for created, deleted, and persistent objects
"""
refs, count, objs = self.collect() ## refs contains the list of ALL objects
## Which refs have disappeared since call to start() (these are only displayed once, then forgotten.)
delRefs = {}
for i in self.startRefs.keys():
if i not in refs:
delRefs[i] = self.startRefs[i]
del self.startRefs[i]
self.forgetRef(delRefs[i])
for i in self.newRefs.keys():
if i not in refs:
delRefs[i] = self.newRefs[i]
del self.newRefs[i]
self.forgetRef(delRefs[i])
#print "deleted:", len(delRefs)
## Which refs have appeared since call to start() or diff()
persistentRefs = {} ## created since start(), but before last diff()
createRefs = {} ## created since last diff()
for o in refs:
if o not in self.startRefs:
if o not in self.newRefs:
createRefs[o] = refs[o] ## object has been created since last diff()
else:
persistentRefs[o] = refs[o] ## object has been created since start(), but before last diff() (persistent)
#print "new:", len(newRefs)
## self.newRefs holds the entire set of objects created since start()
for r in self.newRefs:
self.forgetRef(self.newRefs[r])
self.newRefs.clear()
self.newRefs.update(persistentRefs)
self.newRefs.update(createRefs)
for r in self.newRefs:
self.rememberRef(self.newRefs[r])
#print "created:", len(createRefs)
## self.persistentRefs holds all objects considered persistent.
self.persistentRefs.clear()
self.persistentRefs.update(persistentRefs)
print("----------- Count changes since start: ----------")
c1 = count.copy()
for k in self.startCount:
c1[k] = c1.get(k, 0) - self.startCount[k]
typs = list(c1.keys())
typs.sort(lambda a,b: cmp(c1[a], c1[b]))
for t in typs:
if c1[t] == 0:
continue
num = "%d" % c1[t]
print(" " + num + " "*(10-len(num)) + str(t))
print("----------- %d Deleted since last diff: ------------" % len(delRefs))
self.report(delRefs, objs, **kargs)
print("----------- %d Created since last diff: ------------" % len(createRefs))
self.report(createRefs, objs, **kargs)
print("----------- %d Created since start (persistent): ------------" % len(persistentRefs))
self.report(persistentRefs, objs, **kargs)
def __del__(self):
self.startRefs.clear()
self.startCount.clear()
self.newRefs.clear()
self.persistentRefs.clear()
del ObjTracker.allObjs[id(self)]
for v in self.objs:
del ObjTracker.allObjs[id(v)]
@classmethod
def isObjVar(cls, o):
return type(o) is cls or id(o) in cls.allObjs
def collect(self):
print("Collecting list of all objects...")
gc.collect()
objs = get_all_objects()
frame = sys._getframe()
del objs[id(frame)] ## ignore the current frame
del objs[id(frame.f_code)]
ignoreTypes = [int]
refs = {}
count = {}
for k in objs:
o = objs[k]
typ = type(o)
oid = id(o)
if ObjTracker.isObjVar(o) or typ in ignoreTypes:
continue
try:
                ref = weakref.ref(o)  # 'o' is the object being cataloged
except:
ref = None
refs[oid] = ref
typ = type(o)
typStr = typeStr(o)
self.objTypes[oid] = typStr
ObjTracker.allObjs[id(typStr)] = None
count[typ] = count.get(typ, 0) + 1
print("All objects: %d Tracked objects: %d" % (len(objs), len(refs)))
return refs, count, objs
def forgetRef(self, ref):
if ref is not None:
del ObjTracker.allObjs[id(ref)]
def rememberRef(self, ref):
## Record the address of the weakref object so it is not included in future object counts.
if ref is not None:
ObjTracker.allObjs[id(ref)] = None
def lookup(self, oid, ref, objs=None):
if ref is None or ref() is None:
try:
obj = lookup(oid, objects=objs)
except:
obj = None
else:
obj = ref()
return obj
def report(self, refs, allobjs=None, showIDs=False):
if allobjs is None:
allobjs = get_all_objects()
count = {}
rev = {}
for oid in refs:
obj = self.lookup(oid, refs[oid], allobjs)
if obj is None:
typ = "[del] " + self.objTypes[oid]
else:
typ = typeStr(obj)
if typ not in rev:
rev[typ] = []
rev[typ].append(oid)
c = count.get(typ, [0,0])
count[typ] = [c[0]+1, c[1]+objectSize(obj)]
typs = list(count.keys())
typs.sort(lambda a,b: cmp(count[a][1], count[b][1]))
for t in typs:
line = " %d\t%d\t%s" % (count[t][0], count[t][1], t)
if showIDs:
line += "\t"+",".join(map(str,rev[t]))
print(line)
def findTypes(self, refs, regex):
allObjs = get_all_objects()
ids = {}
objs = []
r = re.compile(regex)
for k in refs:
if r.search(self.objTypes[k]):
objs.append(self.lookup(k, refs[k], allObjs))
return objs
def describeObj(obj, depth=4, path=None, ignore=None):
"""
Trace all reference paths backward, printing a list of different ways this object can be accessed.
Attempts to answer the question "who has a reference to this object"
"""
if path is None:
path = [obj]
if ignore is None:
ignore = {} ## holds IDs of objects used within the function.
ignore[id(sys._getframe())] = None
ignore[id(path)] = None
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
printed=False
for ref in refs:
if id(ref) in ignore:
continue
if id(ref) in list(map(id, path)):
print("Cyclic reference: " + refPathString([ref]+path))
printed = True
continue
newPath = [ref]+path
if len(newPath) >= depth:
refStr = refPathString(newPath)
if '[_]' not in refStr: ## ignore '_' references generated by the interactive shell
print(refStr)
printed = True
else:
describeObj(ref, depth, newPath, ignore)
printed = True
if not printed:
print("Dead end: " + refPathString(path))
def typeStr(obj):
"""Create a more useful type string by making <instance> types report their class."""
typ = type(obj)
if typ == types.InstanceType:
return "<instance of %s>" % obj.__class__.__name__
else:
return str(typ)
def searchRefs(obj, *args):
"""Pseudo-interactive function for tracing references backward.
**Arguments:**
obj: The initial object from which to start searching
args: A set of string or int arguments.
each integer selects one of obj's referrers to be the new 'obj'
each string indicates an action to take on the current 'obj':
t: print the types of obj's referrers
l: print the lengths of obj's referrers (if they have __len__)
i: print the IDs of obj's referrers
o: print obj
ro: return obj
rr: return list of obj's referrers
Examples::
searchRefs(obj, 't') ## Print types of all objects referring to obj
searchRefs(obj, 't', 0, 't') ## ..then select the first referrer and print the types of its referrers
searchRefs(obj, 't', 0, 't', 'l') ## ..also print lengths of the last set of referrers
searchRefs(obj, 0, 1, 'ro') ## Select index 0 from obj's referrer, then select index 1 from the next set of referrers, then return that object
"""
ignore = {id(sys._getframe()): None}
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
refs = [r for r in refs if id(r) not in ignore]
for a in args:
#fo = allFrameObjs()
#refs = [r for r in refs if r not in fo]
if type(a) is int:
obj = refs[a]
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
refs = [r for r in refs if id(r) not in ignore]
elif a == 't':
print(list(map(typeStr, refs)))
elif a == 'i':
print(list(map(id, refs)))
elif a == 'l':
def slen(o):
if hasattr(o, '__len__'):
return len(o)
else:
return None
print(list(map(slen, refs)))
elif a == 'o':
print(obj)
elif a == 'ro':
return obj
elif a == 'rr':
return refs
def allFrameObjs():
"""Return list of frame objects in current stack. Useful if you want to ignore these objects in refernece searches"""
f = sys._getframe()
objs = []
while f is not None:
objs.append(f)
objs.append(f.f_code)
#objs.append(f.f_locals)
#objs.append(f.f_globals)
#objs.append(f.f_builtins)
f = f.f_back
return objs
def findObj(regex):
"""Return a list of objects whose typeStr matches regex"""
allObjs = get_all_objects()
objs = []
r = re.compile(regex)
for i in allObjs:
obj = allObjs[i]
if r.search(typeStr(obj)):
objs.append(obj)
return objs
def listRedundantModules():
"""List modules that have been imported more than once via different paths."""
mods = {}
for name, mod in sys.modules.items():
if not hasattr(mod, '__file__'):
continue
mfile = os.path.abspath(mod.__file__)
if mfile[-1] == 'c':
mfile = mfile[:-1]
if mfile in mods:
print("module at %s has 2 names: %s, %s" % (mfile, name, mods[mfile]))
else:
mods[mfile] = name
def walkQObjectTree(obj, counts=None, verbose=False, depth=0):
"""
Walk through a tree of QObjects, doing nothing to them.
The purpose of this function is to find dead objects and generate a crash
immediately rather than stumbling upon them later.
Prints a count of the objects encountered, for fun. (or is it?)
"""
if verbose:
print(" "*depth + typeStr(obj))
report = False
if counts is None:
counts = {}
report = True
typ = str(type(obj))
try:
counts[typ] += 1
except KeyError:
counts[typ] = 1
for child in obj.children():
walkQObjectTree(child, counts, verbose, depth+1)
return counts
QObjCache = {}
def qObjectReport(verbose=False):
"""Generate a report counting all QObjects and their types"""
    global QObjCache
count = {}
for obj in findObj('PyQt'):
if isinstance(obj, QtCore.QObject):
oid = id(obj)
if oid not in QObjCache:
QObjCache[oid] = typeStr(obj) + " " + obj.objectName()
try:
QObjCache[oid] += " " + obj.parent().objectName()
QObjCache[oid] += " " + obj.text()
except:
pass
print("check obj", oid, str(QObjCache[oid]))
if obj.parent() is None:
walkQObjectTree(obj, count, verbose)
typs = list(count.keys())
typs.sort()
for t in typs:
print(count[t], "\t", t)
class PrintDetector(object):
def __init__(self):
self.stdout = sys.stdout
sys.stdout = self
def remove(self):
sys.stdout = self.stdout
def __del__(self):
self.remove()
def write(self, x):
self.stdout.write(x)
traceback.print_stack()
def flush(self):
self.stdout.flush()
class PeriodicTrace(object):
"""
Used to debug freezing by starting a new thread that reports on the
location of the main thread periodically.
"""
class ReportThread(QtCore.QThread):
def __init__(self):
self.frame = None
self.ind = 0
self.lastInd = None
self.lock = Mutex()
QtCore.QThread.__init__(self)
def notify(self, frame):
with self.lock:
self.frame = frame
self.ind += 1
def run(self):
while True:
time.sleep(1)
with self.lock:
if self.lastInd != self.ind:
print("== Trace %d: ==" % self.ind)
traceback.print_stack(self.frame)
self.lastInd = self.ind
def __init__(self):
self.mainThread = threading.current_thread()
self.thread = PeriodicTrace.ReportThread()
self.thread.start()
sys.settrace(self.trace)
def trace(self, frame, event, arg):
if threading.current_thread() is self.mainThread: # and 'threading' not in frame.f_code.co_filename:
self.thread.notify(frame)
# print("== Trace ==", event, arg)
# traceback.print_stack(frame)
return self.trace
class ThreadColor(object):
"""
Wrapper on stdout/stderr that colors text by the current thread ID.
*stream* must be 'stdout' or 'stderr'.
"""
colors = {}
lock = Mutex()
def __init__(self, stream):
self.stream = getattr(sys, stream)
self.err = stream == 'stderr'
setattr(sys, stream, self)
def write(self, msg):
with self.lock:
cprint.cprint(self.stream, self.color(), msg, -1, stderr=self.err)
def flush(self):
with self.lock:
self.stream.flush()
def color(self):
tid = threading.current_thread()
if tid not in self.colors:
c = (len(self.colors) % 15) + 1
self.colors[tid] = c
return self.colors[tid]
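# Illustrative sketch (added; not in the original pyqtgraph source): wrap
# stdout so each thread prints in its own color, which helps untangle
# interleaved log lines from worker threads.
def demoThreadColor():
    ThreadColor('stdout')              # replaces sys.stdout with the wrapper
    def worker(n):
        print("hello from thread %d" % n)
    threads = [threading.Thread(target=worker, args=(i,)) for i in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()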
| mit | 1,375,255,485,171,331,300 | 32.670401 | 161 | 0.522254 | false | 4.030964 | false | false | false |
cloudbase/maas | src/maasserver/migrations/0010_add_node_netboot.py | 1 | 11133 | # flake8: noqa
# SKIP this file when reformatting.
# The rest of this file was generated by South.
# encoding: utf-8
import datetime
from django.db import models
from maasserver.enum import NODE_STATUS
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Node.netboot'
db.add_column(u'maasserver_node', 'netboot', self.gf('django.db.models.fields.BooleanField')(default=True), keep_default=False)
# Find all the allocated nodes with netboot=True.
allocated_nodes = orm['maasserver.node'].objects.filter(
status=NODE_STATUS.ALLOCATED, netboot=True)
# Set netboot=False on these nodes.
allocated_nodes.update(netboot=False)
def backwards(self, orm):
# Deleting field 'Node.netboot'
db.delete_column(u'maasserver_node', 'netboot')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'maasserver.config': {
'Meta': {'object_name': 'Config'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value': ('maasserver.fields.JSONObjectField', [], {'null': 'True'})
},
u'maasserver.filestorage': {
'Meta': {'object_name': 'FileStorage'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
'filename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'maasserver.macaddress': {
'Meta': {'object_name': 'MACAddress'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mac_address': ('maasserver.fields.MACAddressField', [], {'unique': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'maasserver.node': {
'Meta': {'object_name': 'Node'},
'after_commissioning_action': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'architecture': ('django.db.models.fields.CharField', [], {'default': "u'i386'", 'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'netboot': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'power_parameters': ('maasserver.fields.JSONObjectField', [], {'default': "u''", 'blank': 'True'}),
'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}),
'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-20250ca0-b8f4-11e1-afce-002215205ce8'", 'unique': 'True', 'max_length': '41'}),
'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'maasserver.nodegroup': {
'Meta': {'object_name': 'NodeGroup'},
'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}),
'api_token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 'True'}),
'broadcast_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_range_high': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15'}),
'ip_range_low': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'unique': 'True', 'max_length': '80'}),
'router_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'subnet_mask': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'updated': ('django.db.models.fields.DateTimeField', [], {}),
'worker_ip': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15'})
},
u'maasserver.sshkey': {
'Meta': {'unique_together': "((u'user', u'key'),)", 'object_name': 'SSHKey'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.TextField', [], {}),
'updated': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
u'maasserver.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'piston.consumer': {
'Meta': {'object_name': 'Consumer'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '16'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': "orm['auth.User']"})
},
'piston.token': {
'Meta': {'object_name': 'Token'},
'callback': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Consumer']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1339989444L'}),
'token_type': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': "orm['auth.User']"}),
'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'})
}
}
complete_apps = ['maasserver']
| agpl-3.0 | -3,308,740,581,718,270,500 | 68.149068 | 182 | 0.548729 | false | 3.645383 | false | false | false |
veteman/thepython2blob | logicanalyser2fss.py | 1 | 5278 | #Copyright 2015 B. Johan G. Svensson
#Licensed under the terms of the MIT license (see LICENSE).
from __future__ import division
import struct, time, csv
import fsslib
class CSVWriter():
def __init__(self, fid, samplerate):
pass
class CSVReader():
def __init__(self, fid, samplerate):
self.samplerate = samplerate
self.csvfile = csv.reader(fid, skipinitialspace = True,strict=True)
self.csvfile.next() # Waste header
def getnext(self):
try:
linelst = self.csvfile.next()
except:
return False, False, False, False, False, False
dtime = int(round(float(linelst[0]) * self.samplerate))
data = linelst[1] == '1' # int() is too slow
indx = linelst[2] == '1' # int() is too slow
step = linelst[3] == '1' # int() is too slow
side = linelst[4] == '1' # int() is too slow
oper = linelst[5] == '1' # int() is too slow
return dtime, data, indx, step, side, oper
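# Illustrative sketch (added): a data row this reader expects after the
# header line it discards -- time in seconds, then five 0/1 flags in the
# order data, index, step, side, operation:
#   0.000040, 1, 0, 0, 1, 0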
class BinDeltaWrite():
def __init__(self, fid, samplerate):
pass
class BinDeltaWriter():
def __init__(self, fid):
self.fid = fid
def write(self, dtime, data, indx, step, side, oper):
        bitfield = data | (indx << 1) | (step << 2) | (side << 3) | (oper << 4)
        packed = struct.pack('<QB', dtime, bitfield)
        self.fid.write(packed)
class BinDeltaReader():
def __init__(self, fid):
self.fid = fid
def getnext(self):
data = self.fid.read(9)
if len(data) < 9:
return False, False, False, False, False, False
dtime, bitfield = struct.unpack('<QB',data)
data = bitfield & 1
indx = (bitfield >> 1) & 1
step = (bitfield >> 2) & 1
side = (bitfield >> 3) & 1
oper = (bitfield >> 4) & 1
return dtime, data, indx, step, side, oper
class BinDeltaReader2():
def __init__(self, fid, blocksize):
self.fid = fid
self.indx = 0
self.data = ''
self.datatuple = tuple()
self.blocksize = blocksize
def fillbuffer(self):
self.data = self.fid.read(9*self.blocksize)
length = len(self.data)//9
self.datatuple = struct.unpack('<' + length*'QB',self.data)
self.indx = 0
def getnext(self):
if self.indx + 1 >= len(self.datatuple):
self.fillbuffer()
if len(self.data) < 9:
return False, False, False, False, False, False
dtime = self.datatuple[self.indx]
bitfield = self.datatuple[self.indx + 1]
data = bitfield & 1
indx = (bitfield >> 1) & 1
step = (bitfield >> 2) & 1
side = (bitfield >> 3) & 1
oper = (bitfield >> 4) & 1
self.indx += 2
return dtime, data, indx, step, side, oper
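# Illustrative sketch (added; not in the original script): the 9-byte record
# layout shared by BinDeltaWriter and both readers is a little-endian uint64
# delta-time followed by one flag byte (bit0=data, bit1=index, bit2=step,
# bit3=side, bit4=operation).
def _demo_record_roundtrip():
    packed = struct.pack('<QB', 1234, 0b10101)   # data, step and oper set
    dtime, bitfield = struct.unpack('<QB', packed)
    assert dtime == 1234
    assert (bitfield & 1, (bitfield >> 2) & 1, (bitfield >> 4) & 1) == (1, 1, 1)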
def converttofss(fnamein,fnameout):
samplerate = 25000000
fin = open(fnamein,'rb')
fin.seek(0,2)
size = fin.tell()
fin.seek(0)
print 'Init readers'
#reader = CSVReader(fin, samplerate)
#reader = BinDeltaReader(fin)
reader = BinDeltaReader2(fin, 10000)
print 'Init buffers'
packer = fsslib.Pacman(samplerate)
print 'Init done'
howfar = 1
tim1sum = 0
tim2sum = 0
tim3sum = 0
tim4sum = 0
timtot = 0
howfar2 = 1
tic3 = time.clock()
while True:
tic0 = time.clock()
dtime, data, indx, step, side, oper = reader.getnext()
        if dtime is False:  # EOF sentinel; a zero delta-time is still valid data
break
tic1 = time.clock()
packer.store(dtime, data, indx, step, side, oper)
tic2 = time.clock()
howfar += 1
if howfar > 1000000:
print str(100*fin.tell()/size) + '%'
howfar = 1
print 'Time phase 1', tim1sum
print 'Time phase 2', tim2sum
print 'Time phase 3', tim3sum
print 'Time phase 4', tim4sum
print 'Time total', timtot
howfar2 += 1
tic3last = tic3
tic3 = time.clock()
tim1sum += tic1 - tic0
tim2sum += tic2 - tic1
tim3sum += tic3 - tic2
tim4sum += tic0 - tic3last
timtot += tic3 - tic3last
fin.close()
print 'Pack: Start'
outstr = packer.commit()
print 'Pack: Saving to file'
fout = open(fnameout,'wb')
fout.write(outstr)
fout.close()
print 'All done'
def convertfrfss(fnamein, fnameout):
fin = open(fnamein, 'rb')
print 'Reading file'
unpacker = fsslib.Streamer(fin)
samplerate = unpacker.samplerate
fin.close()
fout = open(fnameout, 'wb')
writer = BinDeltaWriter(fout)
print 'Decoding (will take LONG time!)'
while True:
stim, data, indx, step, side, oper = unpacker.getnext()
if stim is False:
break
writer.write(stim, data, indx, step, side, oper)
def test01():
converttofss('j:\\Transfer\\Midwinter-64bitDELTA8bit25MHz.bin', 'j:\\Transfer\\Midwinter.fss')
def test02():
convertfrfss('j:\\Transfer\\Midwinter.fss', 'j:\\Transfer\\Midwinter-DELTATEST.bin')
def main():
#test01()
test02()
if __name__ == '__main__':
main()
| mit | 2,724,560,461,482,590,700 | 28.333333 | 98 | 0.541872 | false | 3.359644 | false | false | false |
sempliva/Emotly | emotly/tests/test_model_user.py | 1 | 6285 | """
MIT License
Copyright (c) 2016 Emotly Contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# User Model Test Case
import unittest
import datetime
import time
from mongoengine import ValidationError, NotUniqueError
from emotly import app
from emotly import constants as CONSTANTS
from emotly.models import User
# Model: User.
class UserModelTestCase(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
def tearDown(self):
User.objects.delete()
def test_create_user(self):
u = User(nickname='testcreateuser',
email='[email protected]',
password="FakeUserPassword123", salt="salt")
self.assertTrue(u.save())
def test_create_3_user(self):
u = User(nickname='testemotly',
email='[email protected]',
password="FakeUserPassword123",
confirmed_email=True,
last_login=datetime.datetime.now(),
salt="salt")
u.save()
time.sleep(1) # sleep time in seconds
u1 = User(nickname='testemotly1',
email='[email protected]',
password="FakeUserPassword123",
confirmed_email=True,
last_login=datetime.datetime.now(),
salt="salt")
u1.save()
time.sleep(1) # sleep time in seconds
u2 = User(nickname='testemotly2',
email='[email protected]',
password="FakeUserPassword123",
confirmed_email=True,
last_login=datetime.datetime.now(),
salt="salt")
u2.save()
self.assertNotEqual(u.created_at, u1.created_at)
self.assertNotEqual(u1.created_at, u2.created_at)
def test_cannot_create_user_nickname_too_long(self):
u = User(nickname='VeryLongNicknameThatIsTooLong',
email='[email protected]',
password="FakeUserPassword123", salt="salt")
with self.assertRaises(ValidationError):
u.save()
def test_cannot_create_user_no_nickname(self):
u = User(email='[email protected]',
password="FakeUserPassword123",
salt="salt")
with self.assertRaises(ValidationError):
u.save()
def test_cannot_create_user_nickname_too_short(self):
u = User(nickname='test',
email='[email protected]',
password="FakeUserPassword123",
salt="salt")
with self.assertRaises(ValidationError):
u.save()
def test_cannot_create_user_nickname_not_match_validation_regex(self):
u = User(nickname='test&@1235',
email='[email protected]',
password="FakeUserPassword123",
salt="salt")
with self.assertRaises(ValidationError):
u.save()
def test_cannot_create_user_nickname_not_match_validation_regex2(self):
u = User(nickname='^^^$$$$$!!',
email='[email protected]',
password="FakeUserPassword123",
salt="salt")
with self.assertRaises(ValidationError):
u.save()
    def test_cannot_create_user_without_password(self):
u = User(nickname='testnopsw',
email='[email protected]',
salt="salt")
with self.assertRaises(ValidationError):
u.save()
    def test_cannot_create_user_without_salt(self):
u = User(nickname='testnosalt',
email='[email protected]',
password="FakeUserPassword123")
with self.assertRaises(ValidationError):
u.save()
    def test_cannot_create_user_without_email(self):
u = User(nickname='testnomail',
password="FakeUserPassword123",
salt="salt")
with self.assertRaises(ValidationError):
u.save()
def test_cannot_create_user_email_not_valid(self):
u = User(nickname='testmailnovalid',
email='test_duplicateexample.com',
password="FakeUserPassword123",
salt="salt")
with self.assertRaises(ValidationError):
u.save()
def test_cannot_create_user_email_duplicate_key(self):
u = User(nickname='testuser',
email='[email protected]',
password="FakeUserPassword123",
salt="salt")
u2 = User(nickname='testuser2',
email='[email protected]',
password="FakeUserPassword123",
salt="salt")
u.save()
self.assertRaises(NotUniqueError, u2.save)
def test_cannot_create_user_nickname_duplicate_key(self):
u = User(nickname='testnickname',
email='[email protected]',
password="FakeUserPassword123",
salt="salt")
u2 = User(nickname='testnickname',
email='[email protected]',
password="FakeUserPassword123",
salt="salt")
u.save()
self.assertRaises(NotUniqueError, u2.save)
| mit | -5,786,649,106,648,142,000 | 36.861446 | 78 | 0.608115 | false | 4.349481 | true | false | false |
Rayal/ROS_proov | ros_robotics_projects/chapter_7_codes/rostensorflow/image_recognition.py | 1 | 1917 | import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import String
from cv_bridge import CvBridge
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.models.image.imagenet import classify_image
class RosTensorFlow():
def __init__(self):
classify_image.maybe_download_and_extract()
self._session = tf.Session()
classify_image.create_graph()
self._cv_bridge = CvBridge()
self._sub = rospy.Subscriber('image', Image, self.callback, queue_size=1)
self._pub = rospy.Publisher('result', String, queue_size=1)
self.score_threshold = rospy.get_param('~score_threshold', 0.1)
self.use_top_k = rospy.get_param('~use_top_k', 5)
def callback(self, image_msg):
cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
# copy from
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/image/imagenet/classify_image.py
image_data = cv2.imencode('.jpg', cv_image)[1].tostring()
# Creates graph from saved GraphDef.
softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0')
predictions = self._session.run(
softmax_tensor, {'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
# Creates node ID --> English string lookup.
node_lookup = classify_image.NodeLookup()
top_k = predictions.argsort()[-self.use_top_k:][::-1]
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
if score > self.score_threshold:
rospy.loginfo('%s (score = %.5f)' % (human_string, score))
self._pub.publish(human_string)
def main(self):
rospy.spin()
if __name__ == '__main__':
rospy.init_node('rostensorflow')
tensor = RosTensorFlow()
tensor.main()
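# Illustrative launch sketch (added; the package name and topic remapping are
# assumptions -- adapt them to the camera driver actually in use):
#   rosrun rostensorflow image_recognition.py image:=/usb_cam/image_raw
#   rostopic echo /result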
| mit | -6,235,122,650,728,627,000 | 38.122449 | 113 | 0.633281 | false | 3.596623 | false | false | false |
Microsoft/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda/models/match_spec.py | 1 | 31362 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import Mapping
from functools import reduce
from os.path import basename
import re
from .channel import Channel
from .dist import Dist
from .records import PackageRecord, PackageRef
from .version import BuildNumberMatch, VersionSpec
from .._vendor.auxlib.collection import frozendict
from ..base.constants import CONDA_TARBALL_EXTENSION
from ..common.compat import (isiterable, iteritems, itervalues, string_types, text_type,
with_metaclass)
from ..common.path import expand
from ..common.url import is_url, path_to_url, unquote
from ..exceptions import CondaValueError
try:
from cytoolz.itertoolz import concat, concatv, groupby
except ImportError: # pragma: no cover
from .._vendor.toolz.itertoolz import concat, concatv, groupby # NOQA
class MatchSpecType(type):
def __call__(cls, spec_arg=None, **kwargs):
if spec_arg:
if isinstance(spec_arg, MatchSpec) and not kwargs:
return spec_arg
elif isinstance(spec_arg, MatchSpec):
new_kwargs = dict(spec_arg._match_components)
new_kwargs.setdefault('optional', spec_arg.optional)
new_kwargs.setdefault('target', spec_arg.target)
new_kwargs.update(**kwargs)
return super(MatchSpecType, cls).__call__(**new_kwargs)
elif isinstance(spec_arg, string_types):
parsed = _parse_spec_str(spec_arg)
parsed.update(kwargs)
return super(MatchSpecType, cls).__call__(**parsed)
elif isinstance(spec_arg, Mapping):
parsed = dict(spec_arg, **kwargs)
return super(MatchSpecType, cls).__call__(**parsed)
elif isinstance(spec_arg, PackageRef):
parsed = {
'channel': spec_arg.channel,
'subdir': spec_arg.subdir,
'name': spec_arg.name,
'version': spec_arg.version,
'build': spec_arg.build,
}
parsed.update(kwargs)
return super(MatchSpecType, cls).__call__(**parsed)
elif isinstance(spec_arg, Dist):
# TODO: remove this branch when we get rid of Dist
parsed = {
'name': spec_arg.name,
'version': spec_arg.version,
'build': spec_arg.build,
}
if spec_arg.channel:
parsed['channel'] = spec_arg.channel
if spec_arg.subdir:
parsed['subdir'] = spec_arg.subdir
parsed.update(kwargs)
return super(MatchSpecType, cls).__call__(**parsed)
elif hasattr(spec_arg, 'dump'):
parsed = spec_arg.dump()
parsed.update(kwargs)
return super(MatchSpecType, cls).__call__(**parsed)
else:
raise CondaValueError("Invalid MatchSpec:\n spec_arg=%s\n kwargs=%s"
% (spec_arg, kwargs))
else:
return super(MatchSpecType, cls).__call__(**kwargs)
@with_metaclass(MatchSpecType)
class MatchSpec(object):
"""
:class:`MatchSpec` is, fundamentally, a query language for conda packages. Any of the fields
that comprise a :class:`PackageRecord` can be used to compose a :class:`MatchSpec`.
:class:`MatchSpec` can be composed with keyword arguments, where keys are any of the
attributes of :class:`PackageRecord`. Values for keyword arguments are the exact values the
attribute should match against. Many fields can also be matched against non-exact values--by
including wildcard `*` and `>`/`<` ranges--where supported. Any non-specified field is
the equivalent of a full wildcard match.
:class:`MatchSpec` can also be composed using a single positional argument, with optional
keyword arguments. Keyword arguments also override any conflicting information provided in
the positional argument. The positional argument can be either an existing :class:`MatchSpec`
instance or a string. Conda has historically had several string representations for equivalent
:class:`MatchSpec`s. This :class:`MatchSpec` should accept any existing valid spec string, and
correctly compose a :class:`MatchSpec` instance.
A series of rules are now followed for creating the canonical string representation of a
:class:`MatchSpec` instance. The canonical string representation can generically be
represented by
(channel(/subdir):(namespace):)name(version(build))[key1=value1,key2=value2]
where `()` indicate optional fields. The rules for constructing a canonical string
representation are:
1. `name` (i.e. "package name") is required, but its value can be '*'. Its position is always
outside the key-value brackets.
2. If `version` is an exact version, it goes outside the key-value brackets and is prepended
by `==`. If `version` is a "fuzzy" value (e.g. `1.11.*`), it goes outside the key-value
brackets with the `.*` left off and is prepended by `=`. Otherwise `version` is included
inside key-value brackets.
3. If `version` is an exact version, and `build` is an exact value, `build` goes outside
key-value brackets prepended by a `=`. Otherwise, `build` goes inside key-value brackets.
`build_string` is an alias for `build`.
4. The `namespace` position is being held for a future conda feature.
    5. If `channel` is included and is an exact value, a `::` separator is used between `channel`
and `name`. `channel` can either be a canonical channel name or a channel url. In the
canonical string representation, the canonical channel name will always be used.
6. If `channel` is an exact value and `subdir` is an exact value, `subdir` is appended to
`channel` with a `/` separator. Otherwise, `subdir` is included in the key-value brackets.
7. Key-value brackets can be delimited by comma, space, or comma+space. Value can optionally
be wrapped in single or double quotes, but must be wrapped if `value` contains a comma,
space, or equal sign. The canonical format uses comma delimiters and single quotes.
8. When constructing a :class:`MatchSpec` instance from a string, any key-value pair given
inside the key-value brackets overrides any matching parameter given outside the brackets.
    When :class:`MatchSpec` attribute values are simple strings, they are interpreted using the
following conventions:
- If the string begins with `^` and ends with `$`, it is converted to a regex.
- If the string contains an asterisk (`*`), it is transformed from a glob to a regex.
- Otherwise, an exact match to the string is sought.
Examples:
>>> str(MatchSpec(name='foo', build='py2*', channel='conda-forge'))
'conda-forge::foo[build=py2*]'
>>> str(MatchSpec('foo 1.0 py27_0'))
'foo==1.0=py27_0'
>>> str(MatchSpec('foo=1.0=py27_0'))
'foo==1.0=py27_0'
>>> str(MatchSpec('conda-forge::foo[version=1.0.*]'))
'conda-forge::foo=1.0'
>>> str(MatchSpec('conda-forge/linux-64::foo>=1.0'))
"conda-forge/linux-64::foo[version='>=1.0']"
>>> str(MatchSpec('*/linux-64::foo>=1.0'))
"foo[subdir=linux-64,version='>=1.0']"
To fully-specify a package with a full, exact spec, the fields
- channel
- subdir
- name
- version
- build
must be given as exact values. In the future, the namespace field will be added to this list.
Alternatively, an exact spec is given by '*[md5=12345678901234567890123456789012]'.
"""
FIELD_NAMES = (
'channel',
'subdir',
'name',
'version',
'build',
'build_number',
'track_features',
'features',
'url',
'md5',
)
def __init__(self, optional=False, target=None, **kwargs):
self.optional = optional
self.target = target
self._match_components = self._build_components(**kwargs)
def get_exact_value(self, field_name):
v = self._match_components.get(field_name)
return v and v.exact_value
def get_raw_value(self, field_name):
v = self._match_components.get(field_name)
return v and v.raw_value
def get(self, field_name, default=None):
v = self.get_raw_value(field_name)
return default if v is None else v
@property
def is_name_only_spec(self):
return (len(self._match_components) == 1
and 'name' in self._match_components
and self.name != '*')
def dist_str(self):
return self.__str__()
def match(self, rec):
"""
Accepts an `IndexRecord` or a dict, and matches can pull from any field
in that record. Returns True for a match, and False for no match.
"""
if isinstance(rec, dict):
rec = PackageRecord.from_objects(rec)
for field_name, v in iteritems(self._match_components):
if not self._match_individual(rec, field_name, v):
return False
return True
def _match_individual(self, record, field_name, match_component):
val = getattr(record, field_name)
try:
return match_component.match(val)
except AttributeError:
return match_component == val
def _is_simple(self):
return len(self._match_components) == 1 and self.get_exact_value('name') is not None
def _is_single(self):
return len(self._match_components) == 1
def _to_filename_do_not_use(self):
# WARNING: this is potentially unreliable and use should probably be limited
# returns None if a filename can't be constructed
fn_field = self.get_exact_value('fn')
if fn_field:
return fn_field
vals = tuple(self.get_exact_value(x) for x in ('name', 'version', 'build'))
if not any(x is None for x in vals):
return '%s-%s-%s.tar.bz2' % vals
else:
return None
def __repr__(self):
builder = []
builder += ["%s=%r" % (c, self._match_components[c])
for c in self.FIELD_NAMES if c in self._match_components]
if self.optional:
builder.append("optional=True")
if self.target:
builder.append("target=%r" % self.target)
return "%s(%s)" % (self.__class__.__name__, ', '.join(builder))
def __str__(self):
builder = []
brackets = []
channel_matcher = self._match_components.get('channel')
if channel_matcher and channel_matcher.exact_value:
builder.append(text_type(channel_matcher))
elif channel_matcher and not channel_matcher.matches_all:
brackets.append("channel=%s" % text_type(channel_matcher))
subdir_matcher = self._match_components.get('subdir')
if subdir_matcher:
if channel_matcher and channel_matcher.exact_value:
builder.append('/%s' % subdir_matcher)
else:
brackets.append("subdir=%s" % subdir_matcher)
name_matcher = self._match_components.get('name', '*')
builder.append(('::%s' if builder else '%s') % name_matcher)
version_exact = False
version = self._match_components.get('version')
if version:
version = text_type(version)
if any(s in version for s in '><$^|,'):
brackets.append("version='%s'" % version)
elif version.endswith('.*'):
builder.append('=' + version[:-2])
elif version.endswith('*'):
builder.append('=' + version[:-1])
elif version.startswith('=='):
builder.append(version)
version_exact = True
else:
builder.append('==' + version)
version_exact = True
build = self._match_components.get('build')
if build:
build = text_type(build)
if any(s in build for s in '><$^|,'):
brackets.append("build='%s'" % build)
elif '*' in build:
brackets.append("build=%s" % build)
elif version_exact:
builder.append('=' + build)
else:
brackets.append("build=%s" % build)
_skip = ('channel', 'subdir', 'name', 'version', 'build')
for key in self.FIELD_NAMES:
if key not in _skip and key in self._match_components:
if key == 'url' and channel_matcher:
# skip url in canonical str if channel already included
continue
value = text_type(self._match_components[key])
if any(s in value for s in ', ='):
brackets.append("%s='%s'" % (key, value))
else:
brackets.append("%s=%s" % (key, value))
if brackets:
builder.append('[%s]' % ','.join(brackets))
return ''.join(builder)
def __json__(self):
return self.__str__()
def conda_build_form(self):
builder = []
name = self.get_exact_value('name')
assert name
builder.append(name)
build = self.get_raw_value('build')
version = self.get_raw_value('version')
if build:
assert version
builder += [version, build]
elif version:
builder.append(version)
return ' '.join(builder)
def __eq__(self, other):
if isinstance(other, MatchSpec):
self_key = self._match_components, self.optional, self.target
other_key = other._match_components, other.optional, other.target
return self_key == other_key
else:
return False
def __hash__(self):
return hash((self._match_components, self.optional, self.target))
def __contains__(self, field):
return field in self._match_components
@staticmethod
def _build_components(**kwargs):
def _make(field_name, value):
if field_name not in PackageRecord.__fields__:
raise CondaValueError('Cannot match on field %s' % (field_name,))
elif isinstance(value, string_types):
value = text_type(value)
if hasattr(value, 'match'):
matcher = value
elif field_name in _implementors:
matcher = _implementors[field_name](value)
else:
matcher = StrMatch(text_type(value))
return field_name, matcher
return frozendict(_make(key, value) for key, value in iteritems(kwargs))
@property
def name(self):
return self.get_exact_value('name') or '*'
#
# Remaining methods are for back compatibility with conda-build. Do not remove
# without coordination with the conda-build team.
#
@property
def strictness(self):
# With the old MatchSpec, strictness==3 if name, version, and
# build were all specified.
s = sum(f in self._match_components for f in ('name', 'version', 'build'))
if s < len(self._match_components):
return 3
elif not self.get_exact_value('name') or 'build' in self._match_components:
return 3
elif 'version' in self._match_components:
return 2
else:
return 1
@property
def spec(self):
return self.conda_build_form()
@property
def version(self):
# in the old MatchSpec object, version was a VersionSpec, not a str
# so we'll keep that API here
return self._match_components.get('version')
@property
def fn(self):
val = self.get_raw_value('fn') or self.get_raw_value('url')
if val:
val = basename(val)
assert val
return val
@classmethod
def merge(cls, match_specs):
match_specs = tuple(cls(s) for s in match_specs)
grouped = groupby(lambda spec: spec.get_exact_value('name'), match_specs)
dont_merge_these = grouped.pop('*', []) + grouped.pop(None, [])
specs_map = {
name: reduce(lambda x, y: x._merge(y), specs) if len(specs) > 1 else specs[0]
for name, specs in iteritems(grouped)
}
return tuple(concatv(itervalues(specs_map), dont_merge_these))
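    # Example (an illustrative sketch, not from the original source): merge
    # collapses specs that share an exact package name by combining their
    # components -- the exact combined form of a version constraint is
    # delegated to VersionSpec.merge, which is defined elsewhere.
    #
    #     MatchSpec.merge(['numpy >=1.10', 'numpy <2.0', 'scipy'])
    #     # -> one combined MatchSpec for numpy, plus the scipy spec unchanged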
def _merge(self, other):
if self.optional != other.optional or self.target != other.target:
raise ValueError("Incompatible MatchSpec merge: - %s\n - %s" % (self, other))
final_components = {}
component_names = set(self._match_components) | set(other._match_components)
for component_name in component_names:
this_component = self._match_components.get(component_name)
that_component = other._match_components.get(component_name)
if this_component is None and that_component is None:
continue
elif this_component is None:
final_components[component_name] = that_component
elif that_component is None:
final_components[component_name] = this_component
else:
final_components[component_name] = this_component.merge(that_component)
return self.__class__(optional=self.optional, target=self.target, **final_components)
def _parse_version_plus_build(v_plus_b):
"""This should reliably pull the build string out of a version + build string combo.
Examples:
>>> _parse_version_plus_build("=1.2.3 0")
('=1.2.3', '0')
>>> _parse_version_plus_build("1.2.3=0")
('1.2.3', '0')
>>> _parse_version_plus_build(">=1.0 , < 2.0 py34_0")
('>=1.0,<2.0', 'py34_0')
>>> _parse_version_plus_build(">=1.0 , < 2.0 =py34_0")
('>=1.0,<2.0', 'py34_0')
>>> _parse_version_plus_build("=1.2.3 ")
('=1.2.3', None)
>>> _parse_version_plus_build(">1.8,<2|==1.7")
('>1.8,<2|==1.7', None)
>>> _parse_version_plus_build("* openblas_0")
('*', 'openblas_0')
>>> _parse_version_plus_build("* *")
('*', '*')
"""
parts = re.search(r'((?:.+?)[^><!,|]?)(?:(?<![=!|,<>])(?:[ =])([^-=,|<>]+?))?$', v_plus_b)
if parts:
version, build = parts.groups()
build = build and build.strip()
else:
version, build = v_plus_b, None
return version and version.replace(' ', ''), build
def _parse_legacy_dist(dist_str):
"""
Examples:
>>> _parse_legacy_dist("_license-1.1-py27_1.tar.bz2")
('_license', '1.1', 'py27_1')
>>> _parse_legacy_dist("_license-1.1-py27_1")
('_license', '1.1', 'py27_1')
"""
if dist_str.endswith(CONDA_TARBALL_EXTENSION):
dist_str = dist_str[:-len(CONDA_TARBALL_EXTENSION)]
name, version, build = dist_str.rsplit('-', 2)
return name, version, build
def _parse_channel(channel_val):
if not channel_val:
return None, None
chn = Channel(channel_val)
channel_name = chn.name
return channel_name, chn.subdir
def _parse_spec_str(spec_str):
# pre-step for ugly backward compat
if spec_str.endswith('@'):
feature_name = spec_str[:-1]
return {
'name': '*',
'track_features': (feature_name,),
}
# Step 1. strip '#' comment
if '#' in spec_str:
ndx = spec_str.index('#')
spec_str, _ = spec_str[:ndx], spec_str[ndx:]
        spec_str = spec_str.strip()
# Step 2. done if spec_str is a tarball
if spec_str.endswith(CONDA_TARBALL_EXTENSION):
# treat as a normal url
if not is_url(spec_str):
spec_str = unquote(path_to_url(expand(spec_str)))
channel = Channel(spec_str)
if channel.subdir:
name, version, build = _parse_legacy_dist(channel.package_filename)
result = {
'channel': channel.canonical_name,
'subdir': channel.subdir,
'name': name,
'version': version,
'build': build,
'fn': channel.package_filename,
'url': spec_str,
}
else:
# url is not a channel
return {
'name': '*',
'fn': basename(spec_str),
'url': spec_str,
}
return result
# Step 3. strip off brackets portion
brackets = {}
m3 = re.match(r'.*(?:(\[.*\]))', spec_str)
if m3:
brackets_str = m3.groups()[0]
spec_str = spec_str.replace(brackets_str, '')
brackets_str = brackets_str[1:-1]
m3b = re.finditer(r'([a-zA-Z0-9_-]+?)=(["\']?)([^\'"]*?)(\2)(?:[, ]|$)', brackets_str)
for match in m3b:
key, _, value, _ = match.groups()
if not key or not value:
raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
brackets[key] = value
# Step 4. strip off parens portion
m4 = re.match(r'.*(?:(\(.*\)))', spec_str)
parens = {}
if m4:
parens_str = m4.groups()[0]
spec_str = spec_str.replace(parens_str, '')
parens_str = parens_str[1:-1]
m4b = re.finditer(r'([a-zA-Z0-9_-]+?)=(["\']?)([^\'"]*?)(\2)(?:[, ]|$)', parens_str)
for match in m4b:
key, _, value, _ = match.groups()
parens[key] = value
if 'optional' in parens_str:
parens['optional'] = True
# Step 5. strip off '::' channel and namespace
m5 = spec_str.rsplit(':', 2)
m5_len = len(m5)
if m5_len == 3:
channel_str, namespace, spec_str = m5
elif m5_len == 2:
namespace, spec_str = m5
channel_str = None
elif m5_len:
spec_str = m5[0]
channel_str, namespace = None, None
else:
raise NotImplementedError()
channel, subdir = _parse_channel(channel_str)
if 'channel' in brackets:
b_channel, b_subdir = _parse_channel(brackets.pop('channel'))
if b_channel:
channel = b_channel
if b_subdir:
subdir = b_subdir
if 'subdir' in brackets:
subdir = brackets.pop('subdir')
# Step 6. strip off package name from remaining version + build
m3 = re.match(r'([^ =<>!]+)?([><!= ].+)?', spec_str)
if m3:
name, spec_str = m3.groups()
if name is None:
raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
else:
raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
# Step 7. otherwise sort out version + build
spec_str = spec_str and spec_str.strip()
# This was an attempt to make MatchSpec('numpy-1.11.0-py27_0') work like we'd want. It's
# not possible though because plenty of packages have names with more than one '-'.
# if spec_str is None and name.count('-') >= 2:
# name, version, build = _parse_legacy_dist(name)
if spec_str:
if '[' in spec_str:
raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
version, build = _parse_version_plus_build(spec_str)
# translate version '=1.2.3' to '1.2.3*'
# is it a simple version starting with '='? i.e. '=1.2.3'
if version.startswith('='):
test_str = version[1:]
if version.startswith('==') and build is None:
version = version[2:]
elif not any(c in test_str for c in "=,|"):
if build is None and not test_str.endswith('*'):
version = test_str + '*'
else:
version = test_str
else:
version, build = None, None
# Step 8. now compile components together
components = {}
components['name'] = name if name else '*'
if channel is not None:
components['channel'] = channel
if subdir is not None:
components['subdir'] = subdir
if namespace is not None:
# components['namespace'] = namespace
pass
if version is not None:
components['version'] = version
if build is not None:
components['build'] = build
# anything in brackets will now strictly override key as set in other area of spec str
components.update(brackets)
return components
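# Worked example (editorial addition for clarity, not part of the original
# source). Tracing "conda-forge/linux-64::foo >=1.0 py27_0" through the steps
# above:
#
#     _parse_spec_str("conda-forge/linux-64::foo >=1.0 py27_0")
#     # Step 5 splits off channel 'conda-forge' and subdir 'linux-64';
#     # Step 6 strips the name 'foo'; Step 7 yields version '>=1.0' and
#     # build 'py27_0'; Step 8 returns:
#     # {'channel': 'conda-forge', 'subdir': 'linux-64', 'name': 'foo',
#     #  'version': '>=1.0', 'build': 'py27_0'}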
@with_metaclass(ABCMeta)
class MatchInterface(object):
def __init__(self, value):
self._raw_value = value
@abstractmethod
def match(self, other):
raise NotImplementedError()
def matches(self, value):
return self.match(value)
@property
def raw_value(self):
return self._raw_value
@abstractproperty
def exact_value(self):
"""If the match value is an exact specification, returns the value.
Otherwise returns None.
"""
raise NotImplementedError()
@abstractmethod
def merge(self, other):
raise NotImplementedError()
class SplitStrMatch(MatchInterface):
__slots__ = '_raw_value',
def __init__(self, value):
super(SplitStrMatch, self).__init__(self._convert(value))
def _convert(self, value):
try:
return frozenset(value.replace(' ', ',').split(','))
except AttributeError:
if isiterable(value):
return frozenset(value)
raise
def match(self, other):
try:
return other and self._raw_value & other._raw_value
except AttributeError:
return self._raw_value & self._convert(other)
def __repr__(self):
if self._raw_value:
return "{%s}" % ', '.join("'%s'" % s for s in sorted(self._raw_value))
else:
return 'set()'
def __str__(self):
# this space delimiting makes me nauseous
return ' '.join(sorted(self._raw_value))
def __eq__(self, other):
return isinstance(other, self.__class__) and self._raw_value == other._raw_value
def __hash__(self):
return hash(self._raw_value)
@property
def exact_value(self):
return self._raw_value
def merge(self, other):
if self.raw_value != other.raw_value:
raise ValueError("Incompatible component merge:\n - %r\n - %r"
% (self.raw_value, other.raw_value))
return self.raw_value
class FeatureMatch(MatchInterface):
__slots__ = '_raw_value',
def __init__(self, value):
super(FeatureMatch, self).__init__(self._convert(value))
def _convert(self, value):
if not value:
return frozenset()
elif isinstance(value, string_types):
return frozenset(f for f in (
ff.strip() for ff in value.replace(' ', ',').split(',')
) if f)
else:
return frozenset(f for f in (ff.strip() for ff in value) if f)
def match(self, other):
other = self._convert(other)
return self._raw_value == other
def __repr__(self):
return "[%s]" % ', '.join("'%s'" % k for k in sorted(self._raw_value))
def __str__(self):
return ' '.join(sorted(self._raw_value))
def __eq__(self, other):
return isinstance(other, self.__class__) and self._raw_value == other._raw_value
def __hash__(self):
return hash(self._raw_value)
@property
def exact_value(self):
return self._raw_value
def merge(self, other):
if self.raw_value != other.raw_value:
raise ValueError("Incompatible component merge:\n - %r\n - %r"
% (self.raw_value, other.raw_value))
return self.raw_value
class StrMatch(MatchInterface):
__slots__ = '_raw_value', '_re_match'
def __init__(self, value):
super(StrMatch, self).__init__(value)
self._re_match = None
if value.startswith('^') and value.endswith('$'):
self._re_match = re.compile(value).match
elif '*' in value:
self._re_match = re.compile(r'^(?:%s)$' % value.replace('*', r'.*')).match
def match(self, other):
try:
_other_val = other._raw_value
except AttributeError:
_other_val = text_type(other)
if self._re_match:
return self._re_match(_other_val)
else:
return self._raw_value == _other_val
def __str__(self):
return self._raw_value
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, self._raw_value)
def __eq__(self, other):
return isinstance(other, self.__class__) and self._raw_value == other._raw_value
def __hash__(self):
return hash(self._raw_value)
@property
def exact_value(self):
return self._raw_value if self._re_match is None else None
@property
def matches_all(self):
return self._raw_value == '*'
def merge(self, other):
if self.raw_value != other.raw_value:
raise ValueError("Incompatible component merge:\n - %r\n - %r"
% (self.raw_value, other.raw_value))
return self.raw_value
class ChannelMatch(StrMatch):
def __init__(self, value):
self._re_match = None
if isinstance(value, string_types):
if value.startswith('^') and value.endswith('$'):
self._re_match = re.compile(value).match
elif '*' in value:
self._re_match = re.compile(r'^(?:%s)$' % value.replace('*', r'.*')).match
else:
value = Channel(value)
super(StrMatch, self).__init__(value) # lgtm [py/super-not-enclosing-class]
def match(self, other):
try:
_other_val = Channel(other._raw_value)
except AttributeError:
_other_val = Channel(other)
if self._re_match:
return self._re_match(_other_val.canonical_name)
else:
# assert ChannelMatch('pkgs/free').match('defaults') is False
# assert ChannelMatch('defaults').match('pkgs/free') is True
return (self._raw_value.name == _other_val.name
or self._raw_value.name == _other_val.canonical_name)
def __str__(self):
try:
return "%s" % self._raw_value.name
except AttributeError:
return "%s" % self._raw_value
def __repr__(self):
return "'%s'" % self.__str__()
def merge(self, other):
if self.raw_value != other.raw_value:
raise ValueError("Incompatible component merge:\n - %r\n - %r"
% (self.raw_value, other.raw_value))
return self.raw_value
class LowerStrMatch(StrMatch):
def __init__(self, value):
super(LowerStrMatch, self).__init__(value.lower())
_implementors = {
'name': LowerStrMatch,
'track_features': FeatureMatch,
'features': FeatureMatch,
'version': VersionSpec,
'build_number': BuildNumberMatch,
'channel': ChannelMatch,
}
| apache-2.0 | -8,014,581,032,672,361,000 | 35.048276 | 99 | 0.568554 | false | 3.956352 | false | false | false |
open-austin/capture | distance.py | 1 | 4386 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import argparse
import glob
import os
import numpy as np
import pandas as pd
from geopy.distance import vincenty as point_distance
def ingest(fn, route_id, begin_latlng, end_latlng):
df = pd.read_csv(fn, parse_dates=['timestamp'])
df = df.drop(['speed', 'trip_headsign'], axis=1)
df = df[df.route_id == route_id]
df['begin_distances'] = compute_distance(df, begin_latlng)
df['end_distances'] = compute_distance(df, end_latlng)
return df
def compute_distance(df, latlng):
df = df.copy()
starts = zip(df.latitude, df.longitude)
return [point_distance(latlng, s).meters for s in starts]
def parse_duration(df):
'''
for each trip id
choose a reading nearest the begin stop
choose a reading nearest downtown
subtract the times for those two readings
positive is southbound
'''
mins = df.groupby('trip_id').idxmin()
begin_mins = df.loc[mins.begin_distances].set_index('trip_id')
end_mins = df.loc[mins.end_distances].set_index('trip_id')
unneeded_cols = ['begin_distances', 'end_distances', 'latitude', 'longitude']
begin_mins.drop(unneeded_cols, axis=1, inplace=True)
end_mins.drop(['vehicle_id', 'route_id'] + unneeded_cols, axis=1, inplace=True)
result = begin_mins.join(end_mins, rsuffix='_begin', lsuffix='_end')
duration = begin_mins.timestamp - end_mins.timestamp
result['duration'] = duration / np.timedelta64(1, 's')
return result
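# Illustrative call (editorial sketch; the route id and stop coordinates below
# are hypothetical, not taken from the original script):
#
#     df = ingest('vehicle_positions.csv', 801,
#                 (30.2672, -97.7431),   # hypothetical begin stop
#                 (30.2850, -97.7335))   # hypothetical end stop
#     durations = parse_duration(df)     # one signed duration per trip_id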
def parse_duration_by_hour(df):
df['duration_abs'] = df['duration'].abs()
df['hour'] = df['timestamp_begin'].apply(
lambda x: x.tz_localize('UTC').tz_convert('US/Central').hour
)
df_byhour = df.groupby('hour')
results = pd.concat([
df_byhour['duration_abs'].count(),
df_byhour['duration_abs'].mean()
], axis=1, keys=['count', 'mean'])
return results.reindex(index=range(0, 24))
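# Note (editorial): the reindex to range(0, 24) makes hours with no trips show
# up as NaN rows, so downstream output covers the full day. For example,
# parse_duration_by_hour(durations) returns a 24-row frame with 'count' and
# 'mean' columns keyed by US/Central hour of day.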
def parse(capmetrics_path=None, leglob=None, route_id=None, begin_lat=None, begin_lon=None, end_lat=None, end_lon=None, name=None):
df_total = pd.DataFrame()
data_glob = os.path.join(capmetrics_path, 'data', 'vehicle_positions', leglob)
files = glob.glob(data_glob)
for i, fname in enumerate(files):
print('({}/{}) Ingesting {}'.format(i + 1, len(files), fname))
try:
df_ingested = ingest(fname, route_id, (begin_lat, begin_lon), (end_lat, end_lon))
df_duration = parse_duration(df_ingested)
df_total = pd.concat([df_total, df_duration])
except Exception as e:
print(e)
print('Skipping ', fname)
if df_total.empty:
print('No vehicle positions found')
return
    # aggregate across every ingested file, not just the last parsed one
    return parse_duration_by_hour(df_total)
def main():
parser = argparse.ArgumentParser(description=main.__doc__)
parser.add_argument('--capmetrics_path', help='Path to the capmetrics directory', required=True, type=str)
parser.add_argument('--glob', help='Glob of vehicle positions CSV files', required=True, type=str)
parser.add_argument('--name', help='Name of the output file', required=True, type=str)
parser.add_argument('--route_id', help='Route ID', required=True, type=int)
parser.add_argument('--begin_lat', help='Latitude of first stop', required=True, type=float)
parser.add_argument('--begin_lon', help='Longitude of first stop', required=True, type=float)
parser.add_argument('--end_lat', help='Latitude of second stop', required=True, type=float)
parser.add_argument('--end_lon', help='Longitude of second stop', required=True, type=float)
args = parser.parse_args()
results = parse(
capmetrics_path=args.capmetrics_path,
name=args.name,
leglob=args.glob,
route_id=args.route_id,
begin_lat=args.begin_lat,
begin_lon=args.begin_lon,
end_lat=args.end_lat,
end_lon=args.end_lon
)
output_filename = '{route_id}_{name}_{glob}'.format(route_id=args.route_id, glob=args.glob, name=args.name)
output_path_duration_by_hour = 'results/duration_by_hour/{}.csv'.format(output_filename)
results.to_csv(output_path_duration_by_hour, header=True, sep='\t')
print('Saved duration by hour to {}'.format(output_path_duration_by_hour))
if __name__ == '__main__':
main()
| gpl-3.0 | -4,384,529,574,560,891,400 | 35.247934 | 131 | 0.649111 | false | 3.358346 | false | false | false |
borg-project/borg | borg/unix/accounting.py | 1 | 8038 | """@author: Bryan Silverthorn <[email protected]>"""
import os
import select
import signal
import datetime
import collections
import borg
log = borg.get_logger(__name__)
class SessionTimeAccountant(object):
"""
Track the total CPU (user) time for members of a session.
Process accounting under Linux is a giant pain, especially without root
access. In the general case, it is literally impossible (without patching
the kernel or some such craziness). Whatever. We do our best. Slightly
fancier schemes are available, but they're not much fancier---they're
mostly good only at making it harder for processes to actively evade being
charged. For primarily long-running processes that act in good faith, we
should do ok.
"""
def __init__(self, sid):
"""
Initialize.
"""
self.sid = sid
self.charged = {}
def audit(self):
"""
Update estimates.
"""
for p in borg.unix.proc.ProcessStat.in_session(self.sid):
self.charged[p.pid] = p.user_time
@property
def total(self):
"""
Return estimated total.
"""
return sum(self.charged.values(), datetime.timedelta())
class PollingReader(object):
"""
Read from file descriptors with timeout.
"""
def __init__(self, fds):
"""
Initialize.
"""
self.fds = fds
self.polling = select.poll()
for fd in fds:
self.polling.register(fd, select.POLLIN)
def unregister(self, fds):
"""
Unregister descriptors.
"""
for fd in fds:
self.polling.unregister(fd)
self.fds.remove(fd)
def read(self, timeout = -1):
"""
Read with an optional timeout.
"""
changed = self.polling.poll(timeout * 1000)
for (fd, event) in changed:
log.debug("event on fd %i is %#o", fd, event)
if event & select.POLLIN:
# POLLHUP is level-triggered; we'll be back if it was missed
return (fd, os.read(fd, 65536))
elif event & select.POLLHUP:
return (fd, "")
else:
raise IOError("unexpected poll response %#o from file descriptor" % event)
return (None, None)
CPU_LimitedRun = \
collections.namedtuple(
"CPU_LimitedRun",
[
"started",
"limit",
"out_chunks",
"err_chunks",
"usage_elapsed",
"proc_elapsed",
"exit_status",
"exit_signal",
],
)
def run_cpu_limited(
arguments,
limit,
pty = True,
environment = {},
resolution = 0.5,
):
"""
Spawn a subprocess whose process tree is granted limited CPU (user) time.
@param environment Override specific existing environment variables.
The subprocess must not expect input. This method is best suited to
processes which may run for a reasonable amount of time (eg, at least
several seconds); it will be fairly inefficient (and ineffective) at
fine-grained limiting of CPU allocation to short-duration processes.
We run the process and read its output. Every time we receive a chunk of
data, or every C{resolution} seconds, we estimate the total CPU time used
by the session---and store that information with the chunk of output, if
any. After at least C{limit} of CPU time has been used by the spawned
session, or after the session leader terminates, whichever is first, the
session is (sig)killed, the session leader waited on, and any data
remaining in the pipe is read.
Note that the use of SIGKILL means that child processes *cannot* perform
their own cleanup.
If C{pty} is specified, process stdout is piped through a pty, which makes
process output less likely to be buffered. This behavior is the default.
Kernel-reported resource usage includes the sum of all directly and
indirectly waited-on children. It will be accurate in the common case where
processes terminate after correctly waiting on their children, and
inaccurate in cases where zombies are reparented to init. Elapsed CPU time
taken from the /proc accounting mechanism is used to do CPU time limiting,
and will always be at least the specified limit.
"""
log.detail("running %s for %s", arguments, limit)
# sanity
if not arguments:
raise ValueError()
# start the run
popened = None
fd_chunks = {}
exit_pid = None
started = datetime.datetime.utcnow()
try:
# start running the child process
if pty:
popened = borg.unix.sessions.spawn_pty_session(arguments, environment)
else:
popened = borg.unix.sessions.spawn_pipe_session(arguments, environment)
fd_chunks = {
popened.stdout.fileno(): [],
popened.stderr.fileno(): [],
}
log.debug("spawned child with pid %i", popened.pid)
# read the child's output while accounting (note that the session id
# is, under Linux, the pid of the session leader)
accountant = SessionTimeAccountant(popened.pid)
reader = PollingReader(fd_chunks.keys())
while reader.fds:
# nuke if we're past cutoff
if accountant.total >= limit:
popened.kill()
break
# read from and audit the child process
(chunk_fd, chunk) = reader.read(resolution)
accountant.audit()
if chunk is not None:
log.debug(
"got %i bytes at %s (user time) on fd %i; chunk follows:\n%s",
len(chunk),
accountant.total,
chunk_fd,
chunk,
)
if chunk != "":
fd_chunks[chunk_fd].append((accountant.total, chunk))
else:
reader.unregister([chunk_fd])
# wait for our child to die
(exit_pid, termination, usage) = os.wait4(popened.pid, 0)
# nuke the session from orbit (it's the only way to be sure)
borg.unix.sessions.kill_session(popened.pid, signal.SIGKILL)
except:
# something has gone awry, so we need to kill our children
log.warning("something went awry! (our pid is %i)", os.getpid())
raised = borg.util.Raised()
if exit_pid is None and popened is not None:
try:
# nuke the entire session
borg.unix.sessions.kill_session(popened.pid, signal.SIGKILL)
# and don't leave the child as a zombie
os.waitpid(popened.pid, 0)
except:
borg.util.Raised().print_ignored()
raised.re_raise()
else:
# grab any output left in the kernel buffers
while reader.fds:
(chunk_fd, chunk) = reader.read(128.0)
if chunk:
fd_chunks[chunk_fd].append((accountant.total, chunk))
elif chunk_fd:
reader.unregister([chunk_fd])
else:
raise RuntimeError("final read from child timed out; undead child?")
# done
from datetime import timedelta
return \
CPU_LimitedRun(
started,
limit,
fd_chunks[popened.stdout.fileno()],
fd_chunks[popened.stderr.fileno()],
timedelta(seconds = usage.ru_utime),
accountant.total,
os.WEXITSTATUS(termination) if os.WIFEXITED(termination) else None,
os.WTERMSIG(termination) if os.WIFSIGNALED(termination) else None,
)
finally:
# let's not leak file descriptors
if popened is not None:
popened.stdout.close()
popened.stderr.close()
| mit | -8,009,201,627,987,073,000 | 30.155039 | 90 | 0.581488 | false | 4.270988 | false | false | false |
pudo/archivekit | archivekit/store/s3.py | 1 | 4834 | import os
from urllib2 import urlopen
from boto.s3.connection import S3Connection, S3ResponseError
from boto.s3.connection import Location
from archivekit.store.common import Store, StoreObject, MANIFEST
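# DELIM evaluates to the platform path separator ('/' on POSIX):
# os.path.join(' ', ' ') is ' <sep> ' and .strip() drops the spaces.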
DELIM = os.path.join(' ', ' ').strip()
ALL_USERS = 'http://acs.amazonaws.com/groups/global/AllUsers'
class S3Store(Store):
def __init__(self, aws_key_id=None, aws_secret=None, bucket_name=None,
prefix=None, location=Location.EU, **kwargs):
if aws_key_id is None:
aws_key_id = os.environ.get('AWS_ACCESS_KEY_ID')
aws_secret = os.environ.get('AWS_SECRET_ACCESS_KEY')
self.aws_key_id = aws_key_id
self.aws_secret = aws_secret
if bucket_name is None:
bucket_name = os.environ.get('AWS_BUCKET_NAME')
self.bucket_name = bucket_name
self.prefix = prefix
self.location = location
self._bucket = None
@property
def bucket(self):
if self._bucket is None:
self.conn = S3Connection(self.aws_key_id, self.aws_secret)
try:
self._bucket = self.conn.get_bucket(self.bucket_name)
except S3ResponseError, se:
if se.status != 404:
raise
self._bucket = self.conn.create_bucket(self.bucket_name,
location=self.location)
return self._bucket
def get_object(self, collection, package_id, path):
return S3StoreObject(self, collection, package_id, path)
def _get_prefix(self, collection):
prefix = collection
if self.prefix:
prefix = os.path.join(self.prefix, prefix)
return os.path.join(prefix, '')
def list_collections(self):
prefix = os.path.join(self.prefix, '') if self.prefix else None
for prefix in self.bucket.list(prefix=prefix, delimiter=DELIM):
yield prefix.name.rsplit(DELIM, 2)[-2]
def list_packages(self, collection):
prefix = self._get_prefix(collection)
for sub_prefix in self.bucket.list(prefix=prefix, delimiter=DELIM):
yield sub_prefix.name.rsplit(DELIM, 2)[-2]
def list_resources(self, collection, package_id):
prefix = os.path.join(self._get_prefix(collection), package_id)
skip = os.path.join(prefix, MANIFEST)
offset = len(skip) - len(MANIFEST)
for key in self.bucket.get_all_keys(prefix=prefix):
if key.name == skip:
continue
yield key.name[offset:]
def __repr__(self):
return '<S3Store(%r, %r)>' % (self.bucket_name, self.prefix)
def __unicode__(self):
return os.path.join(self.bucket_name, self.prefix)
class S3StoreObject(StoreObject):
def __init__(self, store, collection, package_id, path):
self.store = store
self.package_id = package_id
self.path = path
self._key = None
self._key_name = os.path.join(collection, package_id, path)
if store.prefix:
self._key_name = os.path.join(store.prefix, self._key_name)
@property
def key(self):
if self._key is None:
self._key = self.store.bucket.get_key(self._key_name)
if self._key is None:
self._key = self.store.bucket.new_key(self._key_name)
return self._key
def exists(self):
if self._key is None:
self._key = self.store.bucket.get_key(self._key_name)
return self._key is not None
def save_fileobj(self, fileobj):
self.key.set_contents_from_file(fileobj)
def save_file(self, file_name, destructive=False):
with open(file_name, 'rb') as fh:
self.save_fileobj(fh)
def save_data(self, data):
self.key.set_contents_from_string(data)
def load_fileobj(self):
return urlopen(self.public_url())
def load_data(self):
return self.key.get_contents_as_string()
def _is_public(self):
try:
for grant in self.key.get_acl().acl.grants:
if grant.permission == 'READ':
if grant.uri == ALL_USERS:
return True
except:
pass
return False
def public_url(self):
if not self.exists():
return
# Welcome to the world of open data:
if not self._is_public():
self.key.make_public()
return self.key.generate_url(expires_in=0,
force_http=True,
query_auth=False)
def __repr__(self):
return '<S3StoreObject(%r, %r, %r)>' % (self.store, self.package_id,
self.path)
def __unicode__(self):
return self.public_url()
| mit | -4,507,298,742,891,452,000 | 33.042254 | 78 | 0.571369 | false | 3.785435 | false | false | false |
DonHilborn/DataGenerator | faker/providers/es_MX/company.py | 1 | 8627 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from ..company import Provider as CompanyProvider
class Provider(CompanyProvider):
formats = (
'{{last_name}} {{company_suffix}}',
'{{last_name}}-{{last_name}}',
'{{company_prefix}} {{last_name}}-{{last_name}}',
'{{company_prefix}} {{last_name}} y {{last_name}}',
'{{company_prefix}} {{last_name}}, {{last_name}} y {{last_name}}',
'{{last_name}}-{{last_name}} {{company_suffix}}',
'{{last_name}}, {{last_name}} y {{last_name}}',
'{{last_name}} y {{last_name}} {{company_suffix}}'
)
catch_phrase_words = (
(
"habilidad", "acceso", "adaptador", "algoritmo", "alianza",
"analista", "aplicación", "enfoque", "arquitectura",
"archivo", "inteligencia artificial", "array", "actitud",
"medición", "gestión presupuestaria", "capacidad", "desafío",
"circuito", "colaboración", "complejidad", "concepto",
"conglomeración", "contingencia", "núcleo", "fidelidad",
"base de datos", "data-warehouse", "definición", "emulación",
"codificar", "encriptar", "extranet", "firmware",
"flexibilidad", "focus group", "previsión", "base de trabajo",
"función", "funcionalidad", "Interfaz Gráfica", "groupware",
"Interfaz gráfico de usuario", "hardware", "Soporte", "jerarquía",
"conjunto", "implementación", "infraestructura", "iniciativa",
"instalación", "conjunto de instrucciones", "interfaz",
"intranet", "base del conocimiento", "red de area local",
"aprovechar", "matrices", "metodologías", "middleware",
"migración", "modelo", "moderador", "monitorizar",
"arquitectura abierta", "sistema abierto", "orquestar",
"paradigma", "paralelismo", "política", "portal",
"estructura de precios", "proceso de mejora",
"producto", "productividad", "proyecto", "proyección",
"protocolo", "línea segura", "software", "solución",
"estandardización", "estrategia", "estructura", "éxito",
"superestructura", "soporte", "sinergia", "mediante",
"marco de tiempo", "caja de herramientas", "utilización",
"website", "fuerza de trabajo"),
(
"24 horas", "24/7", "3ra generación", "4ta generación",
"5ta generación", "6ta generación", "analizada",
"asimétrica", "asíncrona", "monitorizada por red",
"bidireccional", "bifurcada", "generada por el cliente",
"cliente servidor", "coherente", "cohesiva", "compuesto",
"sensible al contexto", "basado en el contexto",
"basado en contenido", "dedicada",
"generado por la demanda", "didactica", "direccional",
"discreta", "dinámica", "potenciada", "acompasada",
"ejecutiva", "explícita", "tolerante a fallos",
"innovadora", "amplio ábanico", "global", "heurística",
"alto nivel", "holística", "homogénea", "híbrida",
"incremental", "intangible", "interactiva", "intermedia",
"local", "logística", "maximizada", "metódica",
"misión crítica", "móbil", "modular", "motivadora",
"multimedia", "multiestado", "multitarea", "nacional",
"basado en necesidades", "neutral", "nueva generación",
"no-volátil", "orientado a objetos", "óptima", "optimizada",
"radical", "tiempo real", "recíproca", "regional",
"escalable", "secundaria", "orientada a soluciones",
"estable", "estatica", "sistemática", "sistémica",
"tangible", "terciaria", "transicional", "uniforme",
"valor añadido", "vía web", "defectos cero", "tolerancia cero"
),
(
'adaptivo', 'avanzado', 'asimilado', 'automatizado',
'balanceado', 'enfocado al negocio',
'centralizado', 'clonado', 'compatible', 'configurable',
'multiplataforma', 'enfocado al cliente', 'personalizable',
'descentralizado', 'digitizado', 'distribuido', 'diverso',
'mejorado', 'en toda la empresa', 'ergonómico', 'exclusivo',
'expandido', 'extendido', 'cara a cara', 'enfocado',
'de primera línea', 'totalmente configurable',
'basado en funcionalidad', 'fundamental', 'horizontal',
'implementado', 'innovador', 'integrado', 'intuitivo',
'inverso', 'administrado', 'mandatorio', 'monitoreado',
'multicanal', 'multilateral', 'multi-capas', 'en red',
'basado en objetos', 'de arquitectura abierta',
'Open-source', 'operativo', 'optimizado', 'opcional',
'orgánico', 'organizado', 'perseverante', 'persistente',
'polarizado', 'preventivo', 'proactivo', 'enfocado a ganancias',
'programable', 'progresivo', 'llave pública',
'enfocado a la calidad', 'reactivo', 'realineado',
're-contextualizado', 'reducido', 'con ingeniería inversa',
'de tamaño adecuado', 'robusto', 'seguro', 'compartible',
'sincronizado', 'orientado a equipos', 'total',
'universal', 'actualizable', 'centrado al usuario',
'versátil', 'virtual', 'visionario',
)
)
bsWords = (
(
'implementa', 'utiliza', 'integrata', 'optimiza',
'evoluciona', 'transforma', 'abraza', 'habilia',
'orquesta', 'reinventa', 'agrega', 'mejora', 'incentiviza',
'modifica', 'empondera', 'monetiza', 'fortalece',
'facilita', 'synergiza', 'crear marca', 'crece',
'sintetiza', 'entrega', 'mezcla', 'incuba', 'compromete',
'maximiza', 'inmediata', 'visualiza', 'inova',
'escala', 'libera', 'maneja', 'extiende', 'revoluciona',
'genera', 'explota', 'transición', 'itera', 'cultiva',
'redefine', 'recontextualiza',
),
(
'synergías', 'paradigmas', 'marcados', 'socios',
'infraestructuras', 'plataformas', 'iniciativas',
'chanales', 'communidades', 'ROI', 'soluciones',
'portales', 'nichos', 'tecnologías', 'contenido',
'cadena de producción', 'convergencia', 'relaciones',
'architecturas', 'interfaces', 'comercio electrónico',
'sistemas', 'ancho de banda', 'modelos', 'entregables',
'usuarios', 'esquemas', 'redes', 'aplicaciones', 'métricas',
'funcionalidades', 'experiencias', 'servicios web',
'metodologías'
),
(
'valor agregado', 'verticales', 'proactivas', 'robustas',
'revolucionarias', 'escalables', 'de punta', 'innovadoras',
'intuitivas', 'estratégicas', 'e-business', 'de misión crítica',
'uno-a-uno', '24/7', 'end-to-end', 'globales', 'B2B', 'B2C',
'granulares', 'sin fricciones', 'virtuales', 'virales',
'dinámicas', '24/365', 'magnéticas', 'listo para la web',
'interactivas', 'dot-com', 'sexi', 'en tiempo real',
'eficientes', 'front-end', 'distribuidas', 'extensibles',
'llave en mano', 'de clase mundial', 'open-source',
'plataforma cruzada', 'de paquete', 'empresariales',
'integrado', 'impacto total', 'inalámbrica', 'transparentes',
'de siguiente generación', 'lo último', 'centrado al usuario',
'visionarias', 'personalizado', 'ubicuas', 'plug-and-play',
'colaborativas', 'holísticas', 'ricas'
),
)
    company_prefixes = ('Despacho', 'Grupo', 'Corporativo', 'Club',
                        'Industrias', 'Laboratorios', 'Proyectos')
company_suffixes = ('A.C.', 'S.A.', 'S.A. de C.V.', 'S.C.',
'S. R.L. de C.V.','e Hijos', 'y Asociados')
def company_prefix(self):
"""
Ejemplo: Grupo
"""
        return self.random_element(self.company_prefixes)
def catch_phrase(self):
"""
:example 'Robust full-range hub'
"""
result = []
for word_list in self.catch_phrase_words:
result.append(self.random_element(word_list))
return " ".join(result)
def bs(self):
"""
:example 'integrate extensible convergence'
"""
result = []
for word_list in self.bsWords:
result.append(self.random_element(word_list))
return " ".join(result)
| mit | -2,724,498,202,587,713,500 | 49.311765 | 77 | 0.559102 | false | 3.048111 | false | false | false |
peterheim1/robbie | bin/speech_text.py | 1 | 2005 | #!/usr/bin/env python
# Author: Derek Green
PKG = 'pocketsphinx'
#import roslib; roslib.load_manifest(PKG)
import rospy
import re
import os
from std_msgs.msg import String
from subprocess import Popen, PIPE
class SpeechText():
def __init__(self):
self.pub = rospy.Publisher('speech_text', String)
rospy.init_node('speech_text_node', anonymous=True)
path = rospy.get_param("/speech_text/lm_path")
# the language model directory found at path should have a .dic and a .lm, grab them:
lm = None
dic = None
for filename in os.listdir(path):
if re.match(".*\.dic", filename):
dic = filename
if re.match(".*\.lm", filename):
lm = filename
if lm and dic:
args = ["pocketsphinx_continuous", "-hmm", "/usr/share/pocketsphinx/model/hmm/wsj1", "-lm", path + lm, "-dict", path + dic]
self.ps = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
rospy.on_shutdown( self.clean_up )
else:
print "ERROR: pocketsphinx is missing language model file. dic = " + dic + ", lm = " + lm
def speech_to_text(self):
print "ENTERING SPEECH_TEXT"
while not rospy.is_shutdown():
line = self.ps.stdout.readline()
if re.match("READY.*",line):
print "======= pocket sphinx is ready ======="
heard = re.match("\d{9}[:](.*)( [(]-\d*[)])",line)
if heard:
out = heard.group(1).lower().strip()
print "JUST HEARD: \"" + out + "\""
rospy.loginfo(out)
self.pub.publish(out)
def clean_up(self):
print "=============================== speech_txt is shutting down. Killing pocketsphinx process #", self.ps.pid
self.ps.kill()
if __name__ == '__main__':
try:
st = SpeechText()
st.speech_to_text()
rospy.spin()
except rospy.ROSInterruptException:
pass
| gpl-3.0 | 1,194,413,395,588,688,100 | 32.983051 | 135 | 0.54015 | false | 3.678899 | false | false | false |
noppanit/sweepy | sweepy.py | 1 | 3267 | #!/usr/bin/env python
import pymongo
import tweepy
from pymongo import MongoClient
from sweepy.get_config import get_config
config = get_config()
consumer_key = config.get('PROCESS_TWITTER_CONSUMER_KEY')
consumer_secret = config.get('PROCESS_TWITTER_CONSUMER_SECRET')
access_token = config.get('PROCESS_TWITTER_ACCESS_TOKEN')
access_token_secret = config.get('PROCESS_TWITTER_ACCESS_TOKEN_SECRET')
MONGO_URL = config.get('MONGO_URL')
MONGO_PORT = config.get('MONGO_PORT')
MONGO_USERNAME = config.get('MONGO_USERNAME')
MONGO_PASSWORD = config.get('MONGO_PASSWORD')
MONGO_DATABASE = config.get('MONGO_DATABASE')
client = MongoClient(MONGO_URL, int(MONGO_PORT))
print 'Establishing Tweepy connection'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, retry_count=3)
db = client[MONGO_DATABASE]
db.authenticate(MONGO_USERNAME, MONGO_PASSWORD)
raw_tweets = db.raw_tweets
users = db.users
def is_user_in_db(screen_name):
    return get_user_from_db(screen_name) is not None
def get_user_from_db(screen_name):
return users.find_one({'screen_name' : screen_name})
def get_user_from_twitter(user_id):
return api.get_user(user_id)
def get_followers(screen_name):
users = []
for i, page in enumerate(tweepy.Cursor(api.followers, id=screen_name, count=200).pages()):
print 'Getting page {} for followers'.format(i)
users += page
return users
def get_friends(screen_name):
users = []
for i, page in enumerate(tweepy.Cursor(api.friends, id=screen_name, count=200).pages()):
print 'Getting page {} for friends'.format(i)
users += page
return users
def get_followers_ids(screen_name):
ids = []
try:
for i, page in enumerate(tweepy.Cursor(api.followers_ids, id=screen_name, count=5000).pages()):
print 'Getting page {} for followers ids'.format(i)
ids += page
except tweepy.error.TweepError as e:
print e.message
return ids
def get_friends_ids(screen_name):
ids = []
try:
for i, page in enumerate(tweepy.Cursor(api.friends_ids, id=screen_name, count=5000).pages()):
print 'Getting page {} for friends ids'.format(i)
ids += page
except tweepy.error.TweepError as e:
print e.message
return ids
def process_user(user):
screen_name = user['screen_name']
print 'Processing user : {}'.format(screen_name)
    if not is_user_in_db(screen_name):
user['followers_ids'] = get_followers_ids(screen_name)
user['friends_ids'] = get_friends_ids(screen_name)
users.insert_one(user)
else:
print '{} exists!'.format(screen_name)
print 'End processing user : {}'.format(screen_name)
if __name__ == "__main__":
for doc in raw_tweets.find({'processed' : {'$exists': False}}):
print 'Start processing'
try:
process_user(doc['user'])
except KeyError:
pass
try:
process_user(doc['retweeted_status']['user'])
except KeyError:
pass
raw_tweets.update_one({'_id': doc['_id']}, {'$set':{'processed':True}})
| mit | -7,822,002,205,192,745,000 | 28.7 | 103 | 0.654729 | false | 3.313387 | true | false | false |
kaajavi/ninformes | escolar/test_dev.py | 1 | 3482 | from django.shortcuts import render_to_response
from django.template import RequestContext
import os
from django.conf import settings
from django.template import Context
from django.template.loader import get_template
from xhtml2pdf import pisa #INSTALAR ESTA LIBRERIA
from django.templatetags.static import static
from django.http import HttpResponseRedirect, HttpResponse
from escolar.models import Docente, Curso, Alumno, MatriculaAlumnado, Campo, MatriculaDocentes, SITUACION_DOCENTE, TIPO_MATRICULA_DOCENTE, ItemCampo, DescripcionCampo
FILE_LIST = settings.BASE_DIR+'/test.pdf'
# Convert HTML URIs to absolute system paths so xhtml2pdf can access those resources
def link_callback(uri, rel):
# use short variable names
sUrl = settings.STATIC_URL # Typically /static/
sRoot = settings.STATIC_ROOT # Typically /home/userX/project_static/
mUrl = settings.MEDIA_URL # Typically /static/media/
mRoot = settings.MEDIA_ROOT # Typically /home/userX/project_static/media/BASE_DIR
# convert URIs to absolute system paths
if uri.startswith(mUrl):
path = os.path.join(mRoot, uri.replace(mUrl, ""))
elif uri.startswith(sUrl):
path = os.path.join(sRoot, uri.replace(sUrl, ""))
# make sure that file exists
if not os.path.isfile(path):
raise Exception(
'media URI must start with %s or %s' % \
(sUrl, mUrl))
return path
def view_create_principal(request):
context = RequestContext(request)
return render_to_response('testing/test_create_principal.html', {},context)
###para cfk
from django import forms
from ckeditor.widgets import CKEditorWidget
class ExampleCFKForm(forms.Form):
content = forms.CharField(widget=CKEditorWidget())
def test_cfkeditor(request):
context = RequestContext(request)
form = ExampleCFKForm()
return render_to_response('testing/test_cfkeditor.html', {'form':form},context)
def test_generar_informe_matricula(request):
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="informe_test.pdf"'
from escolar.default_data.images_base64 import LOGO_PROVINCIAL
# Prepare context
matricula = MatriculaAlumnado.objects.get(pk=112)
descrCampo = DescripcionCampo.objects.filter(matricula_alumno=matricula, semestre=1, campo__especial=False)
descrCampoInstitucionales = DescripcionCampo.objects.filter(matricula_alumno=matricula, semestre=1, campo__especial=True)
data = {'etapa':1,
'matricula':matricula,
'descrCampo':descrCampo,
'descrCampoInstitucionales':descrCampoInstitucionales,
'logo_provincial':LOGO_PROVINCIAL
}
# Render html content through html template with context
template = get_template('informe/_informe.html')
html = template.render(Context(data))
# Write PDF to file
file = open(FILE_LIST, "w+b")
pisaStatus = pisa.CreatePDF(html, dest=file,
link_callback = link_callback)
# Return PDF document through a Django HTTP response
file.seek(0)
pdf = file.read()
file.close()
response.write(pdf)
# Don't forget to close the file handle
#BORRO EL ARCHIVO
if os.path.exists(FILE_LIST):
try:
os.remove(FILE_LIST)
except OSError, e:
pass
    return response
| gpl-2.0 | 624,910,965,018,021,500 | 34.907216 | 166 | 0.688685 | false | 3.585994 | true | false | false |
cloudnull/genastack_roles | genastack_roles/nova_api_os_compute/__init__.py | 1 | 1103 | # =============================================================================
# Copyright [2013] [Kevin Carter]
# License Information :
# This software has no warranty, it is provided 'as is'. It is your
# responsibility to validate the behavior of the routines and its accuracy
# using the code provided. Consult the GNU General Public license for further
# details (see GNU General Public License).
# http://www.gnu.org/licenses/gpl.html
# =============================================================================
BUILD_DATA = {
'nova_api_os_compute': {
'help': 'Install nova OS Compute API from upstream',
'required': [
'nova'
],
'init_script': [
{
'help': 'Start and stop nova on boot',
'init_path': '/etc/init.d',
'name': 'nova',
'chuid': 'nova',
'chdir': '/var/lib/nova',
'options': '--'
' --config-file=/etc/nova/nova.conf',
'program': 'nova-api-os-compute'
}
]
}
}
| gpl-3.0 | -5,563,318,845,062,327,000 | 35.766667 | 79 | 0.446056 | false | 4.576763 | false | false | false |
svenfraeys/sftoolbox | sftoolboxmaya/widgets.py | 1 | 1480 | import sftoolboxmaya.utils
from sftoolboxqt import qtgui
from sftoolboxqt.widgets import ProjectWidget
class MayaProjectWidget(sftoolboxmaya.utils.DialogWidget):
"""toolbox widget
"""
def _wrapped_set_window_title(self, func):
"""wrap for the set window title to keep it synced
"""
def wrapped_func(text):
self.setWindowTitle(text)
func(text)
return wrapped_func
def __init__(self, project=None, parent=None):
"""settings and context are given for the init
"""
super(MayaProjectWidget, self).__init__()
layout = qtgui.QVBoxLayout()
layout.setSpacing(0)
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
self._toolbox_widget = ProjectWidget(project)
layout.addWidget(self._toolbox_widget)
self.setWindowTitle(self._toolbox_widget.windowTitle())
# wrap the set window title so we keep it in sync
self._toolbox_widget.setWindowTitle = self._wrapped_set_window_title(
self._toolbox_widget.setWindowTitle)
@property
def project(self):
return self._toolbox_widget.project
@project.setter
def project(self, value):
self._toolbox_widget.project = value
@property
def active_panel(self):
return self._toolbox_widget.active_panel
@active_panel.setter
def active_panel(self, value):
self._toolbox_widget.active_panel = value
| mit | 6,410,005,747,240,740,000 | 28.6 | 77 | 0.648649 | false | 4.01084 | false | false | false |
RyanofRIT/DjangoWebsite | HealthNet/HealthNet/urls.py | 1 | 2087 | from django.conf.urls import include, url, patterns
from django.contrib import admin
from HealthApp import views
"""
The urlpatterns is how we map the site urls to specific views in the views.py. The first part is
a regular expression to describe the url pattern, followed by the view that should be called.
Lastly, a name is given to each pattern so that they can be referenced from elsewhere in the code.
For example, when an HTTPResponseRedirect(reverse('login')) is returned in one of the views, it
is doing a reverse lookup of the url pattern named 'login' and returning the view (and subsequently
the html page) associated with the view.
There are a couple patterns that are a bit unique. The first is the url for the admin page which
links to the built in url network already created by django. The other unique urls are the ones
that deal with patient information since the urls are specific to the patient, and the username in
the url needs to be passed into the view as a parameter. The (?P<username>\w+) syntax both
captures that portion of the URL and names the parameter it is passed to the view as (in this
case, username).
Note: the first url is used to redirect users to the login page when at the 'root' url of the site.
"""
urlpatterns = [
url(r'^$', views.userLogin, name='login'),
url(r'^login/$', views.userLogin, name='login'),
url(r'^register/$', views.register, name='register'),
url(r'^(?P<username>\w+)/profile/$', views.profile, name='profile'),
url(r'^(?P<username>\w+)/staffProfile/$', views.staffProfile, name='staffProfile'),
url(r'^(?P<username>\w+)/staffProfile/(?P<patient>\w+)$', views.updateUser, name='updateUser'),
url(r'^logout/$', views.userLogout, name='logout'),
url(r'^admin/', include(admin.site.urls)),
url(r'^profileEdit/$', views.profileEdit, name='profileEdit'),
url(r'^createAppForm/', views.createApp, name='createAppForm'),
url(r'^deleteAppForm/(\d+)$', views.deleteApp, name='deleteAppForm'),
url(r'^export/$', views.export, name='export')
]
| gpl-2.0 | 9,009,979,145,393,319,000 | 62.272727 | 103 | 0.729756 | false | 3.93032 | false | false | false |
LEAMgroup/leam.stress | leam/stress/interfaces/stressanalysis.py | 1 | 1042 | from zope.interface import Interface
# -*- Additional Imports Here -*-
from zope import schema
from leam.stress import stressMessageFactory as _
class IStressAnalysis(Interface):
"""Frontend to the LEAM Stress Analysis Model"""
# -*- schema definition goes here -*-
layer = schema.Object(
title=_(u"GIS Layer"),
required=True,
description=_(u"A GIS layer with the environmentally sensitive areas."),
schema=Interface, # specify the interface(s) of the addable types here
)
#
scenario = schema.Object(
title=_(u"LUC Scenario"),
required=True,
description=_(u"An existing LUC Scenario with it's associated probability maps."),
schema=Interface, # specify the interface(s) of the addable types here
)
#
section = schema.Object(
title=_(u"Section Map"),
required=False,
description=_(u"Section layer used to split the sensative layer."),
schema=Interface, # specify the interface(s) of the addable types here
)
#
| gpl-2.0 | -7,602,434,538,923,923,000 | 30.575758 | 90 | 0.658349 | false | 4.341667 | false | false | false |
dereulenspiegel/spotimc | resources/libs/spotimcgui/views/__init__.py | 1 | 7854 | '''
Copyright 2011 Mikel Azkolain
This file is part of Spotimc.
Spotimc is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Spotimc is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Spotimc. If not, see <http://www.gnu.org/licenses/>.
'''
import xbmc
import xbmcgui
import weakref
from inspect import isfunction
def iif(cond, on_true, on_false):
if cond:
if not isfunction(on_true):
return on_true
else:
return on_true()
else:
if not isfunction(on_false):
return on_false
else:
return on_false()
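# Example: iif only calls a branch when it is a function, so expensive or
# side-effecting branches can be passed lazily:
#   iif(cached is None, lambda: load_from_disk(), cached)
# evaluates load_from_disk() only when cached is None; plain values such as
# `cached` are returned unchanged. (load_from_disk is a hypothetical helper.)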
class ViewManager:
__window = None
__view_list = None
__position = None
__vars = None
def __init__(self, window):
self.__window = weakref.proxy(window)
self.__view_list = []
self.__position = -1
self.__vars = {}
def num_views(self):
return len(self.__view_list)
def position(self):
return self.__position
def has_next(self):
return(
self.num_views() > 0
and self.position() < self.num_views() - 1
)
def _show_view(self, view):
view.show(self)
container_id = view.get_container_id()
if container_id is not None:
xbmc.executebuiltin("Control.SetFocus(%d)" % container_id)
def next(self):
        #Fail if no next view
if not self.has_next():
raise IndexError("No more views available")
#If there's one active
if self.__position != -1:
self.__view_list[self.__position].hide(self)
#Show the next one
self.__position += 1
self._show_view(self.__view_list[self.__position])
def has_previous(self):
return self.__position > 0
def previous(self):
        #Fail if no previous view
if not self.has_previous():
raise IndexError("No previous views available")
#Hide current
self.__view_list[self.__position].hide(self)
#Show previous
self.__position -= 1
self._show_view(self.__view_list[self.__position])
def add_view(self, view):
#Remove all views that come next (if any)
del self.__view_list[self.__position+1:]
#Add the new one
self.__view_list.append(view)
#Go to the next view
self.next()
def click(self, control_id):
self.__view_list[self.__position].click(self, control_id)
def show(self, give_focus=True):
self.__view_list[self.__position].show(self, give_focus)
def clear_views(self):
#Check at least if a view is visible
if self.__position != -1:
#Hide current
self.__view_list[self.__position].hide(self)
#Delete all views
self.__view_list = []
#And reset the position counter
self.__position = -1
def set_var(self, name, value):
self.__vars[name] = value
def get_var(self, name):
return self.__vars[name]
def get_window(self):
return self.__window
class BaseView:
__is_visible = None
def __init__(self):
self.__is_visible = False
def is_visible(self):
return self.__is_visible
def click(self, view_manager, control_id):
pass
def show(self, view_manager, give_focus=True):
self.__is_visible = True
def hide(self, view_manager):
self.__is_visible = False
def back(self, view_manager):
pass
def get_container_id(self):
pass
class BaseContainerView(BaseView):
def render(self, view_manager):
"""Tell the view to render it's content.
The view should return True if the content was rendered successfully,
and False if data was not still available.
"""
raise NotImplementedError()
def get_container(self, view_manager):
raise NotImplementedError()
def show(self, view_manager, give_focus=True):
BaseView.show(self, view_manager, give_focus)
#Hide container and show loading anim.
self.get_container(view_manager).setVisibleCondition('false')
view_manager.get_window().show_loading()
if self.render(view_manager):
#Hide loading and show container
view_manager.get_window().hide_loading()
self.get_container(view_manager).setVisibleCondition('true')
#And give focus if asked to do so
if give_focus:
view_manager.get_window().setFocus(
self.get_container(view_manager)
)
def hide(self, view_manager):
BaseView.hide(self, view_manager)
#Just hide the container
self.get_container(view_manager).setVisibleCondition('false')
class BaseListContainerView(BaseContainerView):
__list_position = None
def get_list(self, view_manager):
raise NotImplementedError()
def show(self, view_manager, give_focus=True):
BaseView.show(self, view_manager, give_focus)
window = view_manager.get_window()
#Hide container and show loading anim.
self.get_container(view_manager).setVisibleCondition('false')
window.show_loading()
if self.render(view_manager):
#If we have a stored list position
if self.__list_position is not None:
self.get_list(view_manager).selectItem(self.__list_position)
            #No stored list position? Start at the first item
else:
self.get_list(view_manager).selectItem(0)
#List was rendered but with no items, add a placeholder
if self.get_list(view_manager).size() == 0:
window.setProperty('ListWithNoItems', 'true')
item = xbmcgui.ListItem()
item.setProperty('NoItems', 'true')
self.get_list(view_manager).addItem(item)
else:
window.setProperty('ListWithNoItems', 'false')
#Hide loading and show container
window.hide_loading()
self.get_container(view_manager).setVisibleCondition('true')
#And give focus if asked to do so
if give_focus:
view_manager.get_window().setFocus(
self.get_container(view_manager)
)
def hide(self, view_manager):
BaseView.hide(self, view_manager)
#Keep the list position
list_obj = self.get_list(view_manager)
self.__list_position = list_obj.getSelectedPosition()
#And call the container stuff
BaseContainerView.hide(self, view_manager)
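# Minimal navigation sketch (PlaylistView/TracksView are hypothetical
# BaseListContainerView subclasses): add_view() shows a view and truncates
# the forward history, while previous()/next() walk the view stack.
#
#   manager = ViewManager(window)
#   manager.add_view(PlaylistView())   # rendered and focused
#   manager.add_view(TracksView())     # playlist hidden, tracks shown
#   if manager.has_previous():
#       manager.previous()             # back to the playlist view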
| gpl-3.0 | 4,706,419,649,641,739,000 | 25.950178 | 77 | 0.540998 | false | 4.442308 | false | false | false |
meeza/PythonWork | SearchEngines/crawler.py | 1 | 1625 | import urllib
def get_page(url):
try:
return urllib.urlopen(url).read()
except:
return ""
def get_next_target(page):
start_link = page.find('<a href=')
if start_link == -1:
return None, 0
start_quote = page.find('"', start_link)
end_quote = page.find('"', start_quote + 1)
url = page[start_quote + 1:end_quote]
return url, end_quote
def get_all_links(page):
links = []
while True:
url, endpos = get_next_target(page)
if url:
links.append(url)
page = page[endpos:]
else:
break
return links
def union(a, b):
for e in b:
if e not in a:
a.append(e)
def add_page_to_index(index, url, content):
words = content.split()
for word in words:
add_to_index(index, word, url)
def add_to_index(index, keyword, url):
if keyword in index:
index[keyword].append(url)
else:
index[keyword] = [url]
def lookup(index, keyword):
if keyword in index:
return index[keyword]
else:
return None
def crawl_web(seed): # returns index, graph of inlinks
tocrawl = [seed]
crawled = []
graph = {} # <url>, [list of pages it links to]
index = {}
while tocrawl:
page = tocrawl.pop()
if page not in crawled:
content = get_page(page)
add_page_to_index(index, page, content)
outlinks = get_all_links(content)
graph[page] = outlinks
union(tocrawl, outlinks)
crawled.append(page)
return index, graph
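# Hedged usage sketch (assumes the seed URL is reachable and the crawl stays
# small); shows crawl_web() followed by a keyword lookup in the index.
if __name__ == '__main__':
    index, graph = crawl_web('http://example.com')
    print lookup(index, 'example')  # urls containing the keyword, or None
    print graph                     # <url> -> [list of pages it links to]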
| mit | -1,297,772,012,444,380,000 | 22.214286 | 54 | 0.548308 | false | 3.532609 | false | false | false |
jwhitlock/kuma | kuma/users/signal_handlers.py | 1 | 4279 | from allauth.account.signals import email_confirmed, user_signed_up
from allauth.socialaccount.signals import social_account_removed
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from waffle import switch_is_active
from kuma.core.urlresolvers import reverse
from kuma.wiki.jobs import DocumentContributorsJob
from .jobs import UserGravatarURLJob
from .models import User, UserBan
from .tasks import send_welcome_email
@receiver(post_save, sender=User, dispatch_uid='users.user.post_save')
def on_user_save(sender, instance, created, **kwargs):
"""
A signal handler to be called after saving a user.
Invalidates the cache for the given user's gravatar URL.
"""
job = UserGravatarURLJob()
if instance.email:
handler = job.invalidate
elif instance.email is None:
handler = job.delete
else:
return
# do the heavy-lifting for all avatar sizes
for size in settings.AVATAR_SIZES:
handler(instance.email, size=size)
@receiver(user_signed_up, dispatch_uid='users.user_signed_up')
def on_user_signed_up(sender, request, user, **kwargs):
"""
Signal handler to be called when a given user has signed up.
"""
url = reverse('wiki.document', args=['MDN/Getting_started'])
msg = _('You have completed the first step of '
'<a href="%s">getting started with MDN</a>') % url
messages.success(request, msg)
if switch_is_active('welcome_email'):
# only send if the user has already verified
# at least one email address
if user.emailaddress_set.filter(verified=True).exists():
send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)
@receiver(email_confirmed, dispatch_uid='users.email_confirmed')
def on_email_confirmed(sender, request, email_address, **kwargs):
"""
Signal handler to be called when a given email address was confirmed
by a user.
"""
if switch_is_active('welcome_email'):
# only send if the user has exactly one verified (the given)
# email address, in other words if it was just confirmed
user = email_address.user
previous_emails = user.emailaddress_set.exclude(pk=email_address.pk)
if not previous_emails.exists():
send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)
@receiver(social_account_removed, dispatch_uid='users.social_account_removed')
def on_social_account_removed(sender, request, socialaccount, **kwargs):
"""
    Invoked just after a user successfully removed a social account.
    We use it to reset the name of the socialaccount provider in
    the user's session to one of the providers the user still has.
"""
user = socialaccount.user
try:
all_socialaccounts = user.socialaccount_set.all()
next_socialaccount = all_socialaccounts[0]
request.session['sociallogin_provider'] = next_socialaccount.provider
request.session.modified = True
except (ObjectDoesNotExist, IndexError):
pass
@receiver(post_save, sender=UserBan, dispatch_uid='users.user_ban.save')
def on_ban_save(sender, instance, **kwargs):
"""
Signal handler to be called when a given user ban is saved.
"""
user = instance.user
user.is_active = not instance.is_active
user.save()
invalidate_document_contribution(user)
@receiver(post_delete, sender=UserBan, dispatch_uid='users.user_ban.delete')
def on_ban_delete(sender, instance, **kwargs):
"""
Signal handler to be called when a user ban is deleted.
"""
user = instance.user
user.is_active = True
user.save()
invalidate_document_contribution(user)
def invalidate_document_contribution(user):
"""
Invalidate the contributor list for Documents the user has edited.
This will remove them if they have been banned, and add them if they
have been unbanned.
"""
revisions = user.created_revisions
doc_ids = set(revisions.values_list('document_id', flat=True))
job = DocumentContributorsJob()
for doc_id in doc_ids:
job.invalidate(doc_id)
| mpl-2.0 | -3,534,320,353,739,426,300 | 34.658333 | 78 | 0.703903 | false | 3.900638 | false | false | false |
Samcbehrens/VisualAnalytics | dataProcessing/USTimeline.py | 1 | 2413 | import csv
import datetime
import json
import calendar
from timeline3 import convertToFile
def convertTime(dateAsString):
MillisecNum=''
conv =''
if len(dateAsString)>4:
conv = datetime.datetime.strptime(dateAsString, '%m/%d/%Y')
MillisecNum = calendar.timegm(conv.timetuple())
else:
numberAsInt = int(dateAsString)
d = datetime.datetime(numberAsInt,1,1)
MillisecNum = calendar.timegm(d.timetuple())
MillisecNum = MillisecNum *1000
return MillisecNum
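# e.g. convertTime("07/04/1776") returns epoch milliseconds for that date,
# while convertTime("1776") resolves to Jan 1 of that year (also in ms).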
def readCsv():
allInformation = []
with open('usTimeline.csv', 'rb') as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read(), delimiters=',')
csvfile.seek(0)
reader=csv.reader(csvfile,dialect)
for line in reader:
print line
allInformation.append(line)
return allInformation
def reformat(allInformation):
newFormation =[]
for i in range(0, len(allInformation)):
        ## guard the look-ahead below, which raises IndexError otherwise
if i+1 < len(allInformation)-1:
##look ahead to see if the next one doesnt have a date
if allInformation[i+1][0]=='':
allInformation[i+1][0]=allInformation[i][0]
#add if it has the correct date
thisPotYear = allInformation[i][0]
if thisPotYear.isdigit():
newFormation.append(allInformation[i])
return newFormation
def webToJson(soup):
## formatting to turn into correct json
colors = ["red","orange", "yellow", "green", "blue"]
timeline = {"label": "usTimeline", "times": []}
addEvent={"color":"blue", "description":"description", "starting_time": 1}
    ## the timeline widget expects a list of timeline dicts, so the
    ## timeline has to be wrapped in an array before serializing
outerMost = []
print soup
for n in soup:
print n
print type(n)
if n[1] != '':
print n[1]
millis = convertTime(n[1])
addEvent["starting_time"] = millis
if n[0].isdigit():
millis = convertTime(n[0])
addEvent["starting_time"] = millis
addEvent["description"] = n[2]
if addEvent["description"]!="description" and addEvent["starting_time"]!=1:
addEvent["color"]='orange'
print 'addingEvent'
print addEvent
timeline["times"].append(addEvent)
addEvent={"color":"blue", "description":"description", "starting_time": 1}
outerMost.append(timeline)
return outerMost
if __name__ == '__main__':
allInformation = readCsv()
newFormation = reformat(allInformation)
finalFormation = webToJson(newFormation)
convertToFile('usTimeline.json',finalFormation)
| bsd-3-clause | 5,171,126,104,977,085,000 | 23.13 | 79 | 0.688355 | false | 3.125648 | false | false | false |
sxjscience/tvm | python/tvm/topi/nn/conv2d_transpose.py | 1 | 6465 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""Transposed 2D convolution operators (sometimes called Deconvolution)."""
import tvm
from tvm import te
from tvm import relay
from .dilate import dilate
from .pad import pad
from .util import get_pad_tuple
from ..util import simplify
def conv2d_transpose_nchw(Input, Filter, strides, padding, out_dtype, output_padding):
"""Transposed 2D convolution nchw forward operator.
Parameters
----------
Input : tvm.te.Tensor
4-D with shape [batch, in_channel, in_height, in_width]
Filter : tvm.te.Tensor
4-D with shape [in_channel, num_filter, filter_height, filter_width]
strides : tuple of two ints
The spatial stride along height and width
padding : int or str
Padding size, or ['VALID', 'SAME']
out_dtype : str
The output data type. This is used for mixed precision.
output_padding : tuple of ints
Used to get the right output shape for gradients
Returns
-------
Output : tvm.te.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
return declaration_conv2d_transpose_impl(
Input, Filter, strides, padding, out_dtype, output_padding=output_padding
)
def conv2d_transpose_nchw_preprocess(data, kernel, strides, padding, out_dtype, output_padding):
"""Preprocess data and kernel to make the compute pattern
of conv2d_transpose the same as conv2d"""
batch, in_c, in_h, in_w = data.shape
_, out_c, filter_h, filter_w = kernel.shape
stride_h, stride_w = strides
opad_h, opad_w = output_padding
assert opad_h < stride_h and opad_w < stride_w
# dilate data
data_dilate = dilate(data, [1, 1, stride_h, stride_w], name="data_dilate")
# pad data
fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w))
bpad_top = filter_h - 1 - fpad_top
bpad_bottom = filter_h - 1 - fpad_bottom + opad_h
bpad_left = filter_w - 1 - fpad_left
bpad_right = filter_w - 1 - fpad_right + opad_w
data_pad = pad(
data_dilate, [0, 0, bpad_top, bpad_left], [0, 0, bpad_bottom, bpad_right], name="data_pad"
)
# transform kernel layout from IOHW to OIHW, and rotate kernel by 180 degrees
kernel_transform = te.compute(
(out_c, in_c, filter_h, filter_w),
lambda o, i, h, w: kernel[i][o][filter_h - 1 - h][filter_w - 1 - w],
name="kernel_transform",
)
return data_pad, kernel_transform
def declaration_conv2d_transpose_impl(data, kernel, strides, padding, out_dtype, output_padding):
"""Implementation of conv2d transpose"""
data_pad, kernel_transform = conv2d_transpose_nchw_preprocess(
data, kernel, strides, padding, out_dtype, output_padding
)
batch, in_c, in_h, in_w = data_pad.shape
out_c, _, filter_h, filter_w = kernel_transform.shape
# convolution stage
out_c = simplify(out_c)
out_h = simplify(in_h - filter_h + 1)
out_w = simplify(in_w - filter_w + 1)
dc = te.reduce_axis((0, in_c), name="dc")
dh = te.reduce_axis((0, filter_h), name="dh")
dw = te.reduce_axis((0, filter_w), name="dw")
Output = te.compute(
(batch, out_c, out_h, out_w),
lambda b, c, h, w: te.sum(
data_pad[b, dc, h + dh, w + dw].astype(out_dtype)
* kernel_transform[c, dc, dh, dw].astype(out_dtype),
axis=[dc, dh, dw],
),
tag="conv2d_transpose_nchw",
)
return Output
@tvm.target.generic_func
def conv2d_transpose_legalize(attrs, inputs, types):
"""Legalizes Transposed 2D convolution op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current Transposed 2D convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
if attrs["data_layout"] == "NHWC":
data, kernel = inputs
kernel_layout = attrs["kernel_layout"]
# Convert Kernel layout to IOHW
# kernel_layout is different from input kernel layout - IO is swapped
if kernel_layout == "HWIO":
# input kernel layout is swapped to HWOI
# output kernel layout will be IOHW
kernel = relay.transpose(kernel, axes=(3, 2, 0, 1))
elif kernel_layout == "HWOI":
# input kernel layout is swapped to HWIO
# output kernel layout will be IOHW
kernel = relay.transpose(kernel, axes=(2, 3, 0, 1))
elif kernel_layout == "IOHW":
# input kernel layout is swapped to OIHW
# output kernel layout will be IOHW
kernel = relay.transpose(kernel, axes=(1, 0, 2, 3))
elif kernel_layout == "OIHW":
# input kernel layout is swapped to IOHW
# output kernel layout will be IOHW
pass
else:
# Skip legalize. Let relay.nn.conv2d_transpose to handle the case
return None
# Set new attrs for conv2d_transpose.
new_attrs = {k: attrs[k] for k in attrs.keys()}
new_attrs["data_layout"] = "NCHW"
# layout of kernel should be IOHW, but kernel_layout should be swapped - OIHW
new_attrs["kernel_layout"] = "OIHW"
# Convert data to NCHW.
data = relay.transpose(data, axes=(0, 3, 1, 2))
deconv = relay.nn.conv2d_transpose(data, kernel, **new_attrs)
# Convert back to original NHWC layout.
out = relay.transpose(deconv, axes=(0, 2, 3, 1))
return out
return None
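# Minimal sketch of declaring the transposed-convolution compute (illustrative
# shapes; scheduling and compilation are target-specific and omitted here).
if __name__ == "__main__":
    data = te.placeholder((1, 16, 8, 8), name="data")        # NCHW input
    kernel = te.placeholder((16, 32, 3, 3), name="kernel")   # IOHW weights
    out = conv2d_transpose_nchw(data, kernel, strides=(2, 2), padding=(1, 1),
                                out_dtype="float32", output_padding=(0, 0))
    print(out.shape)  # (1, 32, 15, 15) for these shapes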
| apache-2.0 | -5,046,389,322,234,244,000 | 35.942857 | 98 | 0.636195 | false | 3.579734 | false | false | false |
pgurumur/netconf | core/lib/ip.py | 1 | 12491 | # Copyright (c) 2015 Prabhu Gurumurthy <[email protected]>
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# XXX: Parts of this code, marked with %{ %} are under
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
__version__ = "$Revision: da23e41c6f10 $"
__author__ = "$Author: pgurumur $"
__modified__ = "$Date: 2013-06-16 08:41:30Z $"
# import everything from netaddr 0.7.x
from netaddr import IPNetwork, IPAddress, AddrFormatError, ZEROFILL
from exception import IPError
# unified IPv4/IPv6 class for handling network and address type for both the
# families
class ip(object):
# Class takes in one argument, this argument can be an IPv4/IPv6 subnet
# or it can be an IPv4/IPv6 address
def __init__(self, IPAddr, **Keywords):
# Main instance of either CIDR/IP class from netaddr
self._ip = None
# Whether given argument is a network or an address
self._isNetwork = False
if IPAddr is not None:
if isinstance(IPAddr, (IPNetwork, ip)):
self._ipAddr(str(IPAddr))
elif isinstance(IPAddr, IPAddress):
self._ip = IPAddr
elif isinstance(IPAddr, (int, long)):
if "version" in Keywords:
ipaddr = None
                    # Takes an integer and converts it into an IP address;
                    # if a prefix is given, turns it into a network and
                    # instantiates this class the same way. Raises
                    # exception.IPError if there is a problem.
                    # Works for both IPv4 and IPv6
if int(Keywords["version"]) == 4:
# For IPv4 addresses usually
octets = []
for _ in xrange(4):
octets.insert(0, str(IPAddr & 0xFF))
IPAddr >>= 8
ipaddr = ".".join(octets)
elif int(Keywords["version"]) == 6:
"""
# For IPv6 addresses usually
hexstr = "%32x" %IPAddr
hextets = []
for ix in range(0, 32, 4):
hextets.append("%x" %int(hexstr[ix:ix + 4], 16))
hextets = self._compress(hextets)
ipaddr = ":".join(hextets)
"""
                        # hex() of a Py2 long ends in 'L' while a plain int
                        # does not; strip it instead of slicing blindly
                        hextets = '0' * 32 + hex(IPAddr)[2:].rstrip('L')
temp = ""
for ix in xrange(1, 33):
temp = hextets[-ix] + temp
if ix % 4 == 0:
temp = ':' + temp
ipaddr = temp[1:]
else:
raise IPError("unknown IP version")
if "prefix" in Keywords:
if Keywords['prefix']:
ipaddr = "%s/%d" %(ipaddr, Keywords["prefix"])
self._ipAddr(ipaddr)
else:
raise IPError("no version defined!")
elif isinstance(IPAddr, (str, unicode)):
self._ipAddr(str(IPAddr))
else:
raise IPError("unknown object instance: %s" %type(IPAddr))
else:
raise IPError("no ip address/subnet defined!")
def _ipAddr(self, Str):
# Function that forms either IPNetwork or IPaddress instantiation
# based on given string, used for constructor with following objects
# IPNetwork
# IPAddress
# ip
# String
if isinstance(Str, str):
            # Check whether we have a forward slash; if we do, it is
            # usually a network, but we still verify the prefixlen: if
            # the prefixlen is 32 or 128, the value is automatically
            # converted into IPAddress form instead
iplen = len(Str.split("/"))
try:
if iplen == 2:
# String with forward slash
self._ip = IPNetwork(Str, implicit_prefix = True,
flags = ZEROFILL)
prefix = self._ip.prefixlen
# if the prefixlen is 32 or 128 it is an IPAddress
if (prefix == 32) or (prefix == 128):
self._ip = IPAddress(Str.split("/")[0], flags = ZEROFILL)
# The following if block is necessary, otherwise for a
# /32 bit IPv6 address, it would be treated as an address
# instead of subnet
if (prefix == 32) and (self._ip.version != 4):
self._ip = IPNetwork(Str, implicit_prefix = True,
flags = ZEROFILL)
self._isNetwork = True
else:
# Toggle to the network flag
self._isNetwork = True
elif iplen == 1:
self._ip = IPAddress(Str, flags = ZEROFILL)
else:
raise IPError("invalid IPv4/IPv6 address: %s" %Str)
except ValueError as err:
raise IPError(str(err))
except AddrFormatError as err:
raise IPError(str(err))
def _compress(self, hextets):
# %{
# From ipaddr.py
if hextets:
"""
Compresses a list of hextets.
Compresses a list of strings, replacing the longest continuous
sequence of "0" in the list with "" and adding empty strings at
the beginning or at the end of the string such that subsequently
calling ":".join(hextets) will produce the compressed version of
the IPv6 address.
Args:
hextets: A list of strings, the hextets to compress.
Returns:
A list of strings.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
for index in range(len(hextets)):
if hextets[index] == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
# %}
return hextets
def subnets(self, SubnetValue = 0):
try:
temp = int(SubnetValue)
except ValueError as err:
raise IPError(err)
else:
if self._isNetwork:
try:
for items in list(self._ip.subnet(SubnetValue)):
yield ip(items)
except IPError as err:
raise IPError(err)
def netmask(self, Wildcard = False):
        # IPv4-only; returns the netmask, or the hostmask when Wildcard is set
retval = None
if self._isNetwork and (self._ip.version == 4):
if Wildcard:
retval = self._ip.hostmask
else:
retval = self._ip.netmask
return ip(retval)
@property
def broadcast(self):
        # IPv4-only; returns the broadcast address of the network
retval = None
if self._isNetwork and (self._ip.version == 4):
retval = ip(self._ip.broadcast)
return retval
# Get the size of the network
@property
def size(self):
retval = 1
if self._isNetwork:
retval = self._ip.size
return retval
# Binary values (in bits) of the IPv4/IPv6 address
@property
def binary(self):
retval = None
if not self._isNetwork:
retval = self._ip.bits()
return retval
# used for 'in' operand
def __contains__(self, IPAddr):
retval = False
if self._isNetwork:
temp = None
try:
temp = ip(IPAddr)
except IPError as err:
raise IPError(err)
else:
address = None
if temp._isNetwork:
address = IPNetwork(str(IPAddr), flags = ZEROFILL)
else:
address = IPAddress(str(IPAddr), flags = ZEROFILL)
if address in self._ip:
retval = True
return retval
# for int function
def __int__(self):
retval = None
if self._isNetwork:
retval = self._ip.value
else:
retval = int(self._ip)
return retval
# For list function
def __iter__(self):
if self._isNetwork:
try:
for items in list(self._ip):
yield ip(items)
except IndexError as err:
raise IPError(err)
else:
yield "%s" %self
# for len function
def __len__(self):
retval = 0
if self._isNetwork:
retval = self._ip.prefixlen
else:
if self.version == 4:
retval = 32
elif self.version == 6:
retval = 128
return retval
# for str function
def __str__(self):
return str(self._ip).encode('utf-8')
def __repr__(self):
return repr(str(self))
def __unicode__(self):
return unicode(str(self))
# for hex function
def __hex__(self):
retval = None
if self._isNetwork:
retval = hex(self._ip.value)
else:
retval = hex(self._ip)
return retval
def __oct__(self):
return oct(int(self))
def __add__(self, obj):
if isinstance(obj, str):
temp = str(self._ip) + "%s" %obj
self._ipAddr(temp)
else:
raise IPError("invalid type ('%s') to add" %type(obj))
return self._ip
def __getitem__(self, Key):
retval = None
if isinstance(Key, int):
try:
if self.size > 1:
retval = str(self._ip[int(Key)])
else:
retval = str(self._ip)
except ValueError as err:
raise IPError(err)
except IndexError as err:
raise IPError(err)
else:
raise IPError("cannot get index value for non integer type key")
return retval
def __eq__(self, other):
retval = False
if int(self) == int(other):
retval = True
return retval
@property
def reverse(self):
retval = None
if len(self) == 32:
retval = self._ip.reverse_dns.split(".in-addr")[0]
elif len(self) == 128:
retval = self._ip.reverse_dns.split(".ip6")[0]
return retval
ismulticast = property(fget = lambda self: self._ip.is_multicast())
isreserved = property(fget = lambda self: self._ip.is_reserved())
version = property(fget = lambda self: self._ip.version)
value = property(fget = lambda self: int(self))
length = property(fget = lambda self: len(self))
private = property(fget = lambda self: self._ip.is_private())
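# Minimal usage sketch (illustrative values): the same class wraps both
# networks and plain addresses.
if __name__ == '__main__':
    net = ip("10.0.0.0/30")
    print len(net)            # prefix length: 30
    print net.size            # addresses in the subnet: 4
    print "10.0.0.1" in net   # membership via __contains__: True
    addr = ip("10.0.0.1")
    print int(addr), addr.reverse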
| isc | -6,283,409,211,468,774,000 | 31.698953 | 77 | 0.553519 | false | 4.266052 | false | false | false |
osmr/utct | TFLearn/feed_dict_flow_cp.py | 1 | 1917 | from tflearn import data_flow
class FeedDictFlowCp(data_flow.FeedDictFlow):
"""
Wrapper of TFLearn's FeedDictFlow for some types of augmentation.
"""
def __init__(self,
feed_dict,
coord,
batch_size=128,
num_threads=8,
max_queue=32,
shuffle=False,
continuous=False,
ensure_data_order=False,
dprep_dict=None,
daug_dict=None,
index_array=None):
super(FeedDictFlowCp, self).__init__(feed_dict,
coord,
batch_size,
num_threads,
max_queue,
shuffle,
continuous,
ensure_data_order,
dprep_dict,
daug_dict,
index_array)
def fill_feed_dict_queue(self):
while not self.coord.should_stop() and not self.interrupted:
batch_ids = self.batch_ids_queue.get()
if batch_ids is False:
break
data = self.retrieve_data(batch_ids)
# Apply augmentation according to daug dict
if self.daug_dict:
for k in self.daug_dict:
data = self.daug_dict[k].apply(data)
# Apply preprocessing according to dprep dict
if self.dprep_dict:
for k in self.dprep_dict:
data[k] = self.dprep_dict[k].apply(data[k])
# all prepped, put the data into the queue
self.feed_dict_queue.put(data) | mit | 4,391,301,540,408,282,000 | 36.607843 | 69 | 0.411581 | false | 5.098404 | false | false | false |
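# Hedged usage sketch for FeedDictFlowCp above (TensorFlow 1.x / TFLearn
# conventions assumed): unlike dprep_dict entries, which transform a single
# array, each daug_dict entry here receives the whole batch via .apply(data);
# pairwise_augmenter is a hypothetical object exposing such an .apply().
#
#   import tensorflow as tf
#   coord = tf.train.Coordinator()
#   flow = FeedDictFlowCp({x_ph: X, y_ph: Y}, coord, batch_size=64,
#                         shuffle=True, daug_dict={'aug': pairwise_augmenter})
#   flow.start()
#   batch = flow.next()   # a feed-dict ready for session.run()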
HybridF5/jacket | jacket/db/storage/api.py | 1 | 44700 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
Functions in this module are imported into the storage.db namespace. Call these
functions from storage.db namespace, not the storage.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/storage/storage.sqlite`.
:enable_new_services: whether a service newly added to the database is placed
                      in the pool of available hardware (Default: True)
"""
from oslo_config import cfg
from oslo_db import concurrency as db_concurrency
from oslo_db import options as db_options
from jacket.api.storage import common
from jacket.common.storage import constants
from jacket.storage.i18n import _
db_opts = [
cfg.BoolOpt('enable_new_services',
default=True,
help='Services to be added to the available pool on create'),
cfg.StrOpt('volume_name_template',
default='volume-%s',
help='Template string to be used to generate volume names'),
cfg.StrOpt('snapshot_name_template',
default='snapshot-%s',
help='Template string to be used to generate snapshot names'),
cfg.StrOpt('backup_name_template',
default='backup-%s',
help='Template string to be used to generate backup names'), ]
CONF = cfg.CONF
CONF.register_opts(db_opts)
db_options.set_defaults(CONF)
CONF.set_default('sqlite_db', 'jacket.db.storage.sqlite', group='database')
_BACKEND_MAPPING = {'sqlalchemy': 'jacket.db.storage.sqlalchemy.api'}
IMPL = db_concurrency.TpoolDbapiWrapper(CONF, _BACKEND_MAPPING)
# The maximum value a signed INT type may have
MAX_INT = constants.DB_MAX_INT
###################
def dispose_engine():
"""Force the engine to establish new connections."""
# FIXME(jdg): When using sqlite if we do the dispose
# we seem to lose our DB here. Adding this check
# means we don't do the dispose, but we keep our sqlite DB
# This likely isn't the best way to handle this
if 'sqlite' not in IMPL.get_engine().name:
return IMPL.dispose_engine()
else:
return
###################
#def service_destroy(context, service_id):
# """Destroy the service or raise if it does not exist."""
# return IMPL.service_destroy(context, service_id)
#def service_get(context, service_id):
# """Get a service or raise if it does not exist."""
# return IMPL.service_get(context, service_id)
#def service_get_by_host_and_topic(context, host, topic):
# """Get a service by host it's on and topic it listens to."""
# return IMPL.service_get_by_host_and_topic(context, host, topic)
#
#
#def service_get_all(context, filters=None):
# """Get all services."""
# return IMPL.service_get_all(context, filters)
#def service_get_all_by_topic(context, topic, disabled=None):
# """Get all services for a given topic."""
# return IMPL.service_get_all_by_topic(context, topic, disabled=disabled)
#def service_get_all_by_binary(context, binary, disabled=None):
# """Get all services for a given binary."""
# return IMPL.service_get_all_by_binary(context, binary, disabled)
#def service_get_by_args(context, host, binary):
# """Get the state of a service by node name and binary."""
# return IMPL.service_get_by_args(context, host, binary)
#def service_create(context, values):
# """Create a service from the values dictionary."""
# return IMPL.service_create(context, values)
#def service_update(context, service_id, values):
# """Set the given properties on an service and update it.
# Raises NotFound if service does not exist.
# """
# return IMPL.service_update(context, service_id, values)
###############
def volume_attach(context, values):
"""Attach a volume."""
return IMPL.volume_attach(context, values)
def volume_attached(context, volume_id, instance_id, host_name, mountpoint,
attach_mode='rw'):
"""Ensure that a volume is set as attached."""
return IMPL.volume_attached(context, volume_id, instance_id, host_name,
mountpoint, attach_mode)
def volume_create(context, values):
"""Create a volume from the values dictionary."""
return IMPL.volume_create(context, values)
def volume_data_get_for_host(context, host, count_only=False):
"""Get (volume_count, gigabytes) for project."""
return IMPL.volume_data_get_for_host(context,
host,
count_only)
def volume_data_get_for_project(context, project_id):
"""Get (volume_count, gigabytes) for project."""
return IMPL.volume_data_get_for_project(context, project_id)
def volume_destroy(context, volume_id):
"""Destroy the volume or raise if it does not exist."""
return IMPL.volume_destroy(context, volume_id)
def volume_detached(context, volume_id, attachment_id):
"""Ensure that a volume is set as detached."""
return IMPL.volume_detached(context, volume_id, attachment_id)
def volume_get(context, volume_id):
"""Get a volume or raise if it does not exist."""
return IMPL.volume_get(context, volume_id)
def volume_get_all(context, marker, limit, sort_keys=None, sort_dirs=None,
filters=None, offset=None):
"""Get all volumes."""
return IMPL.volume_get_all(context, marker, limit, sort_keys=sort_keys,
sort_dirs=sort_dirs, filters=filters,
offset=offset)
def volume_get_all_by_host(context, host, filters=None):
"""Get all volumes belonging to a host."""
return IMPL.volume_get_all_by_host(context, host, filters=filters)
def volume_get_all_by_group(context, group_id, filters=None):
"""Get all volumes belonging to a consistency group."""
return IMPL.volume_get_all_by_group(context, group_id, filters=filters)
def volume_get_all_by_project(context, project_id, marker, limit,
sort_keys=None, sort_dirs=None, filters=None,
offset=None):
"""Get all volumes belonging to a project."""
return IMPL.volume_get_all_by_project(context, project_id, marker, limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters,
offset=offset)
def volume_update(context, volume_id, values):
"""Set the given properties on a volume and update it.
Raises NotFound if volume does not exist.
"""
return IMPL.volume_update(context, volume_id, values)
def volume_attachment_update(context, attachment_id, values):
return IMPL.volume_attachment_update(context, attachment_id, values)
def volume_attachment_get(context, attachment_id, session=None):
return IMPL.volume_attachment_get(context, attachment_id, session)
def volume_attachment_get_used_by_volume_id(context, volume_id):
return IMPL.volume_attachment_get_used_by_volume_id(context, volume_id)
def volume_attachment_get_by_host(context, volume_id, host):
return IMPL.volume_attachment_get_by_host(context, volume_id, host)
def volume_attachment_get_by_instance_uuid(context, volume_id, instance_uuid):
return IMPL.volume_attachment_get_by_instance_uuid(context, volume_id,
instance_uuid)
def volume_update_status_based_on_attachment(context, volume_id):
"""Update volume status according to attached instance id"""
return IMPL.volume_update_status_based_on_attachment(context, volume_id)
def volume_has_snapshots_filter():
return IMPL.volume_has_snapshots_filter()
def volume_has_undeletable_snapshots_filter():
return IMPL.volume_has_undeletable_snapshots_filter()
def volume_has_attachments_filter():
return IMPL.volume_has_attachments_filter()
####################
def snapshot_create(context, values):
"""Create a snapshot from the values dictionary."""
return IMPL.snapshot_create(context, values)
def snapshot_destroy(context, snapshot_id):
"""Destroy the snapshot or raise if it does not exist."""
return IMPL.snapshot_destroy(context, snapshot_id)
def snapshot_get(context, snapshot_id):
"""Get a snapshot or raise if it does not exist."""
return IMPL.snapshot_get(context, snapshot_id)
def snapshot_get_all(context, filters=None, marker=None, limit=None,
sort_keys=None, sort_dirs=None, offset=None):
"""Get all snapshots."""
return IMPL.snapshot_get_all(context, filters, marker, limit, sort_keys,
sort_dirs, offset)
def snapshot_get_all_by_project(context, project_id, filters=None, marker=None,
limit=None, sort_keys=None, sort_dirs=None,
offset=None):
"""Get all snapshots belonging to a project."""
return IMPL.snapshot_get_all_by_project(context, project_id, filters,
marker, limit, sort_keys,
sort_dirs, offset)
def snapshot_get_by_host(context, host, filters=None):
"""Get all snapshots belonging to a host.
:param host: Include include snapshots only for specified host.
:param filters: Filters for the query in the form of key/value.
"""
return IMPL.snapshot_get_by_host(context, host, filters)
def snapshot_get_all_for_cgsnapshot(context, project_id):
"""Get all snapshots belonging to a cgsnapshot."""
return IMPL.snapshot_get_all_for_cgsnapshot(context, project_id)
def snapshot_get_all_for_volume(context, volume_id):
"""Get all snapshots for a volume."""
return IMPL.snapshot_get_all_for_volume(context, volume_id)
def snapshot_update(context, snapshot_id, values):
"""Set the given properties on an snapshot and update it.
Raises NotFound if snapshot does not exist.
"""
return IMPL.snapshot_update(context, snapshot_id, values)
def snapshot_data_get_for_project(context, project_id, volume_type_id=None):
"""Get count and gigabytes used for snapshots for specified project."""
return IMPL.snapshot_data_get_for_project(context,
project_id,
volume_type_id)
def snapshot_get_active_by_window(context, begin, end=None, project_id=None):
"""Get all the snapshots inside the window.
Specifying a project_id will filter for a certain project.
"""
return IMPL.snapshot_get_active_by_window(context, begin, end, project_id)
####################
def snapshot_metadata_get(context, snapshot_id):
"""Get all metadata for a snapshot."""
return IMPL.snapshot_metadata_get(context, snapshot_id)
def snapshot_metadata_delete(context, snapshot_id, key):
"""Delete the given metadata item."""
return IMPL.snapshot_metadata_delete(context, snapshot_id, key)
def snapshot_metadata_update(context, snapshot_id, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
return IMPL.snapshot_metadata_update(context, snapshot_id,
metadata, delete)
####################
def volume_metadata_get(context, volume_id):
"""Get all metadata for a volume."""
return IMPL.volume_metadata_get(context, volume_id)
def volume_metadata_delete(context, volume_id, key,
meta_type=common.METADATA_TYPES.user):
"""Delete the given metadata item."""
return IMPL.volume_metadata_delete(context, volume_id,
key, meta_type)
def volume_metadata_update(context, volume_id, metadata,
delete, meta_type=common.METADATA_TYPES.user):
"""Update metadata if it exists, otherwise create it."""
return IMPL.volume_metadata_update(context, volume_id, metadata,
delete, meta_type)
##################
def volume_admin_metadata_get(context, volume_id):
"""Get all administration metadata for a volume."""
return IMPL.volume_admin_metadata_get(context, volume_id)
def volume_admin_metadata_delete(context, volume_id, key):
"""Delete the given metadata item."""
return IMPL.volume_admin_metadata_delete(context, volume_id, key)
def volume_admin_metadata_update(context, volume_id, metadata, delete,
add=True, update=True):
"""Update metadata if it exists, otherwise create it."""
return IMPL.volume_admin_metadata_update(context, volume_id, metadata,
delete, add, update)
##################
def volume_type_create(context, values, projects=None):
"""Create a new volume type."""
return IMPL.volume_type_create(context, values, projects)
def volume_type_update(context, volume_type_id, values):
return IMPL.volume_type_update(context, volume_type_id, values)
def volume_type_get_all(context, inactive=False, filters=None, marker=None,
limit=None, sort_keys=None, sort_dirs=None,
offset=None, list_result=False):
"""Get all volume types.
:param context: context to query under
:param inactive: Include inactive volume types to the result set
:param filters: Filters for the query in the form of key/value.
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param list_result: For compatibility, if list_result = True, return a list
instead of dict.
:is_public: Filter volume types based on visibility:
* **True**: List public volume types only
* **False**: List private volume types only
* **None**: List both public and private volume types
:returns: list/dict of matching volume types
"""
return IMPL.volume_type_get_all(context, inactive, filters, marker=marker,
limit=limit, sort_keys=sort_keys,
sort_dirs=sort_dirs, offset=offset,
list_result=list_result)
def volume_type_get(context, id, inactive=False, expected_fields=None):
"""Get volume type by id.
:param context: context to query under
:param id: Volume type id to get.
:param inactive: Consider inactive volume types when searching
:param expected_fields: Return those additional fields.
Supported fields are: projects.
:returns: volume type
"""
return IMPL.volume_type_get(context, id, inactive, expected_fields)
def volume_type_get_by_name(context, name):
"""Get volume type by name."""
return IMPL.volume_type_get_by_name(context, name)
def volume_types_get_by_name_or_id(context, volume_type_list):
"""Get volume types by name or id."""
return IMPL.volume_types_get_by_name_or_id(context, volume_type_list)
def volume_type_qos_associations_get(context, qos_specs_id, inactive=False):
"""Get volume types that are associated with specific qos specs."""
return IMPL.volume_type_qos_associations_get(context,
qos_specs_id,
inactive)
def volume_type_qos_associate(context, type_id, qos_specs_id):
"""Associate a volume type with specific qos specs."""
return IMPL.volume_type_qos_associate(context, type_id, qos_specs_id)
def volume_type_qos_disassociate(context, qos_specs_id, type_id):
"""Disassociate a volume type from specific qos specs."""
return IMPL.volume_type_qos_disassociate(context, qos_specs_id, type_id)
def volume_type_qos_disassociate_all(context, qos_specs_id):
"""Disassociate all volume types from specific qos specs."""
return IMPL.volume_type_qos_disassociate_all(context,
qos_specs_id)
def volume_type_qos_specs_get(context, type_id):
"""Get all qos specs for given volume type."""
return IMPL.volume_type_qos_specs_get(context, type_id)
def volume_type_destroy(context, id):
"""Delete a volume type."""
return IMPL.volume_type_destroy(context, id)
def volume_get_active_by_window(context, begin, end=None, project_id=None):
"""Get all the volumes inside the window.
Specifying a project_id will filter for a certain project.
"""
return IMPL.volume_get_active_by_window(context, begin, end, project_id)
def volume_type_access_get_all(context, type_id):
"""Get all volume type access of a volume type."""
return IMPL.volume_type_access_get_all(context, type_id)
def volume_type_access_add(context, type_id, project_id):
"""Add volume type access for project."""
return IMPL.volume_type_access_add(context, type_id, project_id)
def volume_type_access_remove(context, type_id, project_id):
"""Remove volume type access for project."""
return IMPL.volume_type_access_remove(context, type_id, project_id)
####################
def volume_type_extra_specs_get(context, volume_type_id):
"""Get all extra specs for a volume type."""
return IMPL.volume_type_extra_specs_get(context, volume_type_id)
def volume_type_extra_specs_delete(context, volume_type_id, key):
"""Delete the given extra specs item."""
return IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
def volume_type_extra_specs_update_or_create(context,
volume_type_id,
extra_specs):
"""Create or update volume type extra specs.
This adds or modifies the key/value pairs specified in the extra specs dict
argument.
"""
return IMPL.volume_type_extra_specs_update_or_create(context,
volume_type_id,
extra_specs)
###################
def volume_type_encryption_get(context, volume_type_id, session=None):
return IMPL.volume_type_encryption_get(context, volume_type_id, session)
def volume_type_encryption_delete(context, volume_type_id):
return IMPL.volume_type_encryption_delete(context, volume_type_id)
def volume_type_encryption_create(context, volume_type_id, encryption_specs):
return IMPL.volume_type_encryption_create(context, volume_type_id,
encryption_specs)
def volume_type_encryption_update(context, volume_type_id, encryption_specs):
return IMPL.volume_type_encryption_update(context, volume_type_id,
encryption_specs)
def volume_type_encryption_volume_get(context, volume_type_id, session=None):
return IMPL.volume_type_encryption_volume_get(context, volume_type_id,
session)
def volume_encryption_metadata_get(context, volume_id, session=None):
return IMPL.volume_encryption_metadata_get(context, volume_id, session)
###################
def qos_specs_create(context, values):
"""Create a qos_specs."""
return IMPL.qos_specs_create(context, values)
def qos_specs_get(context, qos_specs_id):
"""Get all specification for a given qos_specs."""
return IMPL.qos_specs_get(context, qos_specs_id)
def qos_specs_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
"""Get all qos_specs."""
return IMPL.qos_specs_get_all(context, filters=filters, marker=marker,
limit=limit, offset=offset,
sort_keys=sort_keys, sort_dirs=sort_dirs)
def qos_specs_get_by_name(context, name):
"""Get all specification for a given qos_specs."""
return IMPL.qos_specs_get_by_name(context, name)
def qos_specs_associations_get(context, qos_specs_id):
"""Get all associated volume types for a given qos_specs."""
return IMPL.qos_specs_associations_get(context, qos_specs_id)
def qos_specs_associate(context, qos_specs_id, type_id):
"""Associate qos_specs from volume type."""
return IMPL.qos_specs_associate(context, qos_specs_id, type_id)
def qos_specs_disassociate(context, qos_specs_id, type_id):
"""Disassociate qos_specs from volume type."""
return IMPL.qos_specs_disassociate(context, qos_specs_id, type_id)
def qos_specs_disassociate_all(context, qos_specs_id):
"""Disassociate qos_specs from all entities."""
return IMPL.qos_specs_disassociate_all(context, qos_specs_id)
def qos_specs_delete(context, qos_specs_id):
"""Delete the qos_specs."""
return IMPL.qos_specs_delete(context, qos_specs_id)
def qos_specs_item_delete(context, qos_specs_id, key):
"""Delete specified key in the qos_specs."""
return IMPL.qos_specs_item_delete(context, qos_specs_id, key)
def qos_specs_update(context, qos_specs_id, specs):
"""Update qos specs.
This adds or modifies the key/value pairs specified in the
specs dict argument for a given qos_specs.
"""
return IMPL.qos_specs_update(context, qos_specs_id, specs)
###################
def volume_glance_metadata_create(context, volume_id, key, value):
"""Update the Glance metadata for the specified volume."""
return IMPL.volume_glance_metadata_create(context,
volume_id,
key,
value)
def volume_glance_metadata_bulk_create(context, volume_id, metadata):
"""Add Glance metadata for specified volume (multiple pairs)."""
return IMPL.volume_glance_metadata_bulk_create(context, volume_id,
metadata)
def volume_glance_metadata_get_all(context):
"""Return the glance metadata for all volumes."""
return IMPL.volume_glance_metadata_get_all(context)
def volume_glance_metadata_get(context, volume_id):
"""Return the glance metadata for a volume."""
return IMPL.volume_glance_metadata_get(context, volume_id)
def volume_glance_metadata_list_get(context, volume_id_list):
"""Return the glance metadata for a volume list."""
return IMPL.volume_glance_metadata_list_get(context, volume_id_list)
def volume_snapshot_glance_metadata_get(context, snapshot_id):
"""Return the Glance metadata for the specified snapshot."""
return IMPL.volume_snapshot_glance_metadata_get(context, snapshot_id)
def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id):
"""Update the Glance metadata for a snapshot.
This will copy all of the key:value pairs from the originating volume,
to ensure that a volume created from the snapshot will retain the
original metadata.
"""
return IMPL.volume_glance_metadata_copy_to_snapshot(context, snapshot_id,
volume_id)
def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id):
"""Update the Glance metadata from a volume (created from a snapshot).
This will copy all of the key:value pairs from the originating snapshot,
to ensure that the Glance metadata from the original volume is retained.
"""
return IMPL.volume_glance_metadata_copy_to_volume(context, volume_id,
snapshot_id)
def volume_glance_metadata_delete_by_volume(context, volume_id):
"""Delete the glance metadata for a volume."""
return IMPL.volume_glance_metadata_delete_by_volume(context, volume_id)
def volume_glance_metadata_delete_by_snapshot(context, snapshot_id):
"""Delete the glance metadata for a snapshot."""
return IMPL.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
def volume_glance_metadata_copy_from_volume_to_volume(context,
src_volume_id,
volume_id):
"""Update the Glance metadata for a volume.
Update the Glance metadata for a volume by copying all of the key:value
pairs from the originating volume.
This is so that a volume created from the volume (clone) will retain the
original metadata.
"""
return IMPL.volume_glance_metadata_copy_from_volume_to_volume(
context,
src_volume_id,
volume_id)
###################
def quota_create(context, project_id, resource, limit, allocated=0):
"""Create a quota for the given project and resource."""
return IMPL.quota_create(context, project_id, resource, limit,
allocated=allocated)
def quota_get(context, project_id, resource):
"""Retrieve a quota or raise if it does not exist."""
return IMPL.quota_get(context, project_id, resource)
def quota_get_all_by_project(context, project_id):
"""Retrieve all quotas associated with a given project."""
return IMPL.quota_get_all_by_project(context, project_id)
def quota_allocated_get_all_by_project(context, project_id):
"""Retrieve all allocated quotas associated with a given project."""
return IMPL.quota_allocated_get_all_by_project(context, project_id)
def quota_allocated_update(context, project_id,
resource, allocated):
"""Update allocated quota to subprojects or raise if it does not exist.
:raises: storage.exception.ProjectQuotaNotFound
"""
return IMPL.quota_allocated_update(context, project_id,
resource, allocated)
def quota_update(context, project_id, resource, limit):
"""Update a quota or raise if it does not exist."""
return IMPL.quota_update(context, project_id, resource, limit)
def quota_update_resource(context, old_res, new_res):
"""Update resource of quotas."""
return IMPL.quota_update_resource(context, old_res, new_res)
def quota_destroy(context, project_id, resource):
"""Destroy the quota or raise if it does not exist."""
return IMPL.quota_destroy(context, project_id, resource)
###################
def quota_class_create(context, class_name, resource, limit):
"""Create a quota class for the given name and resource."""
return IMPL.quota_class_create(context, class_name, resource, limit)
def quota_class_get(context, class_name, resource):
"""Retrieve a quota class or raise if it does not exist."""
return IMPL.quota_class_get(context, class_name, resource)
def quota_class_get_default(context):
"""Retrieve all default quotas."""
return IMPL.quota_class_get_default(context)
def quota_class_get_all_by_name(context, class_name):
"""Retrieve all quotas associated with a given quota class."""
return IMPL.quota_class_get_all_by_name(context, class_name)
def quota_class_update(context, class_name, resource, limit):
"""Update a quota class or raise if it does not exist."""
return IMPL.quota_class_update(context, class_name, resource, limit)
def quota_class_update_resource(context, resource, new_resource):
"""Update resource name in quota_class."""
return IMPL.quota_class_update_resource(context, resource, new_resource)
def quota_class_destroy(context, class_name, resource):
"""Destroy the quota class or raise if it does not exist."""
return IMPL.quota_class_destroy(context, class_name, resource)
def quota_class_destroy_all_by_name(context, class_name):
"""Destroy all quotas associated with a given quota class."""
return IMPL.quota_class_destroy_all_by_name(context, class_name)
###################
def quota_usage_get(context, project_id, resource):
"""Retrieve a quota usage or raise if it does not exist."""
return IMPL.quota_usage_get(context, project_id, resource)
def quota_usage_get_all_by_project(context, project_id):
"""Retrieve all usage associated with a given resource."""
return IMPL.quota_usage_get_all_by_project(context, project_id)
###################
def quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age, project_id=None,
is_allocated_reserve=False):
"""Check quotas and create appropriate reservations."""
return IMPL.quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age, project_id=project_id,
is_allocated_reserve=is_allocated_reserve)
def reservation_commit(context, reservations, project_id=None):
"""Commit quota reservations."""
return IMPL.reservation_commit(context, reservations,
project_id=project_id)
def reservation_rollback(context, reservations, project_id=None):
"""Roll back quota reservations."""
return IMPL.reservation_rollback(context, reservations,
project_id=project_id)
def quota_destroy_by_project(context, project_id):
"""Destroy all quotas associated with a given project."""
return IMPL.quota_destroy_by_project(context, project_id)
def reservation_expire(context):
"""Roll back any expired reservations."""
return IMPL.reservation_expire(context)
def quota_usage_update_resource(context, old_res, new_res):
"""Update resource field in quota_usages."""
return IMPL.quota_usage_update_resource(context, old_res, new_res)
###################
def backup_get(context, backup_id, read_deleted=None, project_only=True):
"""Get a backup or raise if it does not exist."""
return IMPL.backup_get(context, backup_id, read_deleted, project_only)
def backup_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
"""Get all backups."""
return IMPL.backup_get_all(context, filters=filters, marker=marker,
limit=limit, offset=offset, sort_keys=sort_keys,
sort_dirs=sort_dirs)
def backup_get_all_by_host(context, host):
"""Get all backups belonging to a host."""
return IMPL.backup_get_all_by_host(context, host)
def backup_create(context, values):
"""Create a backup from the values dictionary."""
return IMPL.backup_create(context, values)
def backup_get_all_by_project(context, project_id, filters=None, marker=None,
limit=None, offset=None, sort_keys=None,
sort_dirs=None):
"""Get all backups belonging to a project."""
return IMPL.backup_get_all_by_project(context, project_id,
filters=filters, marker=marker,
limit=limit, offset=offset,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
def backup_get_all_by_volume(context, volume_id, filters=None):
"""Get all backups belonging to a volume."""
return IMPL.backup_get_all_by_volume(context, volume_id,
filters=filters)
def backup_update(context, backup_id, values):
"""Set the given properties on a backup and update it.
Raises NotFound if backup does not exist.
"""
return IMPL.backup_update(context, backup_id, values)
def backup_destroy(context, backup_id):
"""Destroy the backup or raise if it does not exist."""
return IMPL.backup_destroy(context, backup_id)
###################
def transfer_get(context, transfer_id):
"""Get a volume transfer record or raise if it does not exist."""
return IMPL.transfer_get(context, transfer_id)
def transfer_get_all(context):
"""Get all volume transfer records."""
return IMPL.transfer_get_all(context)
def transfer_get_all_by_project(context, project_id):
"""Get all volume transfer records for specified project."""
return IMPL.transfer_get_all_by_project(context, project_id)
def transfer_create(context, values):
"""Create an entry in the transfers table."""
return IMPL.transfer_create(context, values)
def transfer_destroy(context, transfer_id):
"""Destroy a record in the volume transfer table."""
return IMPL.transfer_destroy(context, transfer_id)
def transfer_accept(context, transfer_id, user_id, project_id):
"""Accept a volume transfer."""
return IMPL.transfer_accept(context, transfer_id, user_id, project_id)
###################
def consistencygroup_get(context, consistencygroup_id):
"""Get a consistencygroup or raise if it does not exist."""
return IMPL.consistencygroup_get(context, consistencygroup_id)
def consistencygroup_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
"""Get all consistencygroups."""
return IMPL.consistencygroup_get_all(context, filters=filters,
marker=marker, limit=limit,
offset=offset, sort_keys=sort_keys,
sort_dirs=sort_dirs)
def consistencygroup_create(context, values):
"""Create a consistencygroup from the values dictionary."""
return IMPL.consistencygroup_create(context, values)
def consistencygroup_get_all_by_project(context, project_id, filters=None,
marker=None, limit=None, offset=None,
sort_keys=None, sort_dirs=None):
"""Get all consistencygroups belonging to a project."""
return IMPL.consistencygroup_get_all_by_project(context, project_id,
filters=filters,
marker=marker, limit=limit,
offset=offset,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
def consistencygroup_update(context, consistencygroup_id, values):
"""Set the given properties on a consistencygroup and update it.
Raises NotFound if consistencygroup does not exist.
"""
return IMPL.consistencygroup_update(context, consistencygroup_id, values)
def consistencygroup_destroy(context, consistencygroup_id):
"""Destroy the consistencygroup or raise if it does not exist."""
return IMPL.consistencygroup_destroy(context, consistencygroup_id)
###################
def cgsnapshot_get(context, cgsnapshot_id):
"""Get a cgsnapshot or raise if it does not exist."""
return IMPL.cgsnapshot_get(context, cgsnapshot_id)
def cgsnapshot_get_all(context, filters=None):
"""Get all cgsnapshots."""
return IMPL.cgsnapshot_get_all(context, filters)
def cgsnapshot_create(context, values):
"""Create a cgsnapshot from the values dictionary."""
return IMPL.cgsnapshot_create(context, values)
def cgsnapshot_get_all_by_group(context, group_id, filters=None):
"""Get all cgsnapshots belonging to a consistency group."""
return IMPL.cgsnapshot_get_all_by_group(context, group_id, filters)
def cgsnapshot_get_all_by_project(context, project_id, filters=None):
"""Get all cgsnapshots belonging to a project."""
return IMPL.cgsnapshot_get_all_by_project(context, project_id, filters)
def cgsnapshot_update(context, cgsnapshot_id, values):
"""Set the given properties on a cgsnapshot and update it.
Raises NotFound if cgsnapshot does not exist.
"""
return IMPL.cgsnapshot_update(context, cgsnapshot_id, values)
def cgsnapshot_destroy(context, cgsnapshot_id):
"""Destroy the cgsnapshot or raise if it does not exist."""
return IMPL.cgsnapshot_destroy(context, cgsnapshot_id)
def purge_deleted_rows(context, age_in_days):
"""Purge deleted rows older than given age from storage tables
Raises InvalidParameterValue if age_in_days is incorrect.
:returns: number of deleted rows
"""
return IMPL.purge_deleted_rows(context, age_in_days=age_in_days)
def get_booleans_for_table(table_name):
return IMPL.get_booleans_for_table(table_name)
###################
def driver_initiator_data_update(context, initiator, namespace, updates):
"""Create DriverPrivateData from the values dictionary."""
return IMPL.driver_initiator_data_update(context, initiator,
namespace, updates)
def driver_initiator_data_get(context, initiator, namespace):
"""Query for an DriverPrivateData that has the specified key"""
return IMPL.driver_initiator_data_get(context, initiator, namespace)
###################
def image_volume_cache_create(context, host, image_id, image_updated_at,
volume_id, size):
"""Create a new image volume cache entry."""
return IMPL.image_volume_cache_create(context,
host,
image_id,
image_updated_at,
volume_id,
size)
def image_volume_cache_delete(context, volume_id):
"""Delete an image volume cache entry specified by volume id."""
return IMPL.image_volume_cache_delete(context, volume_id)
def image_volume_cache_get_and_update_last_used(context, image_id, host):
"""Query for an image volume cache entry."""
return IMPL.image_volume_cache_get_and_update_last_used(context,
image_id,
host)
def image_volume_cache_get_by_volume_id(context, volume_id):
"""Query to see if a volume id is an image-volume contained in the cache"""
return IMPL.image_volume_cache_get_by_volume_id(context, volume_id)
def image_volume_cache_get_all_for_host(context, host):
"""Query for all image volume cache entry for a host."""
return IMPL.image_volume_cache_get_all_for_host(context, host)
###################
def get_model_for_versioned_object(versioned_object):
return IMPL.get_model_for_versioned_object(versioned_object)
def get_by_id(context, model, id, *args, **kwargs):
return IMPL.get_by_id(context, model, id, *args, **kwargs)
class Condition(object):
"""Class for normal condition values for conditional_update."""
def __init__(self, value, field=None):
self.value = value
# Field is optional and can be passed when getting the filter
self.field = field
def get_filter(self, model, field=None):
return IMPL.condition_db_filter(model, self._get_field(field),
self.value)
def _get_field(self, field=None):
# We must have a defined field on initialization or when called
field = field or self.field
if not field:
raise ValueError(_('Condition has no field.'))
return field
class Not(Condition):
"""Class for negated condition values for conditional_update.
    By default NULL values will be treated the way Python treats None rather
    than the way SQL treats NULL.
    So, for example, when the values are (1, 2) the negated condition will
    evaluate to True for the value 3 or NULL, instead of only for 3 as SQL
    would.
"""
def __init__(self, value, field=None, auto_none=True):
super(Not, self).__init__(value, field)
self.auto_none = auto_none
def get_filter(self, model, field=None):
# If implementation has a specific method use it
if hasattr(IMPL, 'condition_not_db_filter'):
return IMPL.condition_not_db_filter(model, self._get_field(field),
self.value, self.auto_none)
        # Otherwise the non-negated filter must admit the ~ operator for negation
return ~super(Not, self).get_filter(model, field)
class Case(object):
"""Class for conditional value selection for conditional_update."""
def __init__(self, whens, value=None, else_=None):
self.whens = whens
self.value = value
self.else_ = else_
def is_orm_value(obj):
"""Check if object is an ORM field."""
return IMPL.is_orm_value(obj)
def conditional_update(context, model, values, expected_values, filters=(),
include_deleted='no', project_only=False):
"""Compare-and-swap conditional update.
Update will only occur in the DB if conditions are met.
We have 4 different condition types we can use in expected_values:
- Equality: {'status': 'available'}
- Inequality: {'status': vol_obj.Not('deleting')}
    - In range: {'status': ['available', 'error']}
    - Not in range: {'status': vol_obj.Not(['in-use', 'attaching'])}
Method accepts additional filters, which are basically anything that
can be passed to a sqlalchemy query's filter method, for example:
[~sql.exists().where(models.Volume.id == models.Snapshot.volume_id)]
We can select values based on conditions using Case objects in the
'values' argument. For example:
has_snapshot_filter = sql.exists().where(
models.Snapshot.volume_id == models.Volume.id)
case_values = db.Case([(has_snapshot_filter, 'has-snapshot')],
else_='no-snapshot')
db.conditional_update(context, models.Volume, {'status': case_values},
{'status': 'available'})
And we can use DB fields for example to store previous status in the
corresponding field even though we don't know which value is in the db
from those we allowed:
db.conditional_update(context, models.Volume,
{'status': 'deleting',
'previous_status': models.Volume.status},
{'status': ('available', 'error')})
WARNING: SQLAlchemy does not allow selecting order of SET clauses, so
for now we cannot do things like
{'previous_status': model.status, 'status': 'retyping'}
because it will result in both previous_status and status being set to
'retyping'. Issue has been reported [1] and a patch to fix it [2] has
been submitted.
[1]: https://bitbucket.org/zzzeek/sqlalchemy/issues/3541/
[2]: https://github.com/zzzeek/sqlalchemy/pull/200
:param values: Dictionary of key-values to update in the DB.
:param expected_values: Dictionary of conditions that must be met
for the update to be executed.
:param filters: Iterable with additional filters
:param include_deleted: Should the update include deleted items, this
is equivalent to read_deleted
:param project_only: Should the query be limited to context's project.
    :returns: number of db rows that were updated
"""
return IMPL.conditional_update(context, model, values, expected_values,
filters, include_deleted, project_only)
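# ---------------------------------------------------------------------------
# Wiring sketch (not part of this module; the module path below is a
# placeholder): ``IMPL`` used throughout this file is a lazily-loaded backend
# handle. OpenStack-style projects typically create it near the top of the
# module roughly as follows:
#
#   from oslo_config import cfg
#   from oslo_db import concurrency as db_concurrency
#
#   CONF = cfg.CONF
#   _BACKEND_MAPPING = {'sqlalchemy': 'storage.db.sqlalchemy.api'}
#   IMPL = db_concurrency.TpoolDbapiWrapper(CONF, _BACKEND_MAPPING)
# ---------------------------------------------------------------------------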
| apache-2.0 | 5,485,486,144,737,427,000 | 35.135812 | 79 | 0.643982 | false | 4.002149 | false | false | false |
HelloLily/hellolily | lily/search/scan_search.py | 1 | 1365 | import inspect
from django.conf import settings
from elasticutils.contrib.django import MappingType
from lily.search.base_mapping import BaseMapping
class ModelMappings(object):
mappings = []
model_to_mappings = {}
app_to_mappings = {}
@classmethod
def scan(cls, apps_to_scan=settings.INSTALLED_APPS):
for app in apps_to_scan:
# Try because not every app has a search.py.
try:
# Import the child module 'search', hence the additional
# parameters. (Otherwise only the top module is returned).
search_module = __import__('%s.search' % app, globals(), locals(), ['search'])
for name_member in inspect.getmembers(search_module, inspect.isclass):
member = name_member[1]
# Check if we defined a mapping class. We shall exclude
# members of BaseMapping or MappingType itself.
if issubclass(member, MappingType) and member is not BaseMapping and member is not MappingType:
cls.mappings.append(member)
cls.model_to_mappings[member.get_model()] = member
cls.app_to_mappings[app] = member
            except ImportError:
                # Not every app defines a search.py; skip those that do not.
                pass
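# Usage sketch (illustrative; the model and app label below are assumptions):
#
#   ModelMappings.scan()                          # walks settings.INSTALLED_APPS
#   mapping = ModelMappings.model_to_mappings.get(SomeModel)
#   app_mapping = ModelMappings.app_to_mappings.get('lily.contacts')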
| agpl-3.0 | -6,483,834,906,151,082,000 | 38 | 115 | 0.578755 | false | 4.690722 | false | false | false |
jobovy/galpy | galpy/potential/interpSphericalPotential.py | 1 | 4568 | ###################3###################3###################3##################
# interpSphericalPotential.py: build spherical potential through interpolation
###################3###################3###################3##################
import numpy
from scipy import interpolate
from .SphericalPotential import SphericalPotential
from .Potential import _evaluateRforces, _evaluatePotentials
from ..util.conversion import physical_compatible, get_physical
class interpSphericalPotential(SphericalPotential):
"""__init__(self,rforce=None,rgrid=numpy.geomspace(0.01,20,101),Phi0=None,ro=None,vo=None)
Class that interpolates a spherical potential on a grid"""
def __init__(self,rforce=None,rgrid=numpy.geomspace(0.01,20,101),Phi0=None,
ro=None,vo=None):
"""__init__(self,rforce=None,rgrid=numpy.geomspace(0.01,20,101),Phi0=None,ro=None,vo=None)
NAME:
__init__
PURPOSE:
initialize an interpolated, spherical potential
INPUT:
rforce= (None) Either a) function that gives the radial force as a function of r or b) a galpy Potential instance or list thereof
rgrid= (numpy.geomspace(0.01,20,101)) radial grid on which to evaluate the potential for interpolation (note that beyond rgrid[-1], the potential is extrapolated as -GM(<rgrid[-1])/r)
Phi0= (0.) value of the potential at rgrid[0] (only necessary when rforce is a function, for galpy potentials automatically determined)
ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)
OUTPUT:
(none)
HISTORY:
2020-07-13 - Written - Bovy (UofT)
"""
SphericalPotential.__init__(self,amp=1.,ro=ro,vo=vo)
self._rgrid= rgrid
# Determine whether rforce is a galpy Potential or list thereof
try:
_evaluateRforces(rforce,1.,0.)
        except Exception:
_rforce= rforce
Phi0= 0. if Phi0 is None else Phi0
else:
_rforce= lambda r: _evaluateRforces(rforce,r,0.)
# Determine Phi0
Phi0= _evaluatePotentials(rforce,rgrid[0],0.)
# Also check that unit systems are compatible
if not physical_compatible(self,rforce):
raise RuntimeError('Unit conversion factors ro and vo incompatible between Potential to be interpolated and the factors given to interpSphericalPotential')
# If set for the parent, set for the interpolated
phys= get_physical(rforce,include_set=True)
if phys['roSet']:
self.turn_physical_on(ro=phys['ro'])
if phys['voSet']:
self.turn_physical_on(vo=phys['vo'])
self._rforce_grid= numpy.array([_rforce(r) for r in rgrid])
self._force_spline= interpolate.InterpolatedUnivariateSpline(
self._rgrid,self._rforce_grid,k=3,ext=0)
# Get potential and r2deriv as splines for the integral and derivative
self._pot_spline= self._force_spline.antiderivative()
self._Phi0= Phi0+self._pot_spline(self._rgrid[0])
self._r2deriv_spline= self._force_spline.derivative()
# Extrapolate as mass within rgrid[-1]
self._rmin= rgrid[0]
self._rmax= rgrid[-1]
self._total_mass= -self._rmax**2.*self._force_spline(self._rmax)
self._Phimax= -self._pot_spline(self._rmax)+self._Phi0\
+self._total_mass/self._rmax
self.hasC= True
self.hasC_dxdv= True
self.hasC_dens= True
return None
def _revaluate(self,r,t=0.):
out= numpy.empty_like(r)
out[r >= self._rmax]= -self._total_mass/r[r >= self._rmax]+self._Phimax
out[r < self._rmax]= -self._pot_spline(r[r < self._rmax])+self._Phi0
return out
def _rforce(self,r,t=0.):
out= numpy.empty_like(r)
out[r >= self._rmax]= -self._total_mass/r[r >= self._rmax]**2.
out[r < self._rmax]= self._force_spline(r[r < self._rmax])
return out
def _r2deriv(self,r,t=0.):
out= numpy.empty_like(r)
out[r >= self._rmax]= -2.*self._total_mass/r[r >= self._rmax]**3.
out[r < self._rmax]= -self._r2deriv_spline(r[r < self._rmax])
return out
def _rdens(self,r,t=0.):
out= numpy.empty_like(r)
out[r >= self._rmax]= 0.
# Fall back onto Poisson eqn., implemented in SphericalPotential
out[r < self._rmax]= SphericalPotential._rdens(self,r[r < self._rmax])
return out
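# -----------------------------------------------------------------------------
# Usage sketch (illustrative; assumes galpy's internal, dimensionless units):
#
#   from galpy.potential import HernquistPotential
#   hp= HernquistPotential(amp=1.,a=1.)
#   ip= interpSphericalPotential(rforce=hp,
#                                rgrid=numpy.geomspace(0.01,20.,101))
#   # ip now interpolates hp on the grid; beyond rgrid[-1] the potential is
#   # extrapolated as a point mass, -GM(<rgrid[-1])/r
# -----------------------------------------------------------------------------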
| bsd-3-clause | -5,407,083,161,345,688,000 | 42.09434 | 194 | 0.601576 | false | 3.457986 | false | false | false |
OpenTechFund/WebApp | opentech/apply/funds/migrations/0045_new_workflow.py | 1 | 1533 | # Generated by Django 2.0.8 on 2018-10-24 12:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('funds', '0044_add_named_blocks'),
]
operations = [
migrations.AlterField(
model_name='applicationbase',
name='workflow_name',
field=models.CharField(choices=[('single', 'Request'), ('single_ext', 'Request with external review'), ('double', 'Concept & Proposal')], default='single', max_length=100, verbose_name='Workflow'),
),
migrations.AlterField(
model_name='applicationsubmission',
name='workflow_name',
field=models.CharField(choices=[('single', 'Request'), ('single_ext', 'Request with external review'), ('double', 'Concept & Proposal')], default='single', max_length=100, verbose_name='Workflow'),
),
migrations.AlterField(
model_name='labbase',
name='workflow_name',
field=models.CharField(choices=[('single', 'Request'), ('single_ext', 'Request with external review'), ('double', 'Concept & Proposal')], default='single', max_length=100, verbose_name='Workflow'),
),
migrations.AlterField(
model_name='roundbase',
name='workflow_name',
field=models.CharField(choices=[('single', 'Request'), ('single_ext', 'Request with external review'), ('double', 'Concept & Proposal')], default='single', max_length=100, verbose_name='Workflow'),
),
]
| gpl-2.0 | 7,132,013,135,087,195,000 | 45.454545 | 209 | 0.609263 | false | 4.330508 | false | false | false |
ActiveState/code | recipes/Python/496993_Separating_Pattern_ImplementatiYour/recipe-496993.py | 1 | 3324 | ##########
# pattern_impl.py
##########
from installmethod import installmethod # the installmethod from recipe: 223613
class ObserverPattern:
"""
A reusable implementation of the Observer pattern.
"""
theSubject = None
observers = {}
class Subject:
def __init__(self):
self.observers = []
def attach(self, observer):
self.observers.append(observer)
def detach(self, observer):
self.observers.remove(observer)
def notify(self):
for observer in self.observers:
observer.update(self)
def decoration(self):
self.decorated_trigger()
self.notify()
class Observer:
def __init__(self, subject):
subject.attach(self)
        def update(self, subject):
            currentState = subject.get_current_state()
            self.react_to_observation(currentState)
def specify_subject(self, subject):
self.theSubject = subject
self.make_generalization(subject, self.Subject)
def add_observer(self, observer):
self.observers[observer.__name__] = observer
self.make_generalization(observer, self.Observer)
def make_generalization(self, childClass, parentClass):
bases = list(childClass.__bases__)
bases.append(parentClass)
childClass.__bases__ = tuple(bases)
def make_observation(self, changeObservation, changeReaction):
func = getattr(self.theSubject, changeObservation)
installmethod(func, self.theSubject, "get_current_state")
for observer in self.observers.keys():
func = getattr(self.observers[observer], changeReaction)
installmethod(func, self.observers[observer], "react_to_observation")
def add_trigger(self, trigger):
func = getattr(self.theSubject, trigger)
installmethod(func, self.theSubject, "decorated_trigger")
func = getattr(self.theSubject, "decoration")
installmethod(func, self.theSubject, trigger)
##########
# example.py
##########
class ClockTimer:
def get_time(self):
# get current state of the subject
return self.currentTime
def tick(self):
# update internal time-keeping state
import time
self.currentTime = time.ctime()
class DigitalClock:
def draw(self, currentTime):
# display currentTime as a digital clock
print "DigitalClock: current time is", currentTime
class AnalogClock:
def draw(self, currentTime):
# display currentTime as an analog clock
print "AnalogClock: current time is", currentTime
if __name__ == '__main__':
from pattern_impl import ObserverPattern
observerPattern = ObserverPattern()
observerPattern.specify_subject(ClockTimer)
observerPattern.add_observer(DigitalClock)
observerPattern.add_observer(AnalogClock)
observerPattern.make_observation("get_time", "draw")
observerPattern.add_trigger("tick")
aTimer = ClockTimer()
dClock = DigitalClock(aTimer)
aClock = AnalogClock(aTimer)
import time
for i in range(10):
print "\nTick!"
aTimer.tick()
time.sleep(1)
| mit | -542,397,210,227,763,600 | 28.945946 | 81 | 0.619134 | false | 4.339426 | false | false | false |
bitmovin/bitmovin-python | tests/bitmovin/services/encodings/drms/playready_drm_tests.py | 1 | 16848 | import unittest
import uuid
import json
from bitmovin import Bitmovin, Response, Stream, StreamInput, EncodingOutput, ACLEntry, Encoding, \
FMP4Muxing, MuxingStream, PlayReadyDRM, SelectionMode, ACLPermission, PlayReadyDRMAdditionalInformation
from bitmovin.errors import BitmovinApiError, InvalidTypeError
from tests.bitmovin import BitmovinTestCase
from bitmovin.resources.enums import PlayReadyMethod
class PlayReadyDRMTests(BitmovinTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
def setUp(self):
super().setUp()
self.bitmovin = Bitmovin(self.api_key)
self.assertIsNotNone(self.bitmovin)
self.assertTrue(isinstance(self.bitmovin, Bitmovin))
self.sampleEncoding = self._create_sample_encoding() # type: Encoding
def tearDown(self):
super().tearDown()
def test_create_drm(self):
fmp4_muxing = self._create_muxing() # type: FMP4Muxing
self.assertIsNotNone(fmp4_muxing.id)
sample_drm = self._get_sample_drm_playready()
sample_drm.outputs = fmp4_muxing.outputs
created_drm_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.create(
object_=sample_drm, encoding_id=self.sampleEncoding.id, muxing_id=fmp4_muxing.id)
self.assertIsNotNone(created_drm_response)
self.assertIsNotNone(created_drm_response.resource)
self.assertIsNotNone(created_drm_response.resource.id)
drm_resource = created_drm_response.resource # type: PlayReadyDRM
self._compare_drms(sample_drm, drm_resource)
def test_create_drm_with_additional_information(self):
fmp4_muxing = self._create_muxing() # type: FMP4Muxing
self.assertIsNotNone(fmp4_muxing.id)
sample_drm = self._get_sample_drm_playready_with_additional_information()
sample_drm.outputs = fmp4_muxing.outputs
created_drm_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.create(
object_=sample_drm, encoding_id=self.sampleEncoding.id, muxing_id=fmp4_muxing.id)
self.assertIsNotNone(created_drm_response)
self.assertIsNotNone(created_drm_response.resource)
self.assertIsNotNone(created_drm_response.resource.id)
drm_resource = created_drm_response.resource # type: PlayReadyDRM
self.assertIsNotNone(drm_resource.additionalInformation)
self._compare_drms(sample_drm, drm_resource)
def test_create_playready_piff(self):
fmp4_muxing = self._create_muxing() # type: FMP4Muxing
self.assertIsNotNone(fmp4_muxing.id)
sample_drm = self._get_sample_drm_playready_piff()
sample_drm.outputs = fmp4_muxing.outputs
created_drm_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.create(
object_=sample_drm, encoding_id=self.sampleEncoding.id, muxing_id=fmp4_muxing.id)
self.assertIsNotNone(created_drm_response)
self.assertIsNotNone(created_drm_response.resource)
self.assertIsNotNone(created_drm_response.resource.id)
drm_resource = created_drm_response.resource # type: PlayReadyDRM
self._compare_drms(sample_drm, drm_resource)
def test_create_playready_key(self):
fmp4_muxing = self._create_muxing() # type: FMP4Muxing
self.assertIsNotNone(fmp4_muxing.id)
sample_drm = self._get_sample_drm_playready_key()
sample_drm.outputs = fmp4_muxing.outputs
created_drm_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.create(
object_=sample_drm, encoding_id=self.sampleEncoding.id, muxing_id=fmp4_muxing.id)
self.assertIsNotNone(created_drm_response)
self.assertIsNotNone(created_drm_response.resource)
self.assertIsNotNone(created_drm_response.resource.id)
drm_resource = created_drm_response.resource # type: PlayReadyDRM
self._compare_drms(sample_drm, drm_resource)
def test_assign_unsuitable_playready_method(self):
sample_drm = self._get_sample_drm_playready_piff()
with self.assertRaises(InvalidTypeError):
sample_drm.method = ACLPermission.PRIVATE
def test_create_drm_without_name(self):
fmp4_muxing = self._create_muxing() # type: FMP4Muxing
self.assertIsNotNone(fmp4_muxing.id)
sample_drm = self._get_sample_drm_playready()
sample_drm.name = None
sample_drm.outputs = fmp4_muxing.outputs
created_drm_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.create(
object_=sample_drm, encoding_id=self.sampleEncoding.id, muxing_id=fmp4_muxing.id)
self.assertIsNotNone(created_drm_response)
self.assertIsNotNone(created_drm_response.resource)
self.assertIsNotNone(created_drm_response.resource.id)
drm_resource = created_drm_response.resource # type: PlayReadyDRM
self._compare_drms(sample_drm, drm_resource)
def test_retrieve_drm(self):
fmp4_muxing = self._create_muxing()
self.assertIsNotNone(fmp4_muxing.id)
sample_drm = self._get_sample_drm_playready()
sample_drm.outputs = fmp4_muxing.outputs
created_drm_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.create(
object_=sample_drm, encoding_id=self.sampleEncoding.id, muxing_id=fmp4_muxing.id)
self.assertIsNotNone(created_drm_response)
self.assertIsNotNone(created_drm_response.resource)
self.assertIsNotNone(created_drm_response.resource.id)
drm_resource = created_drm_response.resource # type: PlayReadyDRM
self._compare_drms(sample_drm, drm_resource)
retrieved_drm_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.retrieve(
encoding_id=self.sampleEncoding.id, muxing_id=fmp4_muxing.id, drm_id=drm_resource.id)
self.assertIsNotNone(retrieved_drm_response)
self.assertIsNotNone(retrieved_drm_response.resource)
self._compare_drms(retrieved_drm_response.resource, created_drm_response.resource)
def test_delete_drm(self):
fmp4_muxing = self._create_muxing()
self.assertIsNotNone(fmp4_muxing.id)
sample_drm = self._get_sample_drm_playready()
sample_drm.outputs = fmp4_muxing.outputs
created_drm_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.create(
object_=sample_drm, encoding_id=self.sampleEncoding.id, muxing_id=fmp4_muxing.id)
self.assertIsNotNone(created_drm_response)
self.assertIsNotNone(created_drm_response.resource)
self.assertIsNotNone(created_drm_response.resource.id)
drm_resource = created_drm_response.resource # type: PlayReadyDRM
self._compare_drms(sample_drm, drm_resource)
deleted_minimal_resource = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.delete(
muxing_id=fmp4_muxing.id, encoding_id=self.sampleEncoding.id, drm_id=drm_resource.id)
self.assertIsNotNone(deleted_minimal_resource)
self.assertIsNotNone(deleted_minimal_resource.resource)
self.assertIsNotNone(deleted_minimal_resource.resource.id)
try:
self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.retrieve(encoding_id=self.sampleEncoding.id,
muxing_id=fmp4_muxing.id, drm_id=drm_resource.id)
self.fail(
'Previous statement should have thrown an exception. ' +
'Retrieving muxing after deleting it should not be possible.'
)
except BitmovinApiError:
pass
def test_list_drms(self):
fmp4_muxing = self._create_muxing()
self.assertIsNotNone(fmp4_muxing.id)
sample_drm = self._get_sample_drm_playready()
sample_drm.outputs = fmp4_muxing.outputs
created_drm_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.create(
object_=sample_drm, encoding_id=self.sampleEncoding.id, muxing_id=fmp4_muxing.id)
self.assertIsNotNone(created_drm_response)
self.assertIsNotNone(created_drm_response.resource)
self.assertIsNotNone(created_drm_response.resource.id)
drm_resource = created_drm_response.resource # type: PlayReadyDRM
self._compare_drms(sample_drm, drm_resource)
drms = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.list(encoding_id=self.sampleEncoding.id,
muxing_id=fmp4_muxing.id)
self.assertIsNotNone(drms)
self.assertIsNotNone(drms.resource)
self.assertIsNotNone(drms.response)
self.assertIsInstance(drms.resource, list)
self.assertIsInstance(drms.response, Response)
self.assertGreater(drms.resource.__sizeof__(), 1)
def test_retrieve_stream_custom_data(self):
fmp4_muxing = self._create_muxing()
self.assertIsNotNone(fmp4_muxing.id)
sample_drm = self._get_sample_drm_playready()
sample_drm.outputs = fmp4_muxing.outputs
sample_drm.customData = 'my_fancy_awesome_custom_data'
created_drm_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.create(
object_=sample_drm, encoding_id=self.sampleEncoding.id, muxing_id=fmp4_muxing.id)
self.assertIsNotNone(created_drm_response)
self.assertIsNotNone(created_drm_response.resource)
self.assertIsNotNone(created_drm_response.resource.id)
drm_resource = created_drm_response.resource # type: PlayReadyDRM
self._compare_drms(sample_drm, drm_resource)
custom_data_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.retrieve_custom_data(
muxing_id=fmp4_muxing.id,
encoding_id=self.sampleEncoding.id,
drm_id=drm_resource.id
)
custom_data = custom_data_response.resource
self.assertEqual(sample_drm.customData, json.loads(custom_data.customData))
def _create_muxing(self):
sample_muxing = self._get_sample_muxing()
created_muxing_response = self.bitmovin.encodings.Muxing.FMP4.create(object_=sample_muxing,
encoding_id=self.sampleEncoding.id)
self.assertIsNotNone(created_muxing_response)
self.assertIsNotNone(created_muxing_response.resource)
self.assertIsNotNone(created_muxing_response.resource.id)
self._compare_muxings(sample_muxing, created_muxing_response.resource)
return created_muxing_response.resource
def _compare_drms(self, first: PlayReadyDRM, second: PlayReadyDRM):
"""
:param first:
:param second:
:return: bool
"""
self.assertEqual(first.kid, second.kid)
self.assertEqual(first.keySeed, second.keySeed)
self.assertEqual(first.key, second.key)
self.assertEqual(first.method, second.method)
self.assertEqual(first.laUrl, second.laUrl)
self.assertEqual(len(first.outputs), len(second.outputs))
self.assertEqual(first.name, second.name)
self.assertEqual(first.description, second.description)
if first.additionalInformation is None and second.additionalInformation is None:
return True
self.assertEqual(first.additionalInformation.wrmHeaderCustomAttributes,
second.additionalInformation.wrmHeaderCustomAttributes)
return True
def _compare_muxings(self, first: FMP4Muxing, second: FMP4Muxing):
"""
        :param first: FMP4Muxing
        :param second: FMP4Muxing
:return: bool
"""
self.assertEqual(first.segmentLength, second.segmentLength)
self.assertEqual(first.segmentNaming, second.segmentNaming)
self.assertEqual(len(first.outputs), len(second.outputs))
self.assertEqual(first.name, second.name)
self.assertEqual(first.description, second.description)
return True
def _get_sample_drm_playready(self):
playready_drm_settings = self.settings.get('sampleObjects').get('drmConfigurations').get('PlayReady')
drm = PlayReadyDRM(key_seed=playready_drm_settings[0].get('keySeed'),
kid=playready_drm_settings[0].get('kid'),
method=playready_drm_settings[0].get('method'),
la_url=playready_drm_settings[0].get('laUrl'),
name='Sample Playready DRM')
return drm
def _get_sample_drm_playready_with_additional_information(self):
playready_drm_settings = self.settings.get('sampleObjects').get('drmConfigurations').get('PlayReady')
drm = PlayReadyDRM(key_seed=playready_drm_settings[0].get('keySeed'),
kid=playready_drm_settings[0].get('kid'),
method=playready_drm_settings[0].get('method'),
la_url=playready_drm_settings[0].get('laUrl'),
additional_information=PlayReadyDRMAdditionalInformation(
wrm_header_custom_attributes="<custom><tag1>text</tag1></custom>"),
name='Sample Playready DRM')
return drm
def _get_sample_drm_playready_piff(self):
playready_drm_settings = self.settings.get('sampleObjects').get('drmConfigurations').get('PlayReady')
drm = PlayReadyDRM(key_seed=playready_drm_settings[0].get('keySeed'),
kid=playready_drm_settings[0].get('kid'),
method=PlayReadyMethod.PIFF_CTR,
la_url=playready_drm_settings[0].get('laUrl'),
name='Sample Playready PIFF DRM')
return drm
def _get_sample_drm_playready_key(self):
playready_drm_settings = self.settings.get('sampleObjects').get('drmConfigurations').get('PlayReady')
drm = PlayReadyDRM(key=playready_drm_settings[0].get('key'),
kid=playready_drm_settings[0].get('kid'),
method=playready_drm_settings[0].get('method'),
la_url=playready_drm_settings[0].get('laUrl'),
name='Sample Playready DRM')
return drm
def _get_sample_muxing(self):
stream = self._get_sample_stream()
create_stream_response = self.bitmovin.encodings.Stream.create(object_=stream,
encoding_id=self.sampleEncoding.id)
self.assertIsNotNone(create_stream_response)
self.assertIsNotNone(create_stream_response.resource)
self.assertIsNotNone(create_stream_response.resource.id)
muxing_stream = MuxingStream(stream_id=create_stream_response.resource.id)
muxing = FMP4Muxing(streams=[muxing_stream], segment_length=4, segment_naming='seg_%number%.ts',
outputs=stream.outputs, name='Sample FMP4 Muxing')
return muxing
def _get_sample_stream(self):
sample_codec_configuration = self.utils.get_sample_h264_codec_configuration()
h264_codec_configuration = self.bitmovin.codecConfigurations.H264.create(sample_codec_configuration)
(sample_input, sample_files) = self.utils.get_sample_s3_input()
s3_input = self.bitmovin.inputs.S3.create(sample_input)
stream_input = StreamInput(input_id=s3_input.resource.id,
input_path=sample_files.get('854b9c98-17b9-49ed-b75c-3b912730bfd1'),
selection_mode=SelectionMode.AUTO)
acl_entry = ACLEntry(scope='string', permission=ACLPermission.PUBLIC_READ)
sample_output = self.utils.get_sample_s3_output()
s3_output = self.bitmovin.outputs.S3.create(sample_output)
encoding_output = EncodingOutput(output_id=s3_output.resource.id,
output_path='/bitmovin-python/StreamTests/' + str(uuid.uuid4()),
acl=[acl_entry])
stream = Stream(codec_configuration_id=h264_codec_configuration.resource.id,
input_streams=[stream_input],
outputs=[encoding_output],
name='Sample Stream')
self.assertIsNotNone(stream.codecConfigId)
self.assertIsNotNone(stream.inputStreams)
self.assertIsNotNone(stream.outputs)
return stream
def _create_sample_encoding(self):
sample_encoding = self.utils.get_sample_encoding()
resource_response = self.bitmovin.encodings.Encoding.create(sample_encoding)
return resource_response.resource
if __name__ == '__main__':
unittest.main()
| unlicense | -688,011,659,537,507,000 | 45.541436 | 120 | 0.657882 | false | 3.622447 | true | false | false |
Cepave/portal | web/controller/host.py | 1 | 6380 | # -*- coding:utf-8 -*-
__author__ = 'Ulric Qin'
from flask import jsonify, request, render_template, g, make_response
from web import app
from web.model.host_group import HostGroup
from web.model.group_host import GroupHost
from web.model.grp_tpl import GrpTpl
from web.model.host import Host
from web.model.template import Template
from frame import config
from fe_api import post2FeUpdateEventCase
import time
import logging
log = logging.getLogger(__name__)
@app.route('/group/<group_id>/hosts.txt')
def group_hosts_export(group_id):
group_id = int(group_id)
group = HostGroup.read(where='id = %s', params=[group_id])
if not group:
return jsonify(msg='no such group %s' % group_id)
vs, _ = Host.query(1, 10000000, '', '0', group_id)
names = [v.hostname for v in vs]
response = make_response('\n'.join(names))
response.headers["content-type"] = "text/plain"
return response
@app.route('/group/<group_id>/hosts')
def group_hosts_list(group_id):
g.xbox = request.args.get('xbox', '')
group_id = int(group_id)
group = HostGroup.read(where='id = %s', params=[group_id])
if not group:
return jsonify(msg='no such group %s' % group_id)
page = int(request.args.get('p', 1))
limit = int(request.args.get('limit', 10))
query = request.args.get('q', '')
maintaining = request.args.get('maintaining', '0')
vs, total = Host.query(page, limit, query, maintaining, group_id)
return render_template(
'host/index.html',
data={
'vs': vs,
'total': total,
'query': query,
'limit': limit,
'page': page,
'maintaining': maintaining,
'group': group,
},
config=config
)
@app.route('/host/remove', methods=['POST'])
def host_remove_post():
group_id = int(request.form['grp_id'].strip())
host_ids = request.form['host_ids'].strip()
alarmAdUrl = config.JSONCFG['shortcut']['falconUIC'] + "/api/v1/alarmadjust/whenendpointunbind"
GroupHost.unbind(group_id, host_ids)
for host_id in host_ids.split(","):
data = {'hostgroupId': group_id, 'hostId': host_id}
respCode = post2FeUpdateEventCase(alarmAdUrl, data)
if respCode != 200:
log.error(alarmAdUrl + " got " + str(respCode) + " with " + str(data))
return jsonify(msg='delete host is failed , please try again!')
return jsonify(msg='')
@app.route('/host/maintain', methods=['POST'])
def host_maintain_post():
begin = int(request.form['begin'].strip())
end = int(request.form['end'].strip())
host_ids = request.form['host_ids'].strip()
alarmAdUrl = config.JSONCFG['shortcut']['falconUIC'] + "/api/v1/alarmadjust/whenendpointonmaintain"
if begin <= 0 or end <= 0:
return jsonify(msg='begin or end is invalid')
for host_id in host_ids.split(","):
data = {'hostId': host_id, 'maintainBegin': begin, 'maintainEnd': end}
respCode = post2FeUpdateEventCase(alarmAdUrl, data)
if respCode != 200:
log.error(alarmAdUrl + " got " + str(respCode) + " with " + str(data))
return jsonify(msg=Host.maintain(begin, end, host_ids))
# Clear the maintenance (maintain) window for the given hosts
@app.route('/host/reset', methods=['POST'])
def host_reset_post():
host_ids = request.form['host_ids'].strip()
return jsonify(msg=Host.no_maintain(host_ids))
@app.route('/host/add')
def host_add_get():
group_id = request.args.get('group_id', '')
if not group_id:
return jsonify(msg='no group_id given')
group_id = int(group_id)
group = HostGroup.read('id = %s', [group_id])
if not group:
return jsonify(msg='no such group')
return render_template('host/add.html', group=group, config=config)
@app.route('/host/add', methods=['POST'])
def host_add_post():
group_id = request.form['group_id']
if not group_id:
return jsonify(msg='no group_id given')
group_id = int(group_id)
group = HostGroup.read('id = %s', [group_id])
if not group:
return jsonify(msg='no such group')
hosts = request.form['hosts'].strip()
if not hosts:
return jsonify(msg='hosts is blank')
host_arr = hosts.splitlines()
safe_host_arr = [h for h in host_arr if h]
if not safe_host_arr:
return jsonify(msg='hosts is blank')
success = []
failure = []
for h in safe_host_arr:
msg = GroupHost.bind(group_id, h)
if not msg:
success.append('%s<br>' % h)
else:
failure.append('%s %s<br>' % (h, msg))
data = '<div class="alert alert-danger" role="alert">failure:<hr>' + ''.join(
failure) + '</div><div class="alert alert-success" role="alert">success:<hr>' + ''.join(success) + '</div>'
return jsonify(msg='', data=data)
# Show the host groups that a given host is bound to
@app.route('/host/<host_id>/groups')
def host_groups_get(host_id):
host_id = int(host_id)
h = Host.read('id = %s', params=[host_id])
if not h:
return jsonify(msg='no such host')
group_ids = GroupHost.group_ids(h.id)
groups = [HostGroup.read('id = %s', [group_id]) for group_id in group_ids]
return render_template('host/groups.html', groups=groups, host=h, config=config)
@app.route('/host/<host_id>/templates')
def host_templates_get(host_id):
host_id = int(host_id)
h = Host.read('id = %s', params=[host_id])
if not h:
return jsonify(msg='no such host')
group_ids = GroupHost.group_ids(h.id)
templates = GrpTpl.tpl_set(group_ids)
for v in templates:
v.parent = Template.get(v.parent_id)
return render_template('host/templates.html', config=config, **locals())
@app.route('/host/unbind')
def host_unbind_get():
    host_id = request.args.get('host_id', '').strip()
    if not host_id:
        return jsonify(msg='host_id is blank')
    group_id = request.args.get('group_id', '').strip()
    if not group_id:
        return jsonify(msg='group_id is blank')
    alarmAdUrl = config.JSONCFG['shortcut']['falconUIC'] + "/api/v1/alarmadjust/whenendpointunbind"
    # Build the alarm-adjust payload only after both ids are validated
    data = {'hostgroupId': group_id, 'hostId': host_id}
    GroupHost.unbind(int(group_id), host_id)
respCode = post2FeUpdateEventCase(alarmAdUrl, data)
if respCode != 200:
log.error(alarmAdUrl + " got " + str(respCode) + " with " + str(data))
return jsonify(msg='')
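# Example requests against the endpoints above (ids and port are placeholders;
# the actual port depends on how the Flask app is deployed):
#
#   curl 'http://localhost:4001/group/12/hosts.txt'
#   curl 'http://localhost:4001/host/unbind?host_id=3&group_id=12'
#   curl -X POST -d 'grp_id=12&host_ids=3,4' 'http://localhost:4001/host/remove'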
| apache-2.0 | -4,357,326,261,906,748,400 | 31.943005 | 115 | 0.619534 | false | 3.232334 | true | false | false |
InitialState/python_appender | ISStreamer/Streamer.py | 1 | 10347 | # local config helper stuff
try:
import ISStreamer.configutil as configutil
except ImportError:
import configutil
try:
import ISStreamer.version as version
except ImportError:
import version
import uuid
# python 2 and 3 conversion support
import sys
if (sys.version_info < (2,7,0)):
sys.stderr.write("You need at least python 2.7.0 to use the ISStreamer")
exit(1)
elif (sys.version_info >= (3,0)):
import http.client as httplib
else:
import httplib
import json
# time stuff
import datetime
import time
# performance stuff
import threading
import collections
import csv
class Streamer:
BucketName = ""
AccessKey = ""
Channel = ""
BufferSize = 10
StreamApiBase = ""
LogQueue = None
DebugLevel = 0
BucketKey = ""
IsClosed = True
Offline = False
Async = True
LocalFile = None
ApiVersion = '<=0.0.4'
MissedEvents = None
def __init__(self, bucket_name="", bucket_key="", access_key="", ini_file_location=None, debug_level=0, buffer_size=10, offline=None, use_async=True):
config = configutil.getConfig(ini_file_location)
if (offline != None):
self.Offline = offline
else:
if (config["offline_mode"] == "false"):
self.Offline = False
else:
self.Offline = True
self.Async = use_async
if (self.Offline):
try:
file_location = "{}.csv".format(config["offline_file"])
self.LocalFileHandler = open(file_location, 'w')
self.LocalFile = csv.writer(self.LocalFileHandler)
            except IOError:
                print("There was an issue opening the offline log file: {}".format(file_location))
if (config == None and bucket_name=="" and access_key == ""):
raise Exception("config not found and arguments empty")
if (bucket_name == ""):
bucket_name = config["bucket"]
else:
bucket_name = bucket_name
if (access_key == ""):
self.AccessKey = config["access_key"]
else:
self.AccessKey = access_key
#self.LogQueue = Queue.Queue(self.BufferSize)
self.BucketKey = bucket_key
self.BufferSize = buffer_size
self.LogQueue = collections.deque()
self.StreamApiBase = config["stream_api_base"]
self.set_bucket(bucket_name, bucket_key)
self.DebugLevel = debug_level
self.IsClosed = False
self.console_message("access_key: {accessKey}".format(accessKey=self.AccessKey))
self.console_message("stream_api_base: {api}".format(api=self.StreamApiBase))
def ship_to_api(self, resource, contents):
api_base = self.StreamApiBase
headers = {
'Content-Type': 'application/json',
'User-Agent': 'PyStreamer v' + version.__version__,
'Accept-Version': self.ApiVersion,
'X-IS-AccessKey': self.AccessKey,
'X-IS-BucketKey': self.BucketKey
}
def __ship(retry_attempts, wait=0):
conn = None
response = None
if (self.StreamApiBase.startswith('https://')):
api_base = self.StreamApiBase[8:]
self.console_message("ship {resource}: stream api base domain: {domain}".format(domain=api_base, resource=resource), level=2)
conn = httplib.HTTPSConnection(api_base, timeout=120)
else:
api_base = self.StreamApiBase[7:]
self.console_message("ship {resource}: stream api base domain: {domain}".format(domain=api_base, resource=resource), level=2)
conn = httplib.HTTPConnection(api_base, timeout=120)
retry_attempts = retry_attempts - 1
if (retry_attempts < 0):
if (self.DebugLevel >= 2):
raise Exception("shipping failed.. network issue?")
else:
self.console_message("ship: ISStreamer failed to ship after a number of attempts.", level=0)
if (self.MissedEvents == None):
self.MissedEvents = open("err_missed_events.txt", 'w+')
if (self.MissedEvents != None):
self.MissedEvents.write("{}\n".format(json.dumps(contents)))
return
try:
if (wait > 0):
self.console_message("ship-debug: pausing thread for {wait} seconds".format(wait=wait))
time.sleep(wait)
conn.request('POST', resource, json.dumps(contents), headers)
response = conn.getresponse()
response_body = response.read()
if (response.status >= 200 and response.status < 300):
self.console_message("ship: status: " + str(response.status) + "\nheaders: " + str(response.msg), level=2)
self.console_message("ship: body: " + str(response_body), level=3)
elif (response.status == 400):
json_err = None
try:
json_err = json.loads(response_body)
except Exception as ex:
pass
if json_err != None:
if (json_err["message"]["error"]["type"] == "BUCKET_REMOVED"):
self.console_message("Bucket Creation Failed: " + json_err["message"]["error"]["message"])
elif (response.status == 401 or response.status == 403):
self.console_message("ERROR: unauthorized access_key: " + self.AccessKey)
elif (response.status == 402):
self.console_message("AccessKey exceeded limit for month, check account")
raise Exception("PAYMENT_REQUIRED")
elif (response.status == 429):
if "Retry-After" in response.msg:
retry_after = response.msg["Retry-After"]
self.console_message("Request limit exceeded, wait {limit} seconds before trying again".format(limit=retry_after))
__ship(retry_attempts, int(retry_after)+1)
else:
self.console_message("Request limit exceeded")
else:
self.console_message("ship: failed on attempt {atmpt} (StatusCode: {sc}; Reason: {r})".format(sc=response.status, r=response.reason, atmpt=retry_attempts))
raise Exception("ship exception")
except Exception as ex:
if (len(ex.args) > 0 and ex.args[0] == "PAYMENT_REQUIRED"):
raise Exception("Either account is capped or an upgrade is required.")
self.console_message("ship: exception shipping on attempt {atmpt}.".format(atmpt=retry_attempts))
if (self.DebugLevel > 1):
raise ex
else:
self.console_message("exception gobbled: {}".format(str(ex)))
__ship(retry_attempts, 1)
__ship(3)
def set_bucket(self, bucket_name="", bucket_key="", retries=3):
def __create_bucket(new_bucket_name, new_bucket_key, access_key):
self.ship_to_api("/api/buckets", {'bucketKey': new_bucket_key, 'bucketName': new_bucket_name})
if (bucket_key == None or bucket_key == ""):
bucket_key = str(uuid.uuid4())
self.BucketKey = bucket_key
self.BucketName = bucket_name
if (not self.Offline):
if (self.Async):
t = threading.Thread(target=__create_bucket, args=(bucket_name, bucket_key, self.AccessKey))
t.daemon = False
t.start()
else:
__create_bucket(bucket_name, bucket_key, self.AccessKey)
else:
self.console_message("Working in offline mode.", level=0)
def console_message(self, message, level=1):
if (self.DebugLevel >= level):
print(message)
def ship_messages(self, messages, retries=3):
self.ship_to_api("/api/events", messages)
def flush(self):
if (self.Offline):
self.console_message("flush: no need, in offline mode", level=2)
return
messages = []
self.console_message("flush: checking queue", level=2)
isEmpty = False
while not isEmpty:
try:
m = self.LogQueue.popleft()
messages.append(m)
except IndexError:
isEmpty = True
self.console_message("flush: queue empty...", level=2)
if len(messages) > 0:
self.console_message("flush: queue not empty, shipping", level=2)
self.ship_messages(messages)
self.console_message("flush: finished flushing queue", level=2)
def log_object(self, obj, key_prefix=None, epoch=None):
if (epoch == None):
epoch = time.time()
if (key_prefix == None):
key_prefix = "{}_".format(str(type(obj).__name__))
elif (key_prefix != None and key_prefix != ""):
key_prefix = "{}_".format(key_prefix)
else:
key_prefix = ""
if (type(obj).__name__ == 'list'):
i = 0
for val in obj:
key_name = "{}{}".format(key_prefix, i)
self.log(key_name, val, epoch=epoch)
i += 1
elif (type(obj).__name__ == 'dict'):
for key in obj:
key_name = "{}{}".format(key_prefix, key)
self.log(key_name, obj[key], epoch=epoch)
else:
for attr in dir(obj):
if not isinstance(getattr(type(obj), attr, None), property):
continue
key_name = "{}{}".format(key_prefix, attr)
self.log(key_name, getattr(obj, attr), epoch=epoch)
def log(self, key, value, epoch=None):
def __ship_buffer():
i = self.BufferSize
messages = []
while(i > 0):
try:
m = self.LogQueue.popleft()
messages.append(m)
except IndexError:
i = 0
self.console_message("ship_buffer: queue empty")
i = i - 1
self.console_message("ship_buffer: shipping", level=2)
self.ship_messages(messages)
self.console_message("ship_buffer: finished shipping", level=2)
timeStamp = time.time()
gmtime = datetime.datetime.fromtimestamp(timeStamp)
if epoch != None:
try:
gmtime = datetime.datetime.fromtimestamp(epoch)
timeStamp = epoch
            except Exception:
                self.console_message("epoch was overridden with an invalid time, using current timestamp instead")
formatted_gmTime = gmtime.strftime('%Y-%m-%d %H:%M:%S.%f')
self.console_message("{time}: {key} {value}".format(key=key, value=value, time=formatted_gmTime))
if (not self.Offline):
if (len(self.LogQueue) >= self.BufferSize):
self.console_message("log: queue size approximately at or greater than buffer size, shipping!", level=10)
self.console_message("log: async is {}".format(self.Async))
if (self.Async):
self.console_message("log: spawning ship thread", level=3)
t = threading.Thread(target=__ship_buffer)
t.daemon = False
t.start()
else:
__ship_buffer()
self.console_message("log: queueing log item", level=2)
log_item = {
"key": key,
"value": value,
"epoch": timeStamp
}
self.LogQueue.append(log_item)
else:
self.LocalFile.writerow([timeStamp, key, value])
def close(self):
self.IsClosed = True
self.flush()
if (self.MissedEvents != None):
self.MissedEvents.close()
if (self.Offline):
self.console_message("closing local file handler", level=2)
self.LocalFileHandler.close()
def __del__(self):
"""Try to close/flush the cache before destruction"""
try:
if (not self.IsClosed):
self.close()
except:
if (self.DebugLevel >= 2):
raise Exception("failed to close the buffer, make sure to explicitly call close() on the Streamer")
else:
self.console_message("failed to close the buffer, make sure to explicitly call close() on the Streamer", level=1)
| mit | -6,042,125,597,051,966,000 | 31.034056 | 160 | 0.667826 | false | 3.175875 | true | false | false |
15Dkatz/pants | src/python/pants/engine/build_files.py | 1 | 13721 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import collections
from os.path import dirname, join
import six
from pants.base.project_tree import Dir
from pants.base.specs import (AscendantAddresses, DescendantAddresses, SiblingAddresses,
SingleAddress)
from pants.build_graph.address import Address, BuildFileAddress
from pants.engine.addressable import (AddressableDescriptor, BuildFileAddresses, Collection,
Exactly, TypeConstraintError)
from pants.engine.fs import FilesContent, PathGlobs, Snapshot
from pants.engine.mapper import AddressFamily, AddressMap, AddressMapper, ResolveError
from pants.engine.objects import Locatable, SerializableFactory, Validatable
from pants.engine.rules import RootRule, SingletonRule, TaskRule, rule
from pants.engine.selectors import Select, SelectDependencies, SelectProjection
from pants.engine.struct import Struct
from pants.util.objects import datatype
_SPECS_CONSTRAINT = Exactly(SingleAddress,
SiblingAddresses,
DescendantAddresses,
AscendantAddresses)
class ResolvedTypeMismatchError(ResolveError):
"""Indicates a resolved object was not of the expected type."""
def _key_func(entry):
key, value = entry
return key
class BuildDirs(datatype('BuildDirs', ['dependencies'])):
"""A list of Stat objects for directories containing build files."""
class BuildFiles(datatype('BuildFiles', ['files_content'])):
"""The FileContents of BUILD files in some directory"""
class BuildFileGlobs(datatype('BuildFilesGlobs', ['path_globs'])):
"""A wrapper around PathGlobs that are known to match a build file pattern."""
@rule(BuildFiles,
[SelectProjection(FilesContent, PathGlobs, 'path_globs', BuildFileGlobs)])
def build_files(files_content):
return BuildFiles(files_content)
@rule(BuildFileGlobs, [Select(AddressMapper), Select(Dir)])
def buildfile_path_globs_for_dir(address_mapper, directory):
patterns = address_mapper.build_patterns
return BuildFileGlobs(PathGlobs.create(directory.path, include=patterns, exclude=()))
@rule(AddressFamily, [Select(AddressMapper), Select(Dir), Select(BuildFiles)])
def parse_address_family(address_mapper, path, build_files):
"""Given the contents of the build files in one directory, return an AddressFamily.
The AddressFamily may be empty, but it will not be None.
"""
files_content = build_files.files_content.dependencies
if not files_content:
raise ResolveError('Directory "{}" does not contain build files.'.format(path))
address_maps = []
paths = (f.path for f in files_content)
ignored_paths = set(address_mapper.build_ignore_patterns.match_files(paths))
for filecontent_product in files_content:
if filecontent_product.path in ignored_paths:
continue
address_maps.append(AddressMap.parse(filecontent_product.path,
filecontent_product.content,
address_mapper.parser))
return AddressFamily.create(path.path, address_maps)
class UnhydratedStruct(datatype('UnhydratedStruct', ['address', 'struct', 'dependencies'])):
"""A product type that holds a Struct which has not yet been hydrated.
A Struct counts as "hydrated" when all of its members (which are not themselves dependencies
lists) have been resolved from the graph. This means that hydrating a struct is eager in terms
of inline addressable fields, but lazy in terms of the complete graph walk represented by
the `dependencies` field of StructWithDeps.
"""
def __eq__(self, other):
if type(self) != type(other):
return NotImplemented
return self.struct == other.struct
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self.struct)
def _raise_did_you_mean(address_family, name):
possibilities = '\n '.join(':{}'.format(a.target_name) for a in address_family.addressables)
raise ResolveError('"{}" was not found in namespace "{}". '
'Did you mean one of:\n {}'
.format(name, address_family.namespace, possibilities))
@rule(UnhydratedStruct,
[Select(AddressMapper),
SelectProjection(AddressFamily, Dir, 'spec_path', Address),
Select(Address)])
def resolve_unhydrated_struct(address_mapper, address_family, address):
"""Given an Address and its AddressFamily, resolve an UnhydratedStruct.
Recursively collects any embedded addressables within the Struct, but will not walk into a
dependencies field, since those are requested explicitly by tasks using SelectDependencies.
"""
struct = address_family.addressables.get(address)
addresses = address_family.addressables
if not struct or address not in addresses:
_raise_did_you_mean(address_family, address.target_name)
dependencies = []
def maybe_append(outer_key, value):
if isinstance(value, six.string_types):
if outer_key != 'dependencies':
dependencies.append(Address.parse(value,
relative_to=address.spec_path,
subproject_roots=address_mapper.subproject_roots))
elif isinstance(value, Struct):
collect_dependencies(value)
def collect_dependencies(item):
for key, value in sorted(item._asdict().items(), key=_key_func):
if not AddressableDescriptor.is_addressable(item, key):
continue
if isinstance(value, collections.MutableMapping):
for _, v in sorted(value.items(), key=_key_func):
maybe_append(key, v)
elif isinstance(value, collections.MutableSequence):
for v in value:
maybe_append(key, v)
else:
maybe_append(key, value)
collect_dependencies(struct)
return UnhydratedStruct(
filter(lambda build_address: build_address == address, addresses)[0], struct, dependencies)
def hydrate_struct(address_mapper, unhydrated_struct, dependencies):
"""Hydrates a Struct from an UnhydratedStruct and its satisfied embedded addressable deps.
Note that this relies on the guarantee that DependenciesNode provides dependencies in the
order they were requested.
"""
address = unhydrated_struct.address
struct = unhydrated_struct.struct
def maybe_consume(outer_key, value):
if isinstance(value, six.string_types):
if outer_key == 'dependencies':
# Don't recurse into the dependencies field of a Struct, since those will be explicitly
# requested by tasks. But do ensure that their addresses are absolute, since we're
# about to lose the context in which they were declared.
value = Address.parse(value,
relative_to=address.spec_path,
subproject_roots=address_mapper.subproject_roots)
else:
value = dependencies[maybe_consume.idx]
maybe_consume.idx += 1
elif isinstance(value, Struct):
value = consume_dependencies(value)
return value
# NB: Some pythons throw an UnboundLocalError for `idx` if it is a simple local variable.
maybe_consume.idx = 0
# 'zip' the previously-requested dependencies back together as struct fields.
def consume_dependencies(item, args=None):
hydrated_args = args or {}
for key, value in sorted(item._asdict().items(), key=_key_func):
if not AddressableDescriptor.is_addressable(item, key):
hydrated_args[key] = value
continue
if isinstance(value, collections.MutableMapping):
container_type = type(value)
hydrated_args[key] = container_type((k, maybe_consume(key, v))
for k, v in sorted(value.items(), key=_key_func))
elif isinstance(value, collections.MutableSequence):
container_type = type(value)
hydrated_args[key] = container_type(maybe_consume(key, v) for v in value)
else:
hydrated_args[key] = maybe_consume(key, value)
return _hydrate(type(item), address.spec_path, **hydrated_args)
return consume_dependencies(struct, args={'address': address})
def _hydrate(item_type, spec_path, **kwargs):
# If the item will be Locatable, inject the spec_path.
if issubclass(item_type, Locatable):
kwargs['spec_path'] = spec_path
try:
item = item_type(**kwargs)
except TypeConstraintError as e:
raise ResolvedTypeMismatchError(e)
# Let factories replace the hydrated object.
if isinstance(item, SerializableFactory):
item = item.create()
# Finally make sure objects that can self-validate get a chance to do so.
if isinstance(item, Validatable):
item.validate()
return item
@rule(BuildFileAddresses,
[Select(AddressMapper),
SelectDependencies(AddressFamily, BuildDirs, field_types=(Dir,)),
Select(_SPECS_CONSTRAINT)])
def addresses_from_address_families(address_mapper, address_families, spec):
"""Given a list of AddressFamilies and a Spec, return matching Addresses.
Raises a ResolveError if:
- there were no matching AddressFamilies, or
- the Spec matches no addresses for SingleAddresses.
"""
if not address_families:
raise ResolveError('Path "{}" contains no BUILD files.'.format(spec.directory))
def exclude_address(address):
if address_mapper.exclude_patterns:
address_str = address.spec
return any(p.search(address_str) is not None for p in address_mapper.exclude_patterns)
return False
if type(spec) in (DescendantAddresses, SiblingAddresses, AscendantAddresses):
addresses = tuple(a
for af in address_families
for a in af.addressables.keys()
if not exclude_address(a))
elif type(spec) is SingleAddress:
# TODO Could assert len(address_families) == 1, as it should always be true in this case.
addresses = tuple(a
for af in address_families
for a in af.addressables.keys()
if a.target_name == spec.name and not exclude_address(a))
if not addresses:
if len(address_families) == 1:
_raise_did_you_mean(address_families[0], spec.name)
else:
raise ValueError('Unrecognized Spec type: {}'.format(spec))
return BuildFileAddresses(addresses)
@rule(BuildDirs, [Select(AddressMapper), Select(Snapshot)])
def filter_build_dirs(address_mapper, snapshot):
"""Given a Snapshot matching a build pattern, return parent directories as BuildDirs."""
dirnames = set(dirname(f.stat.path) for f in snapshot.files)
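  # Append a trailing slash so directory-style ignore patterns (e.g. "dist/") can
  # match; the slash is stripped again below.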
ignored_dirnames = address_mapper.build_ignore_patterns.match_files('{}/'.format(dirname) for dirname in dirnames)
ignored_dirnames = set(d.rstrip('/') for d in ignored_dirnames)
return BuildDirs(tuple(Dir(d) for d in dirnames if d not in ignored_dirnames))
@rule(PathGlobs, [Select(AddressMapper), Select(_SPECS_CONSTRAINT)])
def spec_to_globs(address_mapper, spec):
"""Given a Spec object, return a PathGlobs object for the build files that it matches."""
if type(spec) is DescendantAddresses:
directory = spec.directory
patterns = [join('**', pattern) for pattern in address_mapper.build_patterns]
elif type(spec) in (SiblingAddresses, SingleAddress):
directory = spec.directory
patterns = address_mapper.build_patterns
elif type(spec) is AscendantAddresses:
directory = ''
patterns = [
join(f, pattern)
for pattern in address_mapper.build_patterns
for f in _recursive_dirname(spec.directory)
]
else:
raise ValueError('Unrecognized Spec type: {}'.format(spec))
return PathGlobs.create(directory, include=patterns, exclude=[])
def _recursive_dirname(f):
"""Given a relative path like 'a/b/c/d', yield all ascending path components like:
'a/b/c/d'
'a/b/c'
'a/b'
'a'
''
"""
while f:
yield f
f = dirname(f)
yield ''
BuildFilesCollection = Collection.of(BuildFiles)
def create_graph_rules(address_mapper, symbol_table):
"""Creates tasks used to parse Structs from BUILD files.
  :param address_mapper: The AddressMapper instance, provided to rules via a SingletonRule.
:param symbol_table: A SymbolTable instance to provide symbols for Address lookups.
"""
symbol_table_constraint = symbol_table.constraint()
return [
TaskRule(BuildFilesCollection,
[SelectDependencies(BuildFiles, BuildDirs, field_types=(Dir,))],
BuildFilesCollection),
# A singleton to provide the AddressMapper.
SingletonRule(AddressMapper, address_mapper),
# Support for resolving Structs from Addresses.
TaskRule(
symbol_table_constraint,
[Select(AddressMapper),
Select(UnhydratedStruct),
SelectDependencies(symbol_table_constraint, UnhydratedStruct, field_types=(Address,))],
hydrate_struct
),
resolve_unhydrated_struct,
# BUILD file parsing.
parse_address_family,
build_files,
buildfile_path_globs_for_dir,
# Spec handling: locate directories that contain build files, and request
# AddressFamilies for each of them.
addresses_from_address_families,
filter_build_dirs,
spec_to_globs,
# Root rules representing parameters that might be provided via root subjects.
RootRule(Address),
RootRule(BuildFileAddress),
RootRule(AscendantAddresses),
RootRule(DescendantAddresses),
RootRule(SiblingAddresses),
RootRule(SingleAddress),
]
| apache-2.0 | 2,093,579,768,712,662,000 | 37.434174 | 116 | 0.693535 | false | 4.078775 | false | false | false |
julianofischer/caederm | caed/models.py | 1 | 2456 | # encoding: utf-8
# Author: Juliano Fischer Naves
# julianofischer at gmail dot com
# April, 2014
from django.db import models
from django.utils.translation import ugettext_lazy as _
# Create your models here.
class StudentClass(models.Model):
name = models.CharField(max_length=30,verbose_name='Turma')
def __unicode__(self):
return self.name
class Meta:
#Translators: The student class
verbose_name = _("Turma")
class Student(models.Model):
cpf = models.CharField(max_length=11,verbose_name="CPF")
name = models.CharField(max_length=60,verbose_name="Nome")
mother_name = models.CharField(max_length=60,verbose_name="Nome da mãe")
father_name = models.CharField(max_length=60,verbose_name="Nome do pai")
father_phone = models.CharField(max_length=60,verbose_name="Telefone do pai")
mother_phone = models.CharField(max_length=11,verbose_name="Telefone da mãe")
home_phone = models.CharField(max_length=11,verbose_name="Telefone de casa")
student_class = models.ForeignKey('StudentClass',verbose_name="Turma")
def __unicode__(self):
return self.name
class Meta:
verbose_name = _("Estudante")
verbose_name_plural = _("Estudantes")
#Ocorrência (Incident)
class Incident(models.Model):
title = models.CharField(max_length=50,verbose_name=u"Título")
type = models.ForeignKey('IncidentType',verbose_name="Tipo")
description = models.TextField(verbose_name=u"Descrição")
measure_taken = models.TextField(verbose_name="Medida tomada")
student = models.ForeignKey('Student',verbose_name="Estudante")
student_class = models.ForeignKey('StudentClass',verbose_name='Turma')
date_time = models.DateTimeField(verbose_name='Data e hora')
archived = models.BooleanField(default=False,verbose_name='Arquivado')
def __unicode__(self):
return self.title
def save(self, *args, **kwargs):
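        # On first save, snapshot the student's current class so that later
        # class changes don't rewrite historical incidents.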
if self.pk is None:
self.student_class = self.student.student_class
super(Incident, self).save(*args,**kwargs)
class Meta:
verbose_name = _(u"Ocorrência")
verbose_name_plural = _(u"Ocorrências")
class IncidentType(models.Model):
title = models.CharField(max_length=30,verbose_name=u"Tipo de Ocorrência")
def __unicode__(self):
return self.title
class Meta:
verbose_name=("Tipo")
| gpl-3.0 | 1,138,426,551,920,276,600 | 34.985294 | 81 | 0.667348 | false | 3.500715 | false | false | false |
harshays/southwest | southwest/utils.py | 1 | 1537 | import os, sys, argparse
import datetime as dt
import threading
from functools import wraps
def _caffeinate():
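    # macOS `caffeinate` blocks until killed, keeping the machine awake, so the
    # decorator below runs it on a daemon thread.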
os.system('caffeinate')
def caffeinate(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if sys.platform == 'darwin':
thrd = threading.Thread(target = _caffeinate, args = ())
# 'service' thread. does not stop process from terminating.
thrd.daemon = True
thrd.start()
fn(*args, **kwargs)
return wrapper
def get_single_args():
parser = argparse.ArgumentParser(description = "CLI for single southwest check-in")
parser.add_argument('firstname', help = "first name")
parser.add_argument('lastname', help = "last name")
parser.add_argument('code', help = "southwest code")
parser.add_argument('-d', '--date', help = "date (format is mm/dd/yyyy, default is today's date)", default = dt.datetime.now())
parser.add_argument('-t', '--time', help = "time (format is hh:mm, default is current time)", default = dt.datetime.now())
args = parser.parse_args()
if isinstance(args.date, dt.datetime):
args.date = args.date.strftime('%m/%d/%Y')
if isinstance(args.time, dt.datetime):
args.time = args.time.strftime('%H:%M')
return args
def get_multiple_args():
parser = argparse.ArgumentParser(description = "CLI for multiple southwest check ins")
parser.add_argument('csv', help = "csv file full path")
args = parser.parse_args()
return args
if __name__ == '__main__':
pass
| mit | -6,317,331,149,440,066,000 | 28.557692 | 131 | 0.635654 | false | 3.757946 | false | false | false |
samhoo/askbot-realworld | askbot/utils/forms.py | 1 | 8152 | import re
from django import forms
from django.http import str_to_unicode
from django.contrib.auth.models import User
from django.conf import settings
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from askbot.conf import settings as askbot_settings
from askbot.utils.slug import slugify
from askbot import const
import logging
import urllib
DEFAULT_NEXT = '/' + getattr(settings, 'ASKBOT_URL')
def clean_next(next, default = None):
if next is None or not next.startswith('/'):
if default:
return default
else:
return DEFAULT_NEXT
next = str_to_unicode(urllib.unquote(next), 'utf-8')
next = next.strip()
logging.debug('next url is %s' % next)
return next
def get_next_url(request, default = None):
return clean_next(request.REQUEST.get('next'), default)
class StrippedNonEmptyCharField(forms.CharField):
def clean(self, value):
value = value.strip()
if self.required and value == '':
raise forms.ValidationError(_('this field is required'))
return value
class NextUrlField(forms.CharField):
def __init__(self):
super(
NextUrlField,
self
).__init__(
max_length = 255,
widget = forms.HiddenInput(),
required = False
)
def clean(self,value):
return clean_next(value)
login_form_widget_attrs = { 'class': 'required login' }
class UserNameField(StrippedNonEmptyCharField):
RESERVED_NAMES = (u'fuck', u'shit', u'ass', u'sex', u'add',
u'edit', u'save', u'delete', u'manage', u'update', 'remove', 'new')
def __init__(
self,
db_model=User,
db_field='username',
must_exist=False,
skip_clean=False,
label=_('choose a username'),
**kw
):
self.must_exist = must_exist
self.skip_clean = skip_clean
self.db_model = db_model
self.db_field = db_field
self.user_instance = None
error_messages={
'required': _('user name is required'),
'taken': _('sorry, this name is taken, please choose another'),
'forbidden': _('sorry, this name is not allowed, please choose another'),
'missing': _('sorry, there is no user with this name'),
'multiple-taken': _('sorry, we have a serious error - user name is taken by several users'),
'invalid': _('user name can only consist of letters, empty space and underscore'),
'meaningless': _('please use at least some alphabetic characters in the user name'),
}
if 'error_messages' in kw:
error_messages.update(kw['error_messages'])
del kw['error_messages']
super(UserNameField,self).__init__(max_length=30,
widget=forms.TextInput(attrs=login_form_widget_attrs),
label=label,
error_messages=error_messages,
**kw
)
def clean(self,username):
""" validate username """
if self.skip_clean == True:
logging.debug('username accepted with no validation')
return username
if self.user_instance is None:
pass
elif isinstance(self.user_instance, User):
if username == self.user_instance.username:
logging.debug('username valid')
return username
else:
raise TypeError('user instance must be of type User')
try:
username = super(UserNameField, self).clean(username)
except forms.ValidationError:
raise forms.ValidationError(self.error_messages['required'])
username_regex = re.compile(const.USERNAME_REGEX_STRING, re.UNICODE)
if self.required and not username_regex.search(username):
raise forms.ValidationError(self.error_messages['invalid'])
if username in self.RESERVED_NAMES:
raise forms.ValidationError(self.error_messages['forbidden'])
if slugify(username, force_unidecode = True) == '':
raise forms.ValidationError(self.error_messages['meaningless'])
try:
user = self.db_model.objects.get(
**{'%s' % self.db_field : username}
)
if user:
if self.must_exist:
logging.debug('user exists and name accepted b/c here we validate existing user')
return username
else:
raise forms.ValidationError(self.error_messages['taken'])
except self.db_model.DoesNotExist:
if self.must_exist:
logging.debug('user must exist, so raising the error')
raise forms.ValidationError(self.error_messages['missing'])
else:
logging.debug('user name valid!')
return username
except self.db_model.MultipleObjectsReturned:
logging.debug('error - user with this name already exists')
raise forms.ValidationError(self.error_messages['multiple-taken'])
class UserEmailField(forms.EmailField):
def __init__(self,skip_clean=False,**kw):
self.skip_clean = skip_clean
super(UserEmailField,self).__init__(widget=forms.TextInput(attrs=dict(login_form_widget_attrs,
maxlength=200)), label=mark_safe(_('your email address')),
error_messages={'required':_('email address is required'),
'invalid':_('please enter a valid email address'),
'taken':_('this email is already used by someone else, please choose another'),
},
**kw
)
def clean(self,email):
""" validate if email exist in database
from legacy register
return: raise error if it exist """
email = super(UserEmailField,self).clean(email.strip())
if self.skip_clean:
return email
if askbot_settings.EMAIL_UNIQUE == True:
try:
user = User.objects.get(email = email)
logging.debug('email taken')
raise forms.ValidationError(self.error_messages['taken'])
except User.DoesNotExist:
logging.debug('email valid')
return email
except User.MultipleObjectsReturned:
logging.debug('email taken many times over')
raise forms.ValidationError(self.error_messages['taken'])
else:
return email
class SetPasswordForm(forms.Form):
password1 = forms.CharField(widget=forms.PasswordInput(attrs=login_form_widget_attrs),
label=_('choose password'),
error_messages={'required':_('password is required')},
)
password2 = forms.CharField(widget=forms.PasswordInput(attrs=login_form_widget_attrs),
label=mark_safe(_('retype password')),
error_messages={'required':_('please, retype your password'),
'nomatch':_('sorry, entered passwords did not match, please try again')},
)
def __init__(self, data=None, user=None, *args, **kwargs):
super(SetPasswordForm, self).__init__(data, *args, **kwargs)
def clean_password2(self):
"""
Validates that the two password inputs match.
"""
if 'password1' in self.cleaned_data:
if self.cleaned_data['password1'] == self.cleaned_data['password2']:
self.password = self.cleaned_data['password2']
self.cleaned_data['password'] = self.cleaned_data['password2']
return self.cleaned_data['password2']
else:
del self.cleaned_data['password2']
raise forms.ValidationError(self.fields['password2'].error_messages['nomatch'])
else:
return self.cleaned_data['password2']
| gpl-3.0 | -7,291,696,706,646,717,000 | 40.591837 | 121 | 0.57765 | false | 4.579775 | false | false | false |
e-baumer/sampling | sampling/stratified_rand.py | 1 | 5349 | from __future__ import division
from collections import defaultdict
import numpy as np
from base_sample import BaseSample
from sklearn.cluster import AffinityPropagation as AP
import pandas as pd
from collections import Counter
class StratifiedRandom(BaseSample):
def __init__(self, data_frame, number_arms=2):
super(StratifiedRandom, self).__init__(data_frame, number_arms)
def create_stratum(self, column_names, **kwargs):
'''
Use affinity propagation to find number of strata for each column.
column_names is a list of the covariates to be split into strata and
        used for classification. This function adds a column to the data frame
for each column as column_name_strata that gives the strata designation
for that variable. The whole data frame is returned.
'''
for colname in column_names:
            X = self.data[colname].values.reshape(-1, 1)
if np.isnan(X).any():
raise ValueError("There are NaN values in self.data[%s] that the \
clustering algorithm can't handle" % colname)
elif np.unique(self.data[colname]).shape[0] <=2:
string_name = colname+'_strata'
self.data[string_name] = self.data[colname].astype(int)
else:
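                # Forward any caller kwargs (e.g. preference=-50 from assign_arms)
                # to AffinityPropagation; assumes callers pass only valid AP kwargs.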
                af_model = AP(damping=0.9, **kwargs)
strata_groups = af_model.fit(X)
#cluster_centers_indices = af.cluster_centers_indices_
#n_clusters_ = len(cluster_centers_indices)
string_name = colname+'_strata'
self.data[string_name] = strata_groups.labels_
return self.data
#In the main function, you need to call create_stratum before create_unique_strata
def create_unique_strata(self, column_names):
'''
The input should be self.data that has had the strata for each column
name assigned and had a pre-seeded randomization, meaning each arm
has at least one randomly assigned participant.
'''
#Create a column to store concatenated strata strings for each data point
self.data['strata_string'] = np.ones(len(self.data))*np.nan
#Initialize variables to be filled in during the loop
strata_unique = {}
#Loop through data points and create their strata strings
for ind in self.data.index.values:
similar_val = ''
for colname in column_names:
string_name = colname+'_strata'
similar_val += str(self.data[string_name].loc[ind])
#Add the total strata string for that data point
self.data['strata_string'].set_value(ind,similar_val)
#If the strata string exists, continue. If not, assign it a new value
if similar_val in list(strata_unique.keys()):
strata_unique[similar_val].append(ind)
continue
else:
strata_unique[similar_val] = [ind]
return (strata_unique, self.data)
def count_arm_assignments(self, strata_unique, key):
'''
For each unique strata, count how many are assigned to each arm.
'''
#Initialize arm_tally that is the same length as the number of arms
arm_tally = np.zeros(self.n_arms)
#Loop through the values in the unique strata and count how many are in each arm
for value in strata_unique[key]:
#If it is not NaN, add one to the arm_tally for the data point's arm assignment
if np.isnan(self.data['arm_assignment'][value]) == False:
arm_tally[int(self.data['arm_assignment'][value]-1)] += 1;
return arm_tally
def assign_arms(self, column_names, percent_nan = 0.05):
'''
Loop through unique strata and assign each data point to an arm.
'''
#clear all values with NaNs
self.data = self.nan_finder(column_names, percent_nan)
#call create_stratum to create strata for each chosen covariate
self.data = self.create_stratum(column_names,preference=-50)
#combine the covariate strata into a unique strata identifier
(strata_unique, self.data) = self.create_unique_strata(column_names)
#initiate an empty column in the data frame for arm assignments
self.data['arm_assignment'] = np.ones(len(self.data))*np.nan
        #Loop through the unique strata
for key in strata_unique.keys():
#Loop through the values in the unique stratum
for value in strata_unique[key]:
#update the arm_tally based on new assignments
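                # Arm labels are 1-based, hence the +1 when sampling among tied minima.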
                arm_tally = self.count_arm_assignments(strata_unique, key)
                ind_unique = np.where(arm_tally == np.min(arm_tally))[0]
                self.data['arm_assignment'].set_value(
                    value, np.random.choice(list(ind_unique + 1))
                )
return self.data
#
| apache-2.0 | 4,018,266,252,111,023,600 | 39.522727 | 93 | 0.5745 | false | 4.34878 | false | false | false |
brokenseal/broke | examples/django/broke/blog/views.py | 1 | 2027 | import simplejson as json
from django.core import serializers
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404
from django.contrib.contenttypes.models import ContentType
from django.utils.html import strip_tags
from models import Entry
from forms import EntryForm
def save_entry(request):
if not request.is_ajax() or not request.method == 'POST':
raise Http404
form= EntryForm(request.POST)
if not form.is_valid():
return HttpResponse('{}', mimetype='application/javascript')
if 'pk' in request.POST:
entry= get_object_or_404(Entry, pk= request.POST['pk'])
form= EntryForm(request.POST, instance= entry)
entry= form.save(commit= False)
entry.body = strip_tags(entry.body)
entry.title = strip_tags(entry.title)
entry.save()
else:
entry= form.save(commit= False)
entry.body = strip_tags(entry.body)
entry.title = strip_tags(entry.title)
entry.save()
entry_content_type= ContentType.objects.get_for_model(entry.__class__)
response_data= json.dumps({
'pk': entry.pk,
'model': '%s.%s' % (entry_content_type.app_label, entry_content_type.model),
})
return HttpResponse(response_data, mimetype='application/javascript')
def delete_entry(request):
	if not request.is_ajax() or request.method != 'POST' or 'pk' not in request.POST:
raise Http404
entry= get_object_or_404(Entry, pk= request.POST['pk'])
entry.delete()
response_data= json.dumps({
'operation': 'complete',
})
return HttpResponse(response_data, mimetype='application/javascript')
def get_data(request):
if not request.is_ajax():
raise Http404
entries= Entry.objects.all()
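	# NB: filter field names are taken straight from the query string; fine for a
	# demo, but a whitelist would be safer in production.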
if len(request.GET):
params_dict= {}
for params in request.GET.iteritems():
param= str(params[0])
value= str(params[1])
params_dict[param]= value
entries= entries.filter(**params_dict)
return HttpResponse(serializers.serialize("json", entries), mimetype='application/javascript')
| bsd-3-clause | -219,378,214,440,639,200 | 25.767123 | 95 | 0.696103 | false | 3.35596 | false | false | false |
appleseedhq/cortex | python/IECore/RelativePreset.py | 2 | 22977 | ##########################################################################
#
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import os
import re
## Implements a Preset that represents changes between two Parameter objects.
# The comparison on elements in a ClassVectorParameters takes in consideration both the parameter name and
# the loaded class name in order to consider the "same" element. We do that to try to work around the fact
# that the parameter names ("p0", "p1", etc) are very simple and can easily reappear after a sequence of removal/addition
# operations in a ClassVectorParameter. The method is not 100% safe but should work for most cases.
# \todo Consider adding a protected member that is responsible for that comparison and enable derived classes to
# do other kinds of comparisons, for example, using additional parameters such as user labels.
#
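# A minimal usage sketch (the op and parameter names here are illustrative only):
#
#   oldOp = IECore.ClassLoader.defaultOpLoader().load( "myOp" )()
#   newOp = IECore.ClassLoader.defaultOpLoader().load( "myOp" )()
#   newOp.parameters()["threshold"].setValue( IECore.FloatData( 0.5 ) )
#   preset = IECore.RelativePreset( newOp.parameters(), oldOp.parameters() )
#   if preset.applicableTo( oldOp, oldOp.parameters() ) :
#       preset( oldOp, oldOp.parameters() )   # replays just the recorded differences
#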
class RelativePreset( IECore.Preset ) :
## \param currParameter, IECore.Parameter, represents the parameter state after all changes have been made.
## \param oldParameter, IECore.Parameter, represents the parameter state before any changes.
## \param compareFilter, callable function that receives currParameter and oldParameter child and it should
## return a boolean to indicate if the difference should be computed or not.
def __init__( self, currParameter=None, oldParameter=None, compareFilter = None ) :
IECore.Preset.__init__( self )
self.__data = IECore.CompoundObject()
if compareFilter is None :
self.__compareFilter = lambda x,y: True
else :
self.__compareFilter = compareFilter
# accepts no parameters at all.
if currParameter is None and oldParameter is None :
return
if not isinstance( currParameter, IECore.Parameter ) :
raise TypeError, "Parameter currParameter must be a IECore.Parameter object!"
if not oldParameter is None :
if not isinstance( oldParameter, IECore.Parameter ) :
raise TypeError, "Parameter oldParameter must be a IECore.Parameter object!"
if currParameter.typeId() != oldParameter.typeId() :
raise TypeError, "Mismatching types for currParameter and oldParameter!"
self.__grabParameterChanges( currParameter, oldParameter, self.__data )
## \see IECore.Preset.applicableTo
def applicableTo( self, parameterised, rootParameter ) :
return RelativePreset.__applicableTo( rootParameter, self.__data )
def getDiffData( self ):
"""Returns a IECore.CompoundObject instance that contains the description of all the differences between the two parameters provided when creating this preset."""
return self.__data.copy()
def setDiffData( self, data ):
"""Use this function to recreate a RelativePreset from data previously returned by getDiffData()."""
if not isinstance( data, IECore.CompoundObject ):
raise TypeError, "Invalid data type! Must be a IECore.CompoundObject"
self.__data = data.copy()
## \see IECore.Preset.__call__
def __call__( self, parameterised, rootParameter ) :
if not self.applicableTo( parameterised, rootParameter ) :
raise RuntimeError, "Sorry, this preset is not applicable to the given parameter."
if len( self.__data ) :
self.__applyParameterChanges( rootParameter, self.__data )
def __grabParameterChanges( self, currParameter, oldParameter, data, paramPath = "" ) :
if not oldParameter is None:
if currParameter.staticTypeId() != oldParameter.staticTypeId() :
raise Exception, "Incompatible parameter %s!" % paramPath
if not self.__compareFilter( currParameter, oldParameter ) :
return
if isinstance( currParameter, IECore.ClassParameter ) :
self.__grabClassParameterChanges( currParameter, oldParameter, data, paramPath )
elif isinstance( currParameter, IECore.ClassVectorParameter ) :
self.__grabClassVectorParameterChanges( currParameter, oldParameter, data, paramPath )
elif isinstance( currParameter, IECore.CompoundParameter ) :
self.__grabCompoundParameterChanges( currParameter, oldParameter, data, paramPath )
else :
self.__grabSimpleParameterChanges( currParameter, oldParameter, data, paramPath )
def __grabCompoundParameterChanges( self, currParameter, oldParameter, data, paramPath ) :
for p in currParameter.keys() :
newData = IECore.CompoundObject()
childOldParam = None
if not oldParameter is None :
if p in oldParameter.keys() :
childOldParam = oldParameter[p]
self.__grabParameterChanges(
currParameter[p],
childOldParam,
newData,
paramPath + "." + p
)
if len(newData) :
data[p] = newData
if len(data):
data["_type_"] = IECore.StringData( "CompoundParameter" )
def __grabSimpleParameterChanges( self, currParameter, oldParameter, data, paramPath ) :
if not oldParameter is None :
if currParameter.getValue() == oldParameter.getValue() :
return
data["_type_"] = IECore.StringData( currParameter.typeName() )
data["_value_"] = currParameter.getValue().copy()
def __grabClassParameterChanges( self, currParameter, oldParameter, data, paramPath ) :
c = currParameter.getClass( True )
className = c[1]
classVersion = c[2]
classNameFilter = "*"
try :
classNameFilter = currParameter.userData()["UI"]["classNameFilter"].value
except :
pass
oldClassName = None
oldClassVersion = None
childOldParam = None
if not oldParameter is None :
oldClass = oldParameter.getClass( True )
oldClassName = oldClass[1]
oldClassVersion = oldClass[2]
if oldClass[0] :
childOldParam = oldClass[0].parameters()
classValue = IECore.CompoundObject()
if c[0] :
self.__grabParameterChanges(
c[0].parameters(),
childOldParam,
classValue,
paramPath
)
if len(classValue):
data["_classValue_"] = classValue
if len(data) or className != oldClassName or classVersion != oldClassVersion :
data["_className_"] = IECore.StringData(className)
data["_classVersion_"] = IECore.IntData(classVersion)
data["_classNameFilter_"] = IECore.StringData(classNameFilter)
data["_type_"] = IECore.StringData( "ClassParameter" )
def __grabClassVectorParameterChanges( self, currParameter, oldParameter, data, paramPath ) :
classes = currParameter.getClasses( True )
classNameFilter = "*"
try :
classNameFilter = currParameter.userData()["UI"]["classNameFilter"].value
except :
pass
classNameFilter = IECore.StringData( classNameFilter )
classNames = IECore.StringVectorData()
classVersions = IECore.IntVectorData()
classOrder = IECore.StringVectorData()
values = IECore.CompoundObject()
for c in classes:
pName = c[1]
classOrder.append( pName )
classNames.append( c[2] )
classVersions.append( c[3] )
v = IECore.CompoundObject()
childOldParam = None
if not oldParameter is None and pName in oldParameter.keys() :
oldClass = oldParameter.getClass( pName )
if oldClass :
childOldParam = oldClass.parameters()
self.__grabParameterChanges(
c[0].parameters(),
childOldParam,
v,
paramPath + "." + pName
)
if len(v) :
values[c[1]] = v
removedParams = []
if not oldParameter is None :
removedParams = list( set( oldParameter.keys() ).difference( classOrder ) )
if removedParams :
data["_removedParamNames_"] = IECore.StringVectorData( removedParams )
data["_removedClassNames_"] = IECore.StringVectorData()
for pName in removedParams :
oldClass = oldParameter.getClass( pName, True )
data["_removedClassNames_"].append( oldClass[1] )
modifiedParams = IECore.StringVectorData()
modifiedClassNames = IECore.StringVectorData()
modifiedClassVersions = IECore.IntVectorData()
addedParam = IECore.BoolVectorData()
for i in xrange(0,len(classOrder)):
pName = classOrder[i]
cName = classNames[i]
cVersion = classVersions[i]
oldClassName = None
oldClassVersion = None
if not oldParameter is None :
try:
oldClass = oldParameter.getClass( pName, True )
oldClassName = oldClass[1]
oldClassVersion = oldClass[2]
except Exception, e:
# added parameter...
pass
if cName != oldClassName or cVersion != oldClassVersion :
modifiedParams.append( pName )
modifiedClassNames.append( cName )
modifiedClassVersions.append( cVersion )
added = (oldClassName is None)
# if we are changing the class type, we have to mark as if we
# were removing it too
if cName != oldClassName and not oldClassName is None:
if not "_removedParamNames_" in data :
data["_removedParamNames_"] = IECore.StringVectorData()
data["_removedClassNames_"] = IECore.StringVectorData()
data["_removedParamNames_"].append(pName)
data["_removedClassNames_"].append(oldClassName)
removedParams.append(pName)
added = True
addedParam.append( added )
if len(modifiedParams) :
data["_modifiedParamsNames_"] = modifiedParams
data["_modifiedClassNames_"] = modifiedClassNames
data["_modifiedClassVersions_"] = modifiedClassVersions
data["_addedParam_"] = addedParam
# get all non-new parameters
parameterOrder = filter( lambda n: not n in modifiedParams or not addedParam[ modifiedParams.index(n) ], classOrder )
baseOrder = parameterOrder
if not oldParameter is None :
# get all non-deleted original parameters
baseOrder = filter( lambda n: not n in removedParams, oldParameter.keys() )
if baseOrder != parameterOrder :
if len(baseOrder) != len(parameterOrder):
raise Exception, "Unnexpected error. Unmatching parameter lists!"
# clamp to the smallest list containing the differences
for start in xrange(0,len(baseOrder)):
if baseOrder[start] != parameterOrder[start] :
break
for endPos in xrange(len(baseOrder),0,-1):
if baseOrder[endPos-1] != parameterOrder[endPos-1] :
break
data["_modifiedOrder_"] = IECore.StringVectorData( parameterOrder[start:endPos] )
if len(values):
# keep the original classes to which the parameters were edited
for pName in values.keys() :
values[pName]["_class_"] = IECore.StringData( classNames[classOrder.index(pName)] )
data["_values_"] = values
if len(data):
data["_classNameFilter_" ] = classNameFilter
data["_type_"] = IECore.StringData( "ClassVectorParameter" )
data["_paramNames_"] = classOrder
data["_classNames_"] = classNames
@staticmethod
def __applyParameterChanges( parameter, data, paramPath = "" ) :
if isinstance( parameter, IECore.ClassParameter ) :
RelativePreset.__applyClassParameterChanges( parameter, data, paramPath )
elif isinstance( parameter, IECore.ClassVectorParameter ) :
RelativePreset.__applyClassVectorChanges( parameter, data, paramPath )
elif isinstance( parameter, IECore.CompoundParameter ) :
RelativePreset.__applyCompoundParameterChanges( parameter, data, paramPath )
elif isinstance( parameter, IECore.Parameter ) :
RelativePreset.__applySimpleParameterChanges( parameter, data, paramPath )
else :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unrecognized type (%s) for parameter %s. Not affected by preset." % ( parameter.typeName(), parameter.name )
)
@staticmethod
def __applyCompoundParameterChanges( parameter, data, paramPath ) :
if data["_type_"].value != "CompoundParameter" :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unable to set preset on '%s'. Expected %s but found CompoundParameter."
% ( paramPath, data["_type_"].value )
)
return
for p in data.keys() :
if p in [ "_type_", "_class_" ] :
continue
if paramPath :
newParamPath = paramPath + "." + p
else :
newParamPath = p
if p not in parameter :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Could not find parameter '%s'. Preset value ignored." % newParamPath
)
continue
RelativePreset.__applyParameterChanges( parameter[p], data[p], newParamPath )
@staticmethod
def __applySimpleParameterChanges( parameter, data, paramPath ) :
if data["_type_"].value != parameter.typeName() :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unable to set preset on '%s'. Expected %s but found %s."
% ( paramPath, data["_type_"].value, parameter.typeName() )
)
return
try:
parameter.setValue( data["_value_"] )
except Exception, e:
IECore.msg( IECore.Msg.Level.Warning, "IECore.RelativePreset", str(e) )
@staticmethod
def __applyClassParameterChanges( parameter, data, paramPath ) :
if data["_type_"].value != "ClassParameter" :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unable to set preset on '%s'. Expected %s but found ClassParameter."
% ( paramPath, data["_type_"].value )
)
return
c = parameter.getClass( True )
className = data["_className_"].value
classVersion = data["_classVersion_"].value
if c[1] != className or c[2] != classVersion :
parameter.setClass( className, classVersion )
c = parameter.getClass( False )
if c and '_classValue_' in data :
RelativePreset.__applyParameterChanges( c.parameters(), data["_classValue_"], paramPath )
@staticmethod
def __applyClassVectorChanges( parameter, data, paramPath ) :
if data["_type_"].value != "ClassVectorParameter" :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unable to set preset on '%s'. Expected %s but found ClassVectorParameter."
% ( paramPath, data["_type_"].value )
)
return
# remove parameters if they match in parameter name and class name
if "_removedParamNames_" in data :
for (i,pName) in enumerate( data["_removedParamNames_"] ):
if pName in parameter.keys() :
c = parameter.getClass( pName, True )
if c and c[1] == data["_removedClassNames_"][i] :
parameter.removeClass( pName )
paramRemaps = {}
if "_modifiedParamsNames_" in data :
modifiedParams = data["_modifiedParamsNames_"]
modifiedClassNames = data["_modifiedClassNames_"]
modifiedClassVersions = data["_modifiedClassVersions_"]
addedParam = data["_addedParam_"]
addedCount = 0
# first modify items
for i in range( len( modifiedClassNames ) ) :
if addedParam[i] :
addedCount += 1
else :
# must find an existing matching parameter, no matter what
if modifiedParams[i] in parameter:
c = parameter.getClass( modifiedParams[i], True )
if modifiedClassNames[i] == c[1] :
if modifiedClassVersions[i] != c[2] :
parameter.setClass( modifiedParams[i], modifiedClassNames[i], modifiedClassVersions[i] )
else :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Parameter '%s.%s' has a different class. Expected %s but found %s. Ignoring class change on this parameter."
% ( paramPath, modifiedParams[i], modifiedClassNames[i], c[1] )
)
else :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unable to find parameter '%s.%s' in %s. Ignoring class change on this parameter."
% ( paramPath, modifiedParams[i], parameter.name )
)
# get a list of classes before the addition of new items
newOrder = False
newClassList = map( lambda c: c[1:], parameter.getClasses( True ) )
newParamList = map( lambda c: c[0], newClassList )
# compare each class with whatever existed when we created the RelativePreset and see which ones are the same
sameClasses = set()
for c in newClassList :
if '_modifiedParamsNames_' in data :
# If the preset has added this parameter it should not match current parameters in the vector, no matter if the class matches. Is it always the case?
if c[0] in data['_modifiedParamsNames_'] :
if data['_addedParam_'][ data['_modifiedParamsNames_'].index(c[0]) ] :
continue
try :
i = data['_paramNames_'].index(c[0])
except :
continue
if c[1] == data['_classNames_'][i] :
sameClasses.add( c[0] )
if "_modifiedOrder_" in data :
# there was some kind of change in the order of parameters as well...
modifiedOrder = filter( lambda pName: pName in sameClasses, data["_modifiedOrder_"] )
# find the range of parameters that lie between the reordered parameters in the current vector
firstParam = None
lastParam = None
for (i,pName) in enumerate(newParamList) :
if pName in modifiedOrder :
if firstParam is None:
firstParam = i
lastParam = i
if firstParam != lastParam :
# adds one by one the unknown parameters that lied between the reordered parameters.
for pName in newParamList[firstParam:lastParam+1] :
if not pName in modifiedOrder :
modifiedOrder.insert( modifiedOrder.index(baseParam)+1, pName )
baseParam = pName
def classOrder( c1, c2 ):
# if both elements were on the original reordering operation we use their relationship
if c1[0] in modifiedOrder and c2[0] in modifiedOrder:
i1 = modifiedOrder.index( c1[0] )
i2 = modifiedOrder.index( c2[0] )
return cmp( i1, i2 )
# otherwise we use the current order.
i1 = newParamList.index( c1[0] )
i2 = newParamList.index( c2[0] )
return cmp( i1, i2 )
newClassList.sort( classOrder )
newParamList = map( lambda c: c[0], newClassList )
newOrder = True
if "_modifiedParamsNames_" in data :
# now add items to the appropriate spot in the newClassList and newParamList
if addedCount :
newOrder = True
prevActualParam = None
lastActualParamInsertion = None
currClasses = parameter.getClasses( True )
for pName in data["_paramNames_"] :
if pName in sameClasses :
if pName in newParamList :
prevActualParam = pName
continue
if pName in modifiedParams :
i = modifiedParams.index(pName)
if addedParam[ i ] :
if prevActualParam is None :
if lastActualParamInsertion is None :
# Here we assume that the new parameter should
# go to the top because its predecessors don't exist on the
# new vector. Maybe it could also print a warning message..
lastActualParamInsertion = 0
else :
lastActualParamInsertion += 1
else :
lastActualParamInsertion = newParamList.index( prevActualParam ) + 1
prevActualParam = None
if pName in parameter:
newParamName = parameter.newParameterName()
if not re.match("^p[0-9]+$", pName) :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Custom parameter %s.%s is being renamed to %s..."
% ( paramPath, pName, newParamName )
)
paramRemaps[ pName ] = newParamName
pName = newParamName
# add the parameter to the vector, so that next calls to parameter.newParameterName() will work.
parameter.setClass( pName, modifiedClassNames[i], modifiedClassVersions[i] )
# update our official new arrays
newParamList.insert(lastActualParamInsertion, pName)
newClassList.insert(lastActualParamInsertion, (pName,modifiedClassNames[i], modifiedClassVersions[i]) )
# update parameters with new order
if newOrder :
parameter.setClasses( newClassList )
if "_values_" in data :
for paramName in data["_values_"].keys() :
remapedParamName = paramRemaps.get( paramName, paramName )
presetValue = data["_values_"][paramName]
if remapedParamName in parameter.keys() :
c = parameter.getClass( remapedParamName, True )
if c[1] == presetValue["_class_"].value :
RelativePreset.__applyParameterChanges(
c[0].parameters(),
presetValue,
paramPath + "." + remapedParamName
)
else :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Ignoring preset values for parameter %s.%s. Expected class %s but found %s."
% ( paramPath, remapedParamName, presetValue["_class_"].value, c[1] )
)
else :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unable to find parameter '%s.%s' in %s. Ignoring this preset changes."
% ( paramPath, remapedParamName, parameter.name )
)
@staticmethod
def __applicableTo( parameter, data ) :
if len(data) == 0 :
return True
if parameter.staticTypeId() == IECore.TypeId.CompoundParameter :
if data["_type_"].value != "CompoundParameter":
return False
elif isinstance( parameter, IECore.ClassParameter ) :
if data["_type_"].value != "ClassParameter":
return False
classNameFilter = "*"
try :
classNameFilter = parameter.userData()["UI"]["classNameFilter"].value
except :
pass
if classNameFilter != data["_classNameFilter_"].value:
return False
elif isinstance( parameter, IECore.ClassVectorParameter ) :
if data["_type_"].value != "ClassVectorParameter":
return False
classNameFilter = "*"
try :
classNameFilter = parameter.userData()["UI"]["classNameFilter"].value
except :
pass
if classNameFilter != data["_classNameFilter_"].value:
return False
else :
if data["_type_"].value != parameter.typeName():
return False
if not parameter.valueValid( data["_value_"] )[0]:
return False
return True
IECore.registerRunTimeTyped( RelativePreset )
| bsd-3-clause | -4,797,742,808,319,548,000 | 32.251809 | 164 | 0.689994 | false | 3.664593 | false | false | false |
bally12345/enigma2 | lib/python/Components/Converter/ClockToText.py | 1 | 2991 | from Converter import Converter
from time import localtime, strftime
from Components.Element import cached
class ClockToText(Converter, object):
DEFAULT = 0
WITH_SECONDS = 1
IN_MINUTES = 2
DATE = 3
FORMAT = 4
AS_LENGTH = 5
TIMESTAMP = 6
FULL = 7
SHORT_DATE = 8
LONG_DATE = 9
VFD = 10
FULL_DATE = 11
# add: date, date as string, weekday, ...
# (whatever you need!)
def __init__(self, type):
Converter.__init__(self, type)
if type == "WithSeconds":
self.type = self.WITH_SECONDS
elif type == "InMinutes":
self.type = self.IN_MINUTES
elif type == "Date":
self.type = self.DATE
elif type == "AsLength":
self.type = self.AS_LENGTH
elif type == "Timestamp":
self.type = self.TIMESTAMP
elif type == "Full":
self.type = self.FULL
elif type == "ShortDate":
self.type = self.SHORT_DATE
elif type == "LongDate":
self.type = self.LONG_DATE
elif type == "FullDate":
self.type = self.FULL_DATE
elif type == "VFD":
self.type = self.VFD
elif "Format" in type:
self.type = self.FORMAT
self.fmt_string = type[7:]
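			# strip the leading "Format:" prefix (7 characters), keeping the raw strftime string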
else:
self.type = self.DEFAULT
@cached
def getText(self):
time = self.source.time
if time is None:
return ""
# handle durations
if self.type == self.IN_MINUTES:
return ngettext("%d Min", "%d Mins", (time / 60)) % (time / 60)
elif self.type == self.AS_LENGTH:
if time < 0:
return ""
return "%d:%02d" % (time / 60, time % 60)
elif self.type == self.TIMESTAMP:
return str(time)
t = localtime(time)
if self.type == self.WITH_SECONDS:
# TRANSLATORS: full time representation hour:minute:seconds
return _("%2d:%02d:%02d") % (t.tm_hour, t.tm_min, t.tm_sec)
elif self.type == self.DEFAULT:
# TRANSLATORS: short time representation hour:minute
return _("%2d:%02d") % (t.tm_hour, t.tm_min)
elif self.type == self.DATE:
# TRANSLATORS: full date representation dayname daynum monthname year in strftime() format! See 'man strftime'
d = _("%A %e %B %Y")
elif self.type == self.FULL:
# TRANSLATORS: long date representation short dayname daynum short monthname hour:minute in strftime() format! See 'man strftime'
d = _("%a %e/%m %-H:%M")
elif self.type == self.SHORT_DATE:
# TRANSLATORS: short date representation short dayname daynum short monthname in strftime() format! See 'man strftime'
d = _("%a %e/%m")
elif self.type == self.LONG_DATE:
# TRANSLATORS: long date representations dayname daynum monthname in strftime() format! See 'man strftime'
d = _("%A %e %B")
elif self.type == self.FULL_DATE:
# TRANSLATORS: full date representations sort dayname daynum monthname long year in strftime() format! See 'man strftime'
d = _("%a %e %B %Y")
elif self.type == self.VFD:
# TRANSLATORS: VFD hour:minute daynum short monthname in strftime() format! See 'man strftime'
d = _("%k:%M %e/%m")
elif self.type == self.FORMAT:
d = self.fmt_string
else:
return "???"
return strftime(d, t)
text = property(getText)
| gpl-2.0 | -1,581,685,355,648,117,200 | 29.520408 | 132 | 0.651287 | false | 2.946798 | false | false | false |
klahnakoski/MySQL-to-S3 | vendor/jx_sqlite/snowflake.py | 1 | 12852 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from collections import OrderedDict
from copy import copy
from jx_base import STRUCT, OBJECT, EXISTS, STRING
from jx_base.container import Container
from jx_base.queries import get_property_name
from jx_python import jx
from jx_python.meta import Column
from jx_sqlite import typed_column, UID, quoted_UID, quoted_GUID, sql_types, quoted_PARENT, quoted_ORDER, GUID, untyped_column
from mo_dots import relative_field, listwrap, split_field, join_field, wrap, startswith_field, concat_field, Null, coalesce, set_default
from mo_future import text_type
from mo_logs import Log
from pyLibrary.sql import SQL_FROM, sql_iso, sql_list, SQL_LIMIT, SQL_SELECT, SQL_ZERO, SQL_STAR
from pyLibrary.sql.sqlite import quote_column
class Snowflake(object):
"""
MANAGE SQLITE DATABASE
"""
def __init__(self, fact, uid, db):
self.fact = fact # THE CENTRAL FACT TABLE
self.uid = uid
self.db = db
self._columns = [] # EVERY COLUMN IS ACCESSIBLE BY EVERY TABLE IN THE SNOWFLAKE
self.tables = OrderedDict() # MAP FROM NESTED PATH TO Table OBJECT, PARENTS PROCEED CHILDREN
if not self.read_db():
self.create_fact(uid)
def read_db(self):
"""
PULL SCHEMA FROM DATABASE, BUILD THE MODEL
:return: None
"""
# FIND ALL TABLES
result = self.db.query("SELECT * FROM sqlite_master WHERE type='table' ORDER BY name")
tables = wrap([{k: d[i] for i, k in enumerate(result.header)} for d in result.data])
tables_found = False
for table in tables:
if table.name.startswith("__"):
continue
tables_found = True
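            # nested_path lists this table and its ancestors, deepest first,
            # relative to the fact table (the fact table itself becomes ".").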
nested_path = [join_field(split_field(tab.name)[1:]) for tab in jx.reverse(tables) if startswith_field(table.name, tab.name)]
self.add_table_to_schema(nested_path)
# LOAD THE COLUMNS
command = "PRAGMA table_info"+sql_iso(quote_column(table.name))
details = self.db.query(command)
for cid, name, dtype, notnull, dfft_value, pk in details.data:
if name.startswith("__"):
continue
cname, ctype = untyped_column(name)
column = Column(
names={np: relative_field(cname, np) for np in nested_path},
type=coalesce(ctype, {"TEXT": "string", "REAL": "number", "INTEGER": "integer"}.get(dtype)),
nested_path=nested_path,
es_column=name,
es_index=table.name
)
self.add_column_to_schema(column)
return tables_found
def create_fact(self, uid=UID):
"""
MAKE NEW TABLE WITH GIVEN guid
:param uid: name, or list of names, for the GUID
:return: None
"""
self.add_table_to_schema(["."])
uid = listwrap(uid)
new_columns = []
for u in uid:
if u == UID:
pass
else:
c = Column(
names={".": u},
type="string",
es_column=typed_column(u, "string"),
es_index=self.fact
)
self.add_column_to_schema(c)
new_columns.append(c)
command = (
"CREATE TABLE " + quote_column(self.fact) + sql_iso(sql_list(
[quoted_GUID + " TEXT "] +
[quoted_UID + " INTEGER"] +
[quote_column(c.es_column) + " " + sql_types[c.type] for c in self.tables["."].schema.columns] +
["PRIMARY KEY " + sql_iso(sql_list(
[quoted_GUID] +
[quoted_UID] +
[quote_column(c.es_column) for c in self.tables["."].schema.columns]
))]
))
)
self.db.execute(command)
def change_schema(self, required_changes):
"""
ACCEPT A LIST OF CHANGES
:param required_changes:
:return: None
"""
required_changes = wrap(required_changes)
for required_change in required_changes:
if required_change.add:
self._add_column(required_change.add)
elif required_change.nest:
column, cname = required_change.nest
self._nest_column(column, cname)
# REMOVE KNOWLEDGE OF PARENT COLUMNS (DONE AUTOMATICALLY)
# TODO: DELETE PARENT COLUMNS? : Done
def _add_column(self, column):
cname = column.names["."]
if column.type == "nested":
# WE ARE ALSO NESTING
self._nest_column(column, [cname]+column.nested_path)
table = concat_field(self.fact, column.nested_path[0])
self.db.execute(
"ALTER TABLE " + quote_column(table) +
" ADD COLUMN " + quote_column(column.es_column) + " " + sql_types[column.type]
)
self.add_column_to_schema(column)
def _nest_column(self, column, new_path):
destination_table = concat_field(self.fact, new_path[0])
existing_table = concat_field(self.fact, column.nested_path[0])
# FIND THE INNER COLUMNS WE WILL BE MOVING
moving_columns = []
for c in self._columns:
if destination_table!=column.es_index and column.es_column==c.es_column:
moving_columns.append(c)
c.nested_path = new_path
# TODO: IF THERE ARE CHILD TABLES, WE MUST UPDATE THEIR RELATIONS TOO?
# DEFINE A NEW TABLE?
# LOAD THE COLUMNS
command = "PRAGMA table_info"+sql_iso(quote_column(destination_table))
details = self.db.query(command)
if not details.data:
command = (
"CREATE TABLE " + quote_column(destination_table) + sql_iso(sql_list([
quoted_UID + "INTEGER",
quoted_PARENT + "INTEGER",
quoted_ORDER + "INTEGER",
"PRIMARY KEY " + sql_iso(quoted_UID),
"FOREIGN KEY " + sql_iso(quoted_PARENT) + " REFERENCES " + quote_column(existing_table) + sql_iso(quoted_UID)
]))
)
self.db.execute(command)
self.add_table_to_schema(new_path)
# TEST IF THERE IS ANY DATA IN THE NEW NESTED ARRAY
if not moving_columns:
return
column.es_index = destination_table
self.db.execute(
"ALTER TABLE " + quote_column(destination_table) +
" ADD COLUMN " + quote_column(column.es_column) + " " + sql_types[column.type]
)
        # Rebuild each previous table without the moved column, effectively dropping it there
for col in moving_columns:
column = col.es_column
tmp_table = "tmp_" + existing_table
columns = list(map(text_type, self.db.query(SQL_SELECT + SQL_STAR + SQL_FROM + quote_column(existing_table) + SQL_LIMIT + SQL_ZERO).header))
self.db.execute(
"ALTER TABLE " + quote_column(existing_table) +
" RENAME TO " + quote_column(tmp_table)
)
self.db.execute(
"CREATE TABLE " + quote_column(existing_table) + " AS " +
SQL_SELECT + sql_list([quote_column(c) for c in columns if c != column]) +
SQL_FROM + quote_column(tmp_table)
)
self.db.execute("DROP TABLE " + quote_column(tmp_table))
def add_table_to_schema(self, nested_path):
table = Table(nested_path)
self.tables[table.name] = table
path = table.name
for c in self._columns:
rel_name = c.names[path] = relative_field(c.names["."], path)
table.schema.add(rel_name, c)
return table
@property
def columns(self):
return self._columns
def add_column_to_schema(self, column):
self._columns.append(column)
abs_name = column.names["."]
for table in self.tables.values():
rel_name = column.names[table.name] = relative_field(abs_name, table.name)
table.schema.add(rel_name, column)
table.columns.append(column)
class Table(Container):
def __init__(self, nested_path):
self.nested_path = nested_path
self._schema = Schema(nested_path)
self.columns = [] # PLAIN DATABASE COLUMNS
@property
def name(self):
"""
:return: THE TABLE NAME RELATIVE TO THE FACT TABLE
"""
return self.nested_path[0]
@property
def schema(self):
return self._schema
class Schema(object):
"""
A Schema MAPS ALL COLUMNS IN SNOWFLAKE FROM THE PERSPECTIVE OF A SINGLE TABLE (a nested_path)
"""
def __init__(self, nested_path):
if nested_path[-1] != '.':
Log.error("Expecting full nested path")
source = Column(
names={".": "."},
type=OBJECT,
es_column="_source",
es_index=nested_path,
nested_path=nested_path
)
guid = Column(
names={".": GUID},
type=STRING,
es_column=GUID,
es_index=nested_path,
nested_path=nested_path
)
self.namespace = {".": {source}, GUID: {guid}}
self._columns = [source, guid]
self.nested_path = nested_path
def add(self, column_name, column):
if column_name != column.names[self.nested_path[0]]:
Log.error("Logic error")
self._columns.append(column)
for np in self.nested_path:
rel_name = column.names[np]
container = self.namespace.setdefault(rel_name, set())
hidden = [
c
for c in container
if len(c.nested_path[0]) < len(np)
]
for h in hidden:
container.remove(h)
container.add(column)
container = self.namespace.setdefault(column.es_column, set())
container.add(column)
def remove(self, column_name, column):
if column_name != column.names[self.nested_path[0]]:
Log.error("Logic error")
self.namespace[column_name] = [c for c in self.namespace[column_name] if c != column]
def __getitem__(self, item):
output = self.namespace.get(item, Null)
return output
def __copy__(self):
output = Schema(self.nested_path)
for k, v in self.namespace.items():
output.namespace[k] = copy(v)
return output
def get_column_name(self, column):
"""
RETURN THE COLUMN NAME, FROM THE PERSPECTIVE OF THIS SCHEMA
:param column:
:return: NAME OF column
"""
return get_property_name(column.names[self.nested_path[0]])
def keys(self):
return set(self.namespace.keys())
def items(self):
return list(self.namespace.items())
@property
def columns(self):
return [c for c in self._columns if c.es_column not in [GUID, '_source']]
def leaves(self, prefix):
head = self.namespace.get(prefix, None)
if not head:
return Null
full_name = list(head)[0].names['.']
return set(
c
for k, cs in self.namespace.items()
if startswith_field(k, full_name) and k != GUID or k == full_name
for c in cs
if c.type not in [OBJECT, EXISTS]
)
def map_to_sql(self, var=""):
"""
RETURN A MAP FROM THE RELATIVE AND ABSOLUTE NAME SPACE TO COLUMNS
"""
origin = self.nested_path[0]
if startswith_field(var, origin) and origin != var:
var = relative_field(var, origin)
fact_dict = {}
origin_dict = {}
for k, cs in self.namespace.items():
for c in cs:
if c.type in STRUCT:
continue
if startswith_field(get_property_name(k), var):
origin_dict.setdefault(c.names[origin], []).append(c)
if origin != c.nested_path[0]:
fact_dict.setdefault(c.names["."], []).append(c)
elif origin == var:
origin_dict.setdefault(concat_field(var, c.names[origin]), []).append(c)
if origin != c.nested_path[0]:
fact_dict.setdefault(concat_field(var, c.names["."]), []).append(c)
return set_default(origin_dict, fact_dict)
| mpl-2.0 | 6,842,269,990,975,783,000 | 33.829268 | 152 | 0.549876 | false | 3.893366 | false | false | false |
reiven/pungabot | modules/module_tell.py | 1 | 1418 | # -*- coding: utf-8 -*-
import string
from datetime import datetime
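# NB: dbCursor and getNick are not imported here; they appear to be injected
# into the module namespace by the bot core at load time.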
def sanitize(buf):
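    # NB: this helper is currently unused within this module.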
return filter(lambda x: x in string.printable, buf)
def handle_userJoined(bot, user, channel):
"""Someone Joined, lets salute him"""
dbCursor.execute("SELECT * FROM tell WHERE tell_to = '%s' AND tell_channel = '%s'" % (getNick(user), channel ))
rows = dbCursor.fetchall()
for row in rows:
        bot.say(channel, '%s: %s left this message for you on %s at %s:' % (
getNick(user),
row[1].encode("utf-8"),
row[3].split()[0],
row[3].split()[1],
))
bot.say(channel, '"%s"' % row[4].encode("utf-8"))
dbCursor.execute("DELETE FROM tell WHERE tell_id = '%s'" % row[0])
def command_tell(bot, user, channel, args):
"""tell something to user when he/she rejoin the channel"""
if len(args.split()) >= 2:
tell_to, args = args.split(' ', 1)
dbCursor.execute("INSERT INTO tell VALUES (NULL, ?, ?, ?, ?, ?)", (
getNick(user),
unicode(tell_to, 'utf-8'),
datetime.now().strftime("%d-%m-%Y %H:%M"),
unicode(args, 'utf-8'),
channel
))
        return bot.say(channel, '%s, I will tell that to %s' % (getNick(user), unicode(tell_to, 'utf-8') ))
else:
        return bot.say(channel, '%s, who is the message for, and what should it say?' % getNick(user))
| gpl-3.0 | 1,568,273,911,589,310,500 | 33.585366 | 115 | 0.550071 | false | 3.425121 | false | false | false |