repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
ctiller/grpc | examples/python/multiplex/helloworld_pb2.py | 146 | 3912 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: helloworld.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='helloworld.proto',
package='helloworld',
syntax='proto3',
serialized_pb=_b('\n\x10helloworld.proto\x12\nhelloworld\"\x1c\n\x0cHelloRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t2I\n\x07Greeter\x12>\n\x08SayHello\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply\"\x00\x42\x36\n\x1bio.grpc.examples.helloworldB\x0fHelloWorldProtoP\x01\xa2\x02\x03HLWb\x06proto3')
)
_HELLOREQUEST = _descriptor.Descriptor(
name='HelloRequest',
full_name='helloworld.HelloRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='helloworld.HelloRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=32,
serialized_end=60,
)
_HELLOREPLY = _descriptor.Descriptor(
name='HelloReply',
full_name='helloworld.HelloReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='helloworld.HelloReply.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=62,
serialized_end=91,
)
DESCRIPTOR.message_types_by_name['HelloRequest'] = _HELLOREQUEST
DESCRIPTOR.message_types_by_name['HelloReply'] = _HELLOREPLY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
HelloRequest = _reflection.GeneratedProtocolMessageType('HelloRequest', (_message.Message,), dict(
DESCRIPTOR = _HELLOREQUEST,
__module__ = 'helloworld_pb2'
# @@protoc_insertion_point(class_scope:helloworld.HelloRequest)
))
_sym_db.RegisterMessage(HelloRequest)
HelloReply = _reflection.GeneratedProtocolMessageType('HelloReply', (_message.Message,), dict(
DESCRIPTOR = _HELLOREPLY,
__module__ = 'helloworld_pb2'
# @@protoc_insertion_point(class_scope:helloworld.HelloReply)
))
_sym_db.RegisterMessage(HelloReply)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.helloworldB\017HelloWorldProtoP\001\242\002\003HLW'))
_GREETER = _descriptor.ServiceDescriptor(
name='Greeter',
full_name='helloworld.Greeter',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=93,
serialized_end=166,
methods=[
_descriptor.MethodDescriptor(
name='SayHello',
full_name='helloworld.Greeter.SayHello',
index=0,
containing_service=None,
input_type=_HELLOREQUEST,
output_type=_HELLOREPLY,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_GREETER)
DESCRIPTOR.services_by_name['Greeter'] = _GREETER
# @@protoc_insertion_point(module_scope)
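# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the protoc output above). The generated
# classes follow the standard protobuf Python message API, so building and
# round-tripping a message looks like this; the concrete value is illustrative.
if __name__ == '__main__':
    request = HelloRequest(name='world')           # keyword-initialised message
    wire_bytes = request.SerializeToString()       # encode to the protobuf wire format
    decoded = HelloRequest.FromString(wire_bytes)  # decode back into a message
    assert decoded.name == 'world'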
| apache-2.0 |
KyleAMoore/KanjiNani | Android/.buildozer/android/platform/build/build/other_builds/kivy-python3crystax-sdl2/armeabi-v7a/kivy/examples/canvas/lines.py | 16 | 6324 |
'''
Line (SmoothLine) Experiment
============================
This demonstrates the experimental and unfinished SmoothLine feature
for fast line drawing. You should see a multi-segment
path at the top of the screen, and sliders and buttons along the bottom.
You can click to add new points to the segment, change the transparency
and width of the line, or hit 'Animate' to see a set of sine and cosine
animations. The Cap and Joint buttons don't work: SmoothLine has not
implemented these features yet.
'''
from kivy.app import App
from kivy.properties import OptionProperty, NumericProperty, ListProperty, \
BooleanProperty
from kivy.uix.floatlayout import FloatLayout
from kivy.lang import Builder
from kivy.clock import Clock
from math import cos, sin
Builder.load_string('''
<LinePlayground>:
canvas:
Color:
rgba: .4, .4, 1, root.alpha
Line:
points: self.points
joint: self.joint
cap: self.cap
width: self.linewidth
close: self.close
Color:
rgba: .8, .8, .8, root.alpha_controlline
Line:
points: self.points
close: self.close
Color:
rgba: 1, .4, .4, root.alpha
Line:
points: self.points2
joint: self.joint
cap: self.cap
width: self.linewidth
close: self.close
GridLayout:
cols: 2
size_hint: 1, None
height: 44 * 5
GridLayout:
cols: 2
Label:
text: 'Alpha'
Slider:
value: root.alpha
on_value: root.alpha = float(args[1])
min: 0.
max: 1.
Label:
text: 'Alpha Control Line'
Slider:
value: root.alpha_controlline
on_value: root.alpha_controlline = float(args[1])
min: 0.
max: 1.
Label:
text: 'Width'
Slider:
value: root.linewidth
on_value: root.linewidth = args[1]
min: 1
max: 40
Label:
text: 'Cap'
GridLayout:
rows: 1
ToggleButton:
group: 'cap'
text: 'none'
on_press: root.cap = self.text
ToggleButton:
group: 'cap'
text: 'round'
on_press: root.cap = self.text
ToggleButton:
group: 'cap'
text: 'square'
on_press: root.cap = self.text
Label:
text: 'Joint'
GridLayout:
rows: 1
ToggleButton:
group: 'joint'
text: 'none'
on_press: root.joint = self.text
ToggleButton:
group: 'joint'
text: 'round'
on_press: root.joint = self.text
ToggleButton:
group: 'joint'
text: 'miter'
on_press: root.joint = self.text
ToggleButton:
group: 'joint'
text: 'bevel'
on_press: root.joint = self.text
Label:
text: 'Close'
ToggleButton:
text: 'Close line'
on_press: root.close = self.state == 'down'
AnchorLayout:
GridLayout:
cols: 1
size_hint: None, None
size: self.minimum_size
ToggleButton:
size_hint: None, None
size: 100, 44
text: 'Animate'
on_state: root.animate(self.state == 'down')
Button:
size_hint: None, None
size: 100, 44
text: 'Clear'
on_press: root.points = root.points2 = []
''')
class LinePlayground(FloatLayout):
alpha_controlline = NumericProperty(1.0)
alpha = NumericProperty(0.5)
close = BooleanProperty(False)
points = ListProperty([(500, 500),
[300, 300, 500, 300],
[500, 400, 600, 400]])
points2 = ListProperty([])
joint = OptionProperty('none', options=('round', 'miter', 'bevel', 'none'))
cap = OptionProperty('none', options=('round', 'square', 'none'))
linewidth = NumericProperty(10.0)
dt = NumericProperty(0)
_update_points_animation_ev = None
def on_touch_down(self, touch):
if super(LinePlayground, self).on_touch_down(touch):
return True
touch.grab(self)
self.points.append(touch.pos)
return True
def on_touch_move(self, touch):
if touch.grab_current is self:
self.points[-1] = touch.pos
return True
return super(LinePlayground, self).on_touch_move(touch)
def on_touch_up(self, touch):
if touch.grab_current is self:
touch.ungrab(self)
return True
return super(LinePlayground, self).on_touch_up(touch)
def animate(self, do_animation):
if do_animation:
self._update_points_animation_ev = Clock.schedule_interval(
self.update_points_animation, 0)
elif self._update_points_animation_ev is not None:
self._update_points_animation_ev.cancel()
def update_points_animation(self, dt):
cy = self.height * 0.6
cx = self.width * 0.1
w = self.width * 0.8
step = 20
points = []
points2 = []
self.dt += dt
for i in range(int(w / step)):
x = i * step
points.append(cx + x)
points.append(cy + cos(x / w * 8. + self.dt) * self.height * 0.2)
points2.append(cx + x)
points2.append(cy + sin(x / w * 8. + self.dt) * self.height * 0.2)
self.points = points
self.points2 = points2
class TestLineApp(App):
def build(self):
return LinePlayground()
if __name__ == '__main__':
TestLineApp().run()
| gpl-3.0 |
baidu/Paddle | python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py | 3 | 6585 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle.fluid.core as core
import numpy as np
import paddle.fluid.layers as layers
from paddle.fluid.framework import Program, program_guard
from paddle.fluid.executor import Executor
from paddle.fluid.backward import append_backward
from paddle.fluid.layers.control_flow import split_lod_tensor
from paddle.fluid.layers.control_flow import merge_lod_tensor
class TestCPULoDTensorArrayOps(unittest.TestCase):
def place(self):
return core.CPUPlace()
def test_split_and_merge_lod_tensor_no_lod(self):
tensor = core.LoDTensor()
tensor.set(np.arange(10).reshape(10, 1).astype('int32'), self.place())
mask_np = np.array([0, 0, 1, 1, 1, 1, 0, 0, 0, 0]).astype('bool')
mask_np = np.expand_dims(mask_np, axis=1)
mask = core.LoDTensor()
mask.set(mask_np, self.place())
expect_true_tensor = np.array([2, 3, 4, 5]).astype('int32')
expect_true_tensor = np.expand_dims(expect_true_tensor, axis=1)
expect_true = core.LoDTensor()
expect_true.set(expect_true_tensor, self.place())
expect_false_tensor = np.array([0, 1, 6, 7, 8, 9]).astype('int32')
expect_false_tensor = np.expand_dims(expect_false_tensor, axis=1)
expect_false = core.LoDTensor()
expect_false.set(expect_false_tensor, self.place())
self.main(
tensor=tensor,
mask=mask,
expect_true=expect_true,
expect_false=expect_false,
expect_out=tensor)
def test_split_and_merge_lod_tensor_level_0(self):
tensor = core.LoDTensor()
tensor.set(np.arange(10).reshape(10, 1).astype('int32'), self.place())
tensor.set_recursive_sequence_lengths([[3, 6, 1]])
mask_np = np.array([0, 1, 0]).astype('bool')
mask_np = np.expand_dims(mask_np, axis=1)
mask = core.LoDTensor()
mask.set(mask_np, self.place())
expect_true_tensor = np.array([3, 4, 5, 6, 7, 8]).astype('int32')
expect_true_tensor = np.expand_dims(expect_true_tensor, axis=1)
expect_true = core.LoDTensor()
expect_true.set(expect_true_tensor, self.place())
expect_true.set_recursive_sequence_lengths([[6]])
expect_false_tensor = np.array([0, 1, 2, 9]).astype('int32')
expect_false_tensor = np.expand_dims(expect_false_tensor, axis=1)
expect_false_lod = [[3, 1]]
expect_false = core.LoDTensor()
expect_false.set(expect_false_tensor, self.place())
expect_false.set_recursive_sequence_lengths(expect_false_lod)
self.main(
tensor=tensor,
mask=mask,
expect_true=expect_true,
expect_false=expect_false,
expect_out=tensor)
def main(self, tensor, mask, expect_true, expect_false, expect_out,
level=0):
place = self.place()
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[1])
x.persistable = True
y = layers.data(name='y', shape=[1])
y.persistable = True
out_true, out_false = split_lod_tensor(input=x, mask=y, level=level)
out_true.persistable = True
out_false.persistable = True
out = merge_lod_tensor(
in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
out.persistable = True
exe = Executor(place)
scope = core.Scope()
exe.run(program,
feed={'x': tensor,
'y': mask},
scope=scope,
return_numpy=False)
var_true = scope.find_var(out_true.name).get_tensor()
var_false = scope.find_var(out_false.name).get_tensor()
var_out = scope.find_var(out.name).get_tensor()
self.check_tensor_same(var_true, expect_true)
self.check_tensor_same(var_false, expect_false)
self.check_tensor_same(var_out, expect_out)
def check_tensor_same(self, actual, expect):
self.assertTrue(np.allclose(np.array(actual), np.array(expect)))
self.assertEqual(actual.recursive_sequence_lengths(),
expect.recursive_sequence_lengths())
class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase):
def test_grad(self):
place = core.CPUPlace()
program = Program()
with program_guard(program):
x = layers.data(
name='x', shape=[1], dtype='float32', stop_gradient=False)
y = layers.data(
name='y', shape=[1], dtype='bool', stop_gradient=False)
level = 0
out_true, out_false = split_lod_tensor(input=x, mask=y, level=level)
out = merge_lod_tensor(
in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
mean = layers.mean(out)
append_backward(mean)
tensor = core.LoDTensor()
tensor.set(np.arange(10).reshape(10, 1).astype('float32'), place)
tensor.set_recursive_sequence_lengths([[3, 6, 1]])
mask_np = np.array([0, 1, 0]).astype('bool')
mask_np = np.expand_dims(mask_np, axis=1)
mask = core.LoDTensor()
mask.set(mask_np, place)
exe = Executor(place)
scope = core.Scope()
g_vars = program.global_block().var(x.name + "@GRAD")
g_out = [
item.sum()
for item in map(np.array,
exe.run(program,
feed={'x': tensor,
'y': mask},
fetch_list=[g_vars],
scope=scope,
return_numpy=False))
]
g_out_sum = np.array(g_out).sum()
self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
DrDub/icsisumm | icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk_contrib/classifier/oner.py | 9 | 2417 |
# Natural Language Toolkit - OneR
# Capable of classifying the test or gold data using the OneR algorithm
#
# Author: Sumukh Ghodke <sumukh dot ghodke at gmail dot com>
#
# URL: <http://nltk.sf.net>
# This software is distributed under GPL, for license information see LICENSE.TXT
from nltk_contrib.classifier import instances as ins, decisionstump as ds, Classifier
from nltk_contrib.classifier.exceptions import invaliddataerror as inv
class OneR(Classifier):
def __init__(self, training, attributes, klass):
Classifier.__init__(self, training, attributes, klass)
self.__best_decision_stump = None
def train(self):
Classifier.train(self)
self.__best_decision_stump = self.best_decision_stump(self.training)
def classify(self, instances):
for instance in instances:
instance.classified_klass = self.__best_decision_stump.klass(instance)
def best_decision_stump(self, instances, ignore_attributes = [], algorithm = 'minimum_error'):
decision_stumps = self.possible_decision_stumps(ignore_attributes, instances)
try:
return getattr(self, algorithm)(decision_stumps)
except AttributeError:
raise inv.InvalidDataError('Invalid algorithm to find the best decision stump. ' + str(algorithm) + ' is not defined.')
def possible_decision_stumps(self, ignore_attributes, instances):
"""
Returns a list of decision stumps, one for each attribute ignoring the ones present in the
ignore list. Each decision stump maintains a count of instances having particular attribute
values.
"""
decision_stumps = self.attributes.empty_decision_stumps(ignore_attributes, self.klass);
for stump in decision_stumps:
for instance in instances:
stump.update_count(instance)
return decision_stumps
def minimum_error(self, decision_stumps):
"""
Returns the decision stump with minimum error
"""
error, min_error_stump = 1, None
for decision_stump in decision_stumps:
new_error = decision_stump.error()
if new_error < error:
error = new_error
min_error_stump = decision_stump
return min_error_stump
def is_trained(self):
return self.__best_decision_stump is not None
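# ---------------------------------------------------------------------------
# Hedged, self-contained sketch of the OneR idea implemented above: for each
# attribute build a one-level rule that predicts the majority class per
# attribute value, then keep the attribute whose rule makes the fewest
# training errors (the 'minimum_error' strategy). Plain dicts are used purely
# for illustration; this is not part of the nltk_contrib module.
def one_r_sketch(rows, class_key):
    """rows: non-empty list of dicts (attribute -> value); class_key: class attribute name."""
    best_attr, best_rule, best_errors = None, None, None
    for attr in [key for key in rows[0] if key != class_key]:
        counts = {}                                   # attribute value -> {class: frequency}
        for row in rows:
            per_value = counts.setdefault(row[attr], {})
            per_value[row[class_key]] = per_value.get(row[class_key], 0) + 1
        rule, errors = {}, 0
        for value, class_counts in counts.items():
            majority = max(class_counts, key=class_counts.get)
            rule[value] = majority                    # predict the majority class for this value
            errors += sum(class_counts.values()) - class_counts[majority]
        if best_errors is None or errors < best_errors:
            best_attr, best_rule, best_errors = attr, rule, errors
    return best_attr, best_rule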
| gpl-3.0 |
madd-games/apocalypse | scripts/kernel.py | 1 | 1708 |
# kernel.py
# Embeds the OpenCL code as a string inside the Apocalypse binary
# when the OpenCL functionality is enabled.
import sys, os
# Does some pre-processing
class KernelProcessor:
def __init__(self):
self.imported = []
apocMain = None
gameMain = ""
try:
f = open("Apoc/Kernels/ApocMain.cl")
apocMain = f.read()
f.close()
except IOError:
print "Error: for some reason, Apoc/Kernels/ApocMain.cl could not be opened."
sys.exit(1)
try:
f = open("Game/Kernels/Main.cl")
gameMain = f.read()
f.close()
except IOError:
pass
self.finalOutput = "const char *kernelCode = \"\\\n"
code = self.feed("ApocMain", apocMain) + self.feed("Main", gameMain)
for line in code.splitlines():
self.finalOutput += line.replace("\\", "\\\\").replace("\"", "\\\"") + "\\n\\\n"
self.finalOutput += "\";"
def feed(self, thismod, code):
output = ""
lines = code.splitlines()
lineno = 0
for line in lines:
lineno += 1
if line.startswith("#use "):
modname = line[5:]
if modname not in self.imported:
f = None
try:
f = open("Apoc/Kernels/%s.cl" % modname, "rb")
except IOError:
try:
f = open("Game/Kernels/%s.cl" % modname, "rb")
except IOError:
print "Kernel %s, line %d: module %s not found" % (thismod, lineno, modname)
sys.exit(1)
data = f.read()
f.close()
output += self.feed(modname, data)
self.imported.append(modname)
else:
output += line + "\n"
return output
def compileKernels(target, compiler):
p = KernelProcessor()
f = open("temp.cpp", "wb")
f.write(p.finalOutput)
f.close()
os.system("%s -c temp.cpp -o build-%s/kernels.o -w" % (compiler, target))
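# Hedged usage note (illustrative, not part of the original script): a build
# script would typically call
#     compileKernels("linux", "g++")
# which expects Apoc/Kernels/ApocMain.cl to exist and a build-linux/ output
# directory to be present. Inside the .cl sources, a line of the form
# "#use <module>" pulls in Apoc/Kernels/<module>.cl (or Game/Kernels/<module>.cl)
# exactly once before the embedded kernel string is written to temp.cpp.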
| bsd-2-clause |
AICP/kernel_yu_msm8916 | scripts/rt-tester/rt-tester.py | 11005 | 5307 |
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
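# Hedged sketch of the test-specification format parsed above (inferred from
# the split(":") loop): each non-comment line reads "cmd: opcode: threadid: data",
# where cmd is C (issue a command), T (test a status value once) or W (wait
# until the status value matches). The concrete opcodes and values below are
# illustrative only.
#
#   C: schedfifo:  0: 80
#   C: locknowait: 0: 0
#   T: locked:     0: 0
#   C: unlock:     0: 0
#   T: unlocked:   0: 0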
| gpl-2.0 |
barbagroup/pygbe | pygbe/util/semi_analytical.py | 1 | 7124 |
"""
It contains the functions needed to compute the near singular integrals in
python. For big problems these functions were written in C++ and we call them
through the pycuda interface. At the end you have a commented test to compare
both types.
"""
import numpy
from numpy import exp, pi
from pygbe.util.semi_analyticalwrap import SA_wrap_arr
def GQ_1D(K):
"""
Gauss quadrature in 1D.
Arguments
----------
K: int, desired number of gauss points.
Returns
--------
x: array, locations of the Gauss points.
w: array, weights of the Gauss points.
"""
T = numpy.zeros((K, K))
nvec = numpy.arange(1., K)
beta = 0.5 / numpy.sqrt(1 - 1 / (2 * nvec)**2)
T = numpy.diag(beta, 1) + numpy.diag(beta, -1)
d, v = numpy.linalg.eig(T)
w = 2 * v[0]**2
x = d
return x, w
def lineInt(z, x, v1, v2, kappa, xk, wk):
"""
Line integral to solve the non-analytical part (integral in the angle) in
the semi_analytical integrals needed to calculate the potentials.
Arguments
----------
z : float, distance (height) between the plane of the triangle and the
collocation point.
x : float, position of the collocation point.
v1 : float, low extreme integral value.
v2 : float, high extreme integral value.
kappa : float, reciprocal of Debye length.
xk : float, position of the gauss point.
wk : float, weight of the gauss point.
Returns
--------
phi_Y : float, potential due to a Yukawa kernel.
dphi_Y: float, normal derivative of potential due to a Yukawa kernel.
phi_L : float, potential due to a Laplace kernel.
dphi_L: float, normal derivative of potential due to a Laplace kernel.
"""
theta1 = numpy.arctan2(v1, x)
theta2 = numpy.arctan2(v2, x)
dtheta = theta2 - theta1
absZ = abs(z)
if absZ < 1e-10: signZ = 0
else: signZ = z / absZ
dtheta = theta2 - theta1
thetam = (theta2 + theta1) / 2.
thetak = dtheta / 2 * xk + thetam
Rtheta = x / numpy.cos(thetak)
dy = x * numpy.tan(thetak)
R = numpy.sqrt(Rtheta**2 + z**2)
phi_Y = numpy.sum(-wk * (exp(-kappa * R) - exp(-kappa * absZ)) / kappa)
dphi_Y = -numpy.sum(wk *
(z / R * exp(-kappa * R) - exp(-kappa * absZ) * signZ))
phi_L = numpy.sum(wk * (R - absZ))
dphi_L = -numpy.sum(wk * (z / R - signZ))
phi_Y *= dtheta / 2
dphi_Y *= dtheta / 2
phi_L *= dtheta / 2
dphi_L *= dtheta / 2
return phi_Y, dphi_Y, phi_L, dphi_L
def intSide(v1, v2, p, kappa, xk, wk):
"""
It solves the integral line over one side of the triangle .
Arguments
----------
v1 : float, low extreme integral value.
v2 : float, high extreme integral value.
p : float, distance (height) between the plane of the triangle and the
collocation point.
kappa : float, reciprocal of Debye length.
xk : float, position of the gauss point.
wk : float, weight of the gauss point.
Returns
--------
phi_Y : float, potential due to a Yukawa kernel.
dphi_Y: float, normal derivative of potential due to a Yukawa kernel.
phi_L : float, potential due to a Laplace kernel.
dphi_L: float, normal derivative of potential due to a Laplace kernel.
"""
v21 = v2 - v1
L21 = numpy.linalg.norm(v21)
v21u = v21 / L21
orthog = numpy.cross(numpy.array([0, 0, 1]), v21u)
alpha = -numpy.dot(v21, v1) / L21**2
rOrthog = v1 + alpha * v21
d_toEdge = numpy.linalg.norm(rOrthog)
side_vec = numpy.cross(v21, -v1)
rotateToVertLine = numpy.zeros((3, 3))
rotateToVertLine[:, 0] = orthog
rotateToVertLine[:, 1] = v21u
rotateToVertLine[:, 2] = [0., 0., 1.]
v1new = numpy.dot(rotateToVertLine, v1)
if v1new[0] < 0:
v21u = -v21u
orthog = -orthog
rotateToVertLine[:, 0] = orthog
rotateToVertLine[:, 1] = v21u
v1new = numpy.dot(rotateToVertLine, v1)
v2new = numpy.dot(rotateToVertLine, v2)
rOrthognew = numpy.dot(rotateToVertLine, rOrthog)
x = v1new[0]
if v1new[1] > 0 and v2new[1] < 0 or v1new[1] < 0 and v2new[1] > 0:
phi1_Y, dphi1_Y, phi1_L, dphi1_L = lineInt(p, x, 0, v1new[1], kappa,
xk, wk)
phi2_Y, dphi2_Y, phi2_L, dphi2_L = lineInt(p, x, v2new[1], 0, kappa,
xk, wk)
phi_Y = phi1_Y + phi2_Y
dphi_Y = dphi1_Y + dphi2_Y
phi_L = phi1_L + phi2_L
dphi_L = dphi1_L + dphi2_L
else:
phi_Y, dphi_Y, phi_L, dphi_L = lineInt(p, x, v1new[1], v2new[1], kappa,
xk, wk)
phi_Y = -phi_Y
dphi_Y = -dphi_Y
phi_L = -phi_L
dphi_L = -dphi_L
return phi_Y, dphi_Y, phi_L, dphi_L
def SA_arr(y, x, kappa, same, xk, wk):
"""
It computes the integral line for all the sides of a triangle and for all
the collocation points.
Arguments
----------
y : array, vertices coordinates of the triangles.
x : array, collocation points.
kappa : float, reciprocal of Debye length.
same : int, 1 if the collocation point is in the panel of integration,
0 otherwise.
xk : float, position of the gauss point.
wk : float, weight of the gauss point.
Returns
--------
phi_Y : float, potential due to a Yukawa kernel.
dphi_Y: float, normal derivative of potential due to a Yukawa kernel.
phi_L : float, potential due to a Laplace kernel.
dphi_L: float, normal derivative of potential due to a Laplace kernel.
"""
N = len(x)
phi_Y = numpy.zeros(N)
dphi_Y = numpy.zeros(N)
phi_L = numpy.zeros(N)
dphi_L = numpy.zeros(N)
# Put first vertex at origin
y_panel = y - y[0]
x_panel = x - y[0]
# Find panel coordinate system X: 0->1
X = y_panel[1]
X = X / numpy.linalg.norm(X)
Z = numpy.cross(y_panel[1], y_panel[2])
Z = Z / numpy.linalg.norm(Z)
Y = numpy.cross(Z, X)
# Rotate coordinate system to match panel plane
rot_matrix = numpy.array([X, Y, Z])
panel_plane = numpy.transpose(numpy.dot(rot_matrix, numpy.transpose(
y_panel)))
x_plane = numpy.transpose(numpy.dot(rot_matrix, numpy.transpose(x_panel)))
for i in range(N):
# Shift origin so it matches collocation point
panel_final = panel_plane - numpy.array([x_plane[i, 0], x_plane[i, 1],
0])
# Loop over sides
for j in range(3):
if j == 2: nextJ = 0
else: nextJ = j + 1
phi_Y_aux, dphi_Y_aux, phi_L_aux, dphi_L_aux = intSide(
panel_final[j], panel_final[nextJ], x_plane[i, 2], kappa, xk,
wk)
phi_Y[i] += phi_Y_aux
dphi_Y[i] += dphi_Y_aux
phi_L[i] += phi_L_aux
dphi_L[i] += dphi_L_aux
if same[i] == 1:
dphi_Y[i] = 2 * pi
dphi_L[i] = -2 * pi
return phi_Y, dphi_Y, phi_L, dphi_L
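# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): GQ_1D returns
# Gauss-Legendre points and weights on [-1, 1], so the weights sum to the
# interval length and low-order polynomials are integrated exactly.
if __name__ == '__main__':
    xk, wk = GQ_1D(5)
    assert abs(numpy.sum(wk) - 2.0) < 1e-10                  # integral of 1 over [-1, 1]
    assert abs(numpy.sum(wk * xk**2) - 2.0 / 3.0) < 1e-10    # integral of x**2 over [-1, 1]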
| bsd-3-clause |
willprice/arduino-sphere-project | scripts/example_direction_finder/temboo/Library/Twitter/FriendsAndFollowers/CreateFriendship.py | 5 | 4901 |
# -*- coding: utf-8 -*-
###############################################################################
#
# CreateFriendship
# Allows you to follow another Twitter user by specifying a Twitter user id or screen name.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class CreateFriendship(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the CreateFriendship Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(CreateFriendship, self).__init__(temboo_session, '/Library/Twitter/FriendsAndFollowers/CreateFriendship')
def new_input_set(self):
return CreateFriendshipInputSet()
def _make_result_set(self, result, path):
return CreateFriendshipResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return CreateFriendshipChoreographyExecution(session, exec_id, path)
class CreateFriendshipInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the CreateFriendship
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessTokenSecret(self, value):
"""
Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret provided by Twitter or retrieved during the OAuth process.)
"""
super(CreateFriendshipInputSet, self)._set_input('AccessTokenSecret', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token provided by Twitter or retrieved during the OAuth process.)
"""
super(CreateFriendshipInputSet, self)._set_input('AccessToken', value)
def set_ConsumerKey(self, value):
"""
Set the value of the ConsumerKey input for this Choreo. ((required, string) The API Key (or Consumer Key) provided by Twitter.)
"""
super(CreateFriendshipInputSet, self)._set_input('ConsumerKey', value)
def set_ConsumerSecret(self, value):
"""
Set the value of the ConsumerSecret input for this Choreo. ((required, string) The API Secret (or Consumer Secret) provided by Twitter.)
"""
super(CreateFriendshipInputSet, self)._set_input('ConsumerSecret', value)
def set_Follow(self, value):
"""
Set the value of the Follow input for this Choreo. ((optional, boolean) A boolean flag that enables notifications for the target user when set to true.)
"""
super(CreateFriendshipInputSet, self)._set_input('Follow', value)
def set_ScreenName(self, value):
"""
Set the value of the ScreenName input for this Choreo. ((conditional, string) The screen name for the friend you want to create a friendship with. Required if UserId isn't specified.)
"""
super(CreateFriendshipInputSet, self)._set_input('ScreenName', value)
def set_UserId(self, value):
"""
Set the value of the UserId input for this Choreo. ((conditional, string) The user id for the friend you want to create a friendship with. Required if ScreenName isn't specified.)
"""
super(CreateFriendshipInputSet, self)._set_input('UserId', value)
class CreateFriendshipResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the CreateFriendship Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Twitter.)
"""
return self._output.get('Response', None)
class CreateFriendshipChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return CreateFriendshipResultSet(response, path)
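# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated Library file). The session
# import path and the execute_with_results() call follow the usual Temboo SDK
# pattern and should be treated as assumptions here; the credential strings
# and screen name are placeholders.
if __name__ == '__main__':
    from temboo.core.session import TembooSession   # assumed SDK import path
    session = TembooSession("ACCOUNT_NAME", "APP_KEY_NAME", "APP_KEY_VALUE")
    choreo = CreateFriendship(session)
    choreo_inputs = choreo.new_input_set()
    choreo_inputs.set_AccessToken("ACCESS_TOKEN")
    choreo_inputs.set_AccessTokenSecret("ACCESS_TOKEN_SECRET")
    choreo_inputs.set_ConsumerKey("CONSUMER_KEY")
    choreo_inputs.set_ConsumerSecret("CONSUMER_SECRET")
    choreo_inputs.set_ScreenName("example_user")
    results = choreo.execute_with_results(choreo_inputs)    # assumed execute call
    print(results.get_Response())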
| gpl-2.0 |
ClearingHouse/clearinghoused | lib/config.py | 1 | 3846 |
import sys
import os
"""Variables prefixed with `DEFAULT` should be able to be overridden by
configuration file and command‐line arguments."""
UNIT = 100000000 # The same across assets.
# Versions
VERSION_MAJOR = 9
VERSION_MINOR = 47
VERSION_REVISION = 0
VERSION_STRING = str(VERSION_MAJOR) + '.' + str(VERSION_MINOR) + '.' + str(VERSION_REVISION)
# Counterparty protocol
TXTYPE_FORMAT = '>I'
TWO_WEEKS = 2 * 7 * 24 * 3600
MAX_EXPIRATION = 3600 * 60 # Two months
MEMPOOL_BLOCK_HASH = 'mempool'
MEMPOOL_BLOCK_INDEX = 9999999
# SQLite3
MAX_INT = 2**63 - 1
# Bitcoin Core
OP_RETURN_MAX_SIZE = 80 # bytes
# Currency agnosticism
BTC = 'VIA'
XCP = 'XCH'
BTC_NAME = 'Viacoin'
BTC_CLIENT = 'viacoind'
XCP_NAME = 'ClearingHouse'
XCP_CLIENT = 'clearinghoused'
DEFAULT_RPC_PORT_TESTNET = 17300
DEFAULT_RPC_PORT = 7300
DEFAULT_BACKEND_RPC_PORT_TESTNET = 25222
DEFAULT_BACKEND_RPC_PORT = 5222
UNSPENDABLE_TESTNET = 't7FjKY4NpTqUETtYCh1mrGwRMKzX9hkGd3'
UNSPENDABLE_MAINNET = 'Via2XCHoqQxACVuXf4vrajVDJetwVgxLMz'
ADDRESSVERSION_TESTNET = b'\x7f'
PRIVATEKEY_VERSION_TESTNET = b'\xff'
ADDRESSVERSION_MAINNET = b'\x47'
PRIVATEKEY_VERSION_MAINNET = b'\xc7'
MAGIC_BYTES_TESTNET = b'\xa9\xc5\xef\x92' # For bip-0010
MAGIC_BYTES_MAINNET = b'\x0f\x68\xc6\xcb' # For bip-0010
BLOCK_FIRST_TESTNET_TESTCOIN = 73800
BURN_START_TESTNET_TESTCOIN = 73800
BURN_END_TESTNET_TESTCOIN = 65700000 # Fifty years
BLOCK_FIRST_TESTNET = 73800
BURN_START_TESTNET = 73800
BURN_END_TESTNET = 65700000 # Fifty years
BLOCK_FIRST_MAINNET_TESTCOIN = 89100
BURN_START_MAINNET_TESTCOIN = 89100
BURN_END_MAINNET_TESTCOIN = 65700000 # A long time
BLOCK_FIRST_MAINNET = 86000
BURN_START_MAINNET = 89100
BURN_END_MAINNET = BURN_START_MAINNET + (3600 * 45)
# Protocol defaults
# NOTE: If the DUST_SIZE constants are changed, they MUST also be changed in xchblockd/lib/config.py as well
# TODO: This should be updated, given their new configurability.
# TODO: The dust values should be lowered by 90%, once transactions with smaller outputs start confirming faster: <https://github.com/mastercoin-MSC/spec/issues/192>
DEFAULT_REGULAR_DUST_SIZE = 56000 # TODO: This is just a guess. I got it down to 5530 satoshis.
DEFAULT_MULTISIG_DUST_SIZE = 2 * 56000 # <https://bitcointalk.org/index.php?topic=528023.msg7469941#msg7469941>
DEFAULT_OP_RETURN_VALUE = 0
DEFAULT_FEE_PER_KB = 100000 # Viacoin Core default is 100000.
# UI defaults
DEFAULT_FEE_FRACTION_REQUIRED = .009 # 0.90%
DEFAULT_FEE_FRACTION_PROVIDED = .01 # 1.00%
# Custom exit codes
EXITCODE_UPDATE_REQUIRED = 5
CONSENSUS_HASH_SEED = 'We can only see a short distance ahead, but we can see plenty there that needs to be done.'
CONSENSUS_HASH_VERSION = 2
CHECKPOINTS_MAINNET = {
BLOCK_FIRST_MAINNET: {'ledger_hash': '766ff0a9039521e3628a79fa669477ade241fc4c0ae541c3eae97f34b547b0b7', 'txlist_hash': '766ff0a9039521e3628a79fa669477ade241fc4c0ae541c3eae97f34b547b0b7'},
400000: {'ledger_hash': 'f513bfeec7de32e40b1b8db2c480b999e59be77f994964651a52c83c993724d0', 'txlist_hash': '07c47d5ea69195760d9062975464ec83387e3fa0b99398c5fd551e6a3604f1f4'}
}
CHECKPOINTS_TESTNET = {
BLOCK_FIRST_TESTNET: {'ledger_hash': '766ff0a9039521e3628a79fa669477ade241fc4c0ae541c3eae97f34b547b0b7', 'txlist_hash': '766ff0a9039521e3628a79fa669477ade241fc4c0ae541c3eae97f34b547b0b7'},
370000: {'ledger_hash': '96c7108c7285aa7b85977516c3b248e9a47a663e8db6276ee96da8d548b45b2a', 'txlist_hash': '285a348d1296e058a544f886784296b24f9d4456ced4ec8a9a3be4a27e2f7357'}
}
FIRST_MULTISIG_BLOCK_TESTNET = 370000
# Make DB snapshots every 100 blocks, try to use them to restore recent state on reorg to save reparse time.
# Experimental, relevant for chains with high orphaning rate.
# Set to True for clearinghoused, to False for upstream.
SHALLOW_REORG = True
| mit |
mrunge/horizon_lib | tools/install_venv_common.py | 166 | 5958 |
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides methods needed by installation script for OpenStack development
virtual environments.
Since this script is used to bootstrap a virtualenv from the system's Python
environment, it should be kept strictly compatible with Python 2.6.
Synced in from openstack-common
"""
from __future__ import print_function
import optparse
import os
import subprocess
import sys
class InstallVenv(object):
def __init__(self, root, venv, requirements,
test_requirements, py_version,
project):
self.root = root
self.venv = venv
self.requirements = requirements
self.test_requirements = test_requirements
self.py_version = py_version
self.project = project
def die(self, message, *args):
print(message % args, file=sys.stderr)
sys.exit(1)
def check_python_version(self):
if sys.version_info < (2, 6):
self.die("Need Python Version >= 2.6")
def run_command_with_code(self, cmd, redirect_output=True,
check_exit_code=True):
"""Runs a command in an out-of-process shell.
Returns the output of that command. Working directory is self.root.
"""
if redirect_output:
stdout = subprocess.PIPE
else:
stdout = None
proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
return (output, proc.returncode)
def run_command(self, cmd, redirect_output=True, check_exit_code=True):
return self.run_command_with_code(cmd, redirect_output,
check_exit_code)[0]
def get_distro(self):
if (os.path.exists('/etc/fedora-release') or
os.path.exists('/etc/redhat-release')):
return Fedora(
self.root, self.venv, self.requirements,
self.test_requirements, self.py_version, self.project)
else:
return Distro(
self.root, self.venv, self.requirements,
self.test_requirements, self.py_version, self.project)
def check_dependencies(self):
self.get_distro().install_virtualenv()
def create_virtualenv(self, no_site_packages=True):
"""Creates the virtual environment and installs PIP.
Creates the virtual environment and installs PIP only into the
virtual environment.
"""
if not os.path.isdir(self.venv):
print('Creating venv...', end=' ')
if no_site_packages:
self.run_command(['virtualenv', '-q', '--no-site-packages',
self.venv])
else:
self.run_command(['virtualenv', '-q', self.venv])
print('done.')
else:
print("venv already exists...")
pass
def pip_install(self, *args):
self.run_command(['tools/with_venv.sh',
'pip', 'install', '--upgrade'] + list(args),
redirect_output=False)
def install_dependencies(self):
print('Installing dependencies with pip (this can take a while)...')
# First things first, make sure our venv has the latest pip and
# setuptools and pbr
self.pip_install('pip>=1.4')
self.pip_install('setuptools')
self.pip_install('pbr')
self.pip_install('-r', self.requirements, '-r', self.test_requirements)
def parse_args(self, argv):
"""Parses command-line arguments."""
parser = optparse.OptionParser()
parser.add_option('-n', '--no-site-packages',
action='store_true',
help="Do not inherit packages from global Python "
"install")
return parser.parse_args(argv[1:])[0]
class Distro(InstallVenv):
def check_cmd(self, cmd):
return bool(self.run_command(['which', cmd],
check_exit_code=False).strip())
def install_virtualenv(self):
if self.check_cmd('virtualenv'):
return
if self.check_cmd('easy_install'):
print('Installing virtualenv via easy_install...', end=' ')
if self.run_command(['easy_install', 'virtualenv']):
print('Succeeded')
return
else:
print('Failed')
self.die('ERROR: virtualenv not found.\n\n%s development'
' requires virtualenv, please install it using your'
' favorite package management tool' % self.project)
class Fedora(Distro):
"""This covers all Fedora-based distributions.
Includes: Fedora, RHEL, CentOS, Scientific Linux
"""
def check_pkg(self, pkg):
return self.run_command_with_code(['rpm', '-q', pkg],
check_exit_code=False)[1] == 0
def install_virtualenv(self):
if self.check_cmd('virtualenv'):
return
if not self.check_pkg('python-virtualenv'):
self.die("Please install 'python-virtualenv'.")
super(Fedora, self).install_virtualenv()
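# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the synced module): roughly how an
# install_venv.py wrapper drives this helper. The paths and project name are
# illustrative assumptions; every method called is defined above.
if __name__ == '__main__':
    root = os.path.dirname(os.path.abspath(__file__))
    install = InstallVenv(root, os.path.join(root, '.venv'),
                          'requirements.txt', 'test-requirements.txt',
                          'python2.7', 'example-project')
    options = install.parse_args(sys.argv)
    install.check_python_version()
    install.check_dependencies()        # makes sure virtualenv is available
    install.create_virtualenv(no_site_packages=options.no_site_packages)
    install.install_dependencies()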
| apache-2.0 |
Paul-Ezell/cinder-1 | cinder/volume/drivers/lvm.py | 4 | 33306 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver for Linux servers running LVM.
"""
import math
import os
import socket
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import units
import six
from cinder.brick.local_dev import lvm as lvm
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder import utils
from cinder.volume import driver
from cinder.volume import utils as volutils
LOG = logging.getLogger(__name__)
# FIXME(jdg): We'll put the lvm_ prefix back on these when we
# move over to using this as the real LVM driver, for now we'll
# rename them so that the config generation utility doesn't barf
# on duplicate entries.
volume_opts = [
cfg.StrOpt('volume_group',
default='cinder-volumes',
help='Name for the VG that will contain exported volumes'),
cfg.IntOpt('lvm_mirrors',
default=0,
help='If >0, create LVs with multiple mirrors. Note that '
'this requires lvm_mirrors + 2 PVs with available space'),
cfg.StrOpt('lvm_type',
default='default',
choices=['default', 'thin', 'auto'],
help='Type of LVM volumes to deploy; (default, thin, or auto). '
'Auto defaults to thin if thin is supported.'),
cfg.StrOpt('lvm_conf_file',
default='/etc/cinder/lvm.conf',
help='LVM conf file to use for the LVM driver in Cinder; '
'this setting is ignored if the specified file does '
'not exist (You can also specify \'None\' to not use '
'a conf file even if one exists).')
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class LVMVolumeDriver(driver.VolumeDriver):
"""Executes commands relating to Volumes."""
VERSION = '3.0.0'
def __init__(self, vg_obj=None, *args, **kwargs):
# Parent sets db, host, _execute and base config
super(LVMVolumeDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
self.hostname = socket.gethostname()
self.vg = vg_obj
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'LVM'
# Target Driver is what handles data-transport
# Transport specific code should NOT be in
# the driver (control path), this way
# different target drivers can be added (iscsi, FC etc)
target_driver = \
self.target_mapping[self.configuration.safe_get('iscsi_helper')]
LOG.debug('Attempting to initialize LVM driver with the '
'following target_driver: %s',
target_driver)
self.target_driver = importutils.import_object(
target_driver,
configuration=self.configuration,
db=self.db,
executor=self._execute)
self.protocol = self.target_driver.protocol
self.sparse_copy_volume = False
def _sizestr(self, size_in_g):
return '%sg' % size_in_g
def _volume_not_present(self, volume_name):
return self.vg.get_volume(volume_name) is None
def _delete_volume(self, volume, is_snapshot=False):
"""Deletes a logical volume."""
if self.configuration.volume_clear != 'none' and \
self.configuration.lvm_type != 'thin':
self._clear_volume(volume, is_snapshot)
name = volume['name']
if is_snapshot:
name = self._escape_snapshot(volume['name'])
self.vg.delete(name)
def _clear_volume(self, volume, is_snapshot=False):
# zero out old volumes to prevent data leaking between users
# TODO(ja): reclaiming space should be done lazy and low priority
if is_snapshot:
# if the volume to be cleared is a snapshot of another volume
# we need to clear out the volume using the -cow instead of the
# directly volume path. We need to skip this if we are using
# thin provisioned LVs.
# bug# lp1191812
dev_path = self.local_path(volume) + "-cow"
else:
dev_path = self.local_path(volume)
# TODO(jdg): Maybe we could optimize this for snaps by looking at
# the cow table and only overwriting what's necessary?
# for now we're still skipping on snaps due to hang issue
if not os.path.exists(dev_path):
msg = (_('Volume device file path %s does not exist.')
% dev_path)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
size_in_g = volume.get('volume_size') or volume.get('size')
if size_in_g is None:
msg = (_("Size for volume: %s not found, cannot secure delete.")
% volume['id'])
LOG.error(msg)
raise exception.InvalidParameterValue(msg)
# clear_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
vol_sz_in_meg = size_in_g * units.Ki
volutils.clear_volume(
vol_sz_in_meg, dev_path,
volume_clear=self.configuration.volume_clear,
volume_clear_size=self.configuration.volume_clear_size)
def _escape_snapshot(self, snapshot_name):
# Linux LVM reserves name that starts with snapshot, so that
# such volume name can't be created. Mangle it.
if not snapshot_name.startswith('snapshot'):
return snapshot_name
return '_' + snapshot_name
def _create_volume(self, name, size, lvm_type, mirror_count, vg=None):
vg_ref = self.vg
if vg is not None:
vg_ref = vg
vg_ref.create_volume(name, size, lvm_type, mirror_count)
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats")
if self.vg is None:
LOG.warning(_LW('Unable to update stats on non-initialized '
'Volume Group: %s'),
self.configuration.volume_group)
return
self.vg.update_volume_group_info()
data = {}
# Note(zhiteng): These information are driver/backend specific,
# each driver may define these values in its own config options
# or fetch from driver specific configuration file.
data["volume_backend_name"] = self.backend_name
data["vendor_name"] = 'Open Source'
data["driver_version"] = self.VERSION
data["storage_protocol"] = self.protocol
data["pools"] = []
total_capacity = 0
free_capacity = 0
if self.configuration.lvm_mirrors > 0:
total_capacity =\
self.vg.vg_mirror_size(self.configuration.lvm_mirrors)
free_capacity =\
self.vg.vg_mirror_free_space(self.configuration.lvm_mirrors)
provisioned_capacity = round(
float(total_capacity) - float(free_capacity), 2)
elif self.configuration.lvm_type == 'thin':
total_capacity = self.vg.vg_thin_pool_size
free_capacity = self.vg.vg_thin_pool_free_space
provisioned_capacity = self.vg.vg_provisioned_capacity
else:
total_capacity = self.vg.vg_size
free_capacity = self.vg.vg_free_space
provisioned_capacity = round(
float(total_capacity) - float(free_capacity), 2)
location_info = \
('LVMVolumeDriver:%(hostname)s:%(vg)s'
':%(lvm_type)s:%(lvm_mirrors)s' %
{'hostname': self.hostname,
'vg': self.configuration.volume_group,
'lvm_type': self.configuration.lvm_type,
'lvm_mirrors': self.configuration.lvm_mirrors})
thin_enabled = self.configuration.lvm_type == 'thin'
# Calculate the total volumes used by the VG group.
# This includes volumes and snapshots.
total_volumes = len(self.vg.get_volumes())
# Skip enabled_pools setting, treat the whole backend as one pool
# XXX FIXME if multipool support is added to LVM driver.
single_pool = {}
single_pool.update(dict(
pool_name=data["volume_backend_name"],
total_capacity_gb=total_capacity,
free_capacity_gb=free_capacity,
reserved_percentage=self.configuration.reserved_percentage,
location_info=location_info,
QoS_support=False,
provisioned_capacity_gb=provisioned_capacity,
max_over_subscription_ratio=(
self.configuration.max_over_subscription_ratio),
thin_provisioning_support=thin_enabled,
thick_provisioning_support=not thin_enabled,
total_volumes=total_volumes,
filter_function=self.get_filter_function(),
goodness_function=self.get_goodness_function(),
multiattach=True
))
data["pools"].append(single_pool)
# Check availability of sparse volume copy.
data['sparse_copy_volume'] = self.configuration.lvm_type == 'thin'
self._stats = data
def check_for_setup_error(self):
"""Verify that requirements are in place to use LVM driver."""
if self.vg is None:
root_helper = utils.get_root_helper()
lvm_conf_file = self.configuration.lvm_conf_file
if lvm_conf_file.lower() == 'none':
lvm_conf_file = None
try:
self.vg = lvm.LVM(self.configuration.volume_group,
root_helper,
lvm_type=self.configuration.lvm_type,
executor=self._execute,
lvm_conf=lvm_conf_file)
except exception.VolumeGroupNotFound:
message = (_("Volume Group %s does not exist") %
self.configuration.volume_group)
raise exception.VolumeBackendAPIException(data=message)
vg_list = volutils.get_all_volume_groups(
self.configuration.volume_group)
vg_dict = \
next(vg for vg in vg_list if vg['name'] == self.vg.vg_name)
if vg_dict is None:
message = (_("Volume Group %s does not exist") %
self.configuration.volume_group)
raise exception.VolumeBackendAPIException(data=message)
pool_name = "%s-pool" % self.configuration.volume_group
if self.configuration.lvm_type == 'auto':
# Default to thin provisioning if it is supported and
# the volume group is empty, or contains a thin pool
# for us to use.
self.vg.update_volume_group_info()
self.configuration.lvm_type = 'default'
if volutils.supports_thin_provisioning():
if self.vg.get_volume(pool_name) is not None:
LOG.info(_LI('Enabling LVM thin provisioning by default '
'because a thin pool exists.'))
self.configuration.lvm_type = 'thin'
elif len(self.vg.get_volumes()) == 0:
LOG.info(_LI('Enabling LVM thin provisioning by default '
'because no LVs exist.'))
self.configuration.lvm_type = 'thin'
if self.configuration.lvm_type == 'thin':
# Specific checks for using Thin provisioned LV's
if not volutils.supports_thin_provisioning():
message = _("Thin provisioning not supported "
"on this version of LVM.")
raise exception.VolumeBackendAPIException(data=message)
if self.vg.get_volume(pool_name) is None:
try:
self.vg.create_thin_pool(pool_name)
except processutils.ProcessExecutionError as exc:
exception_message = (_("Failed to create thin pool, "
"error message was: %s")
% six.text_type(exc.stderr))
raise exception.VolumeBackendAPIException(
data=exception_message)
# Enable sparse copy since lvm_type is 'thin'
self.sparse_copy_volume = True
def create_volume(self, volume):
"""Creates a logical volume."""
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
mirror_count)
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update from LVM for migrated volume.
This method should rename the back-end volume name(id) on the
destination host back to its original name(id) on the source host.
:param ctxt: The context used to run the method update_migrated_volume
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:return model_update to update DB with any needed changes
"""
name_id = None
provider_location = None
if original_volume_status == 'available':
current_name = CONF.volume_name_template % new_volume['id']
original_volume_name = CONF.volume_name_template % volume['id']
try:
self.vg.rename_volume(current_name, original_volume_name)
except processutils.ProcessExecutionError:
LOG.error(_LE('Unable to rename the logical volume '
'for volume: %s'), volume['id'])
# If the rename fails, _name_id should be set to the new
# volume id and provider_location should be set to the
# one from the new volume as well.
name_id = new_volume['_name_id'] or new_volume['id']
provider_location = new_volume['provider_location']
else:
# The back-end will not be renamed.
name_id = new_volume['_name_id'] or new_volume['id']
provider_location = new_volume['provider_location']
return {'_name_id': name_id, 'provider_location': provider_location}
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
self.configuration.lvm_mirrors)
# Some configurations of LVM do not automatically activate
# ThinLVM snapshot LVs.
self.vg.activate_lv(snapshot['name'], is_snapshot=True)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
volutils.copy_volume(self.local_path(snapshot),
self.local_path(volume),
snapshot['volume_size'] * units.Ki,
self.configuration.volume_dd_blocksize,
execute=self._execute,
sparse=self.sparse_copy_volume)
def delete_volume(self, volume):
"""Deletes a logical volume."""
# NOTE(jdg): We don't need to explicitly call
# remove export here because we already did it
# in the manager before we got here.
if self._volume_not_present(volume['name']):
# If the volume isn't present, then don't attempt to delete
return True
if self.vg.lv_has_snapshot(volume['name']):
LOG.error(_LE('Unable to delete due to existing snapshot '
'for volume: %s'), volume['name'])
raise exception.VolumeIsBusy(volume_name=volume['name'])
self._delete_volume(volume)
LOG.info(_LI('Successfully deleted volume: %s'), volume['id'])
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self.vg.create_lv_snapshot(self._escape_snapshot(snapshot['name']),
snapshot['volume_name'],
self.configuration.lvm_type)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
# If the snapshot isn't present, then don't attempt to delete
LOG.warning(_LW("snapshot: %s not found, "
"skipping delete operations"), snapshot['name'])
LOG.info(_LI('Successfully deleted snapshot: %s'), snapshot['id'])
return True
# TODO(yamahata): zeroing out the whole snapshot triggers COW.
# it's quite slow.
self._delete_volume(snapshot, is_snapshot=True)
def local_path(self, volume, vg=None):
if vg is None:
vg = self.configuration.volume_group
# NOTE(vish): stops deprecation warning
escaped_group = vg.replace('-', '--')
escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume['size'])
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
if self.configuration.lvm_type == 'thin':
self.vg.create_lv_snapshot(volume['name'],
src_vref['name'],
self.configuration.lvm_type)
if volume['size'] > src_vref['size']:
LOG.debug("Resize the new volume to %s.", volume['size'])
self.extend_volume(volume, volume['size'])
self.vg.activate_lv(volume['name'], is_snapshot=True,
permanent=True)
return
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
volume_name = src_vref['name']
temp_id = 'tmp-snap-%s' % volume['id']
temp_snapshot = {'volume_name': volume_name,
'size': src_vref['size'],
'volume_size': src_vref['size'],
'name': 'clone-snap-%s' % volume['id'],
'id': temp_id}
self.create_snapshot(temp_snapshot)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
try:
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
mirror_count)
self.vg.activate_lv(temp_snapshot['name'], is_snapshot=True)
volutils.copy_volume(
self.local_path(temp_snapshot),
self.local_path(volume),
src_vref['size'] * units.Ki,
self.configuration.volume_dd_blocksize,
execute=self._execute,
sparse=self.sparse_copy_volume)
finally:
self.delete_snapshot(temp_snapshot)
def clone_image(self, context, volume,
image_location, image_meta,
image_service):
return None, False
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup.volume_id)
temp_snapshot = None
previous_status = volume['previous_status']
if previous_status == 'in-use':
temp_snapshot = self._create_temp_snapshot(context, volume)
backup.temp_snapshot_id = temp_snapshot.id
backup.save()
volume_path = self.local_path(temp_snapshot)
else:
volume_path = self.local_path(volume)
try:
with utils.temporary_chown(volume_path):
with open(volume_path) as volume_file:
backup_service.backup(backup, volume_file)
finally:
if temp_snapshot:
self._delete_temp_snapshot(context, temp_snapshot)
backup.temp_snapshot_id = None
backup.save()
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
volume_path = self.local_path(volume)
with utils.temporary_chown(volume_path):
with open(volume_path, 'wb') as volume_file:
backup_service.restore(backup, volume['id'], volume_file)
def get_volume_stats(self, refresh=False):
"""Get volume status.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def extend_volume(self, volume, new_size):
"""Extend an existing volume's size."""
self.vg.extend_volume(volume['name'],
self._sizestr(new_size))
def manage_existing(self, volume, existing_ref):
"""Manages an existing LV.
Renames the LV to match the expected name for the volume.
Error checking done by manage_existing_get_size is not repeated.
"""
lv_name = existing_ref['source-name']
self.vg.get_volume(lv_name)
if volutils.check_already_managed_volume(self.db, lv_name):
raise exception.ManageExistingAlreadyManaged(volume_ref=lv_name)
# Attempt to rename the LV to match the OpenStack internal name.
try:
self.vg.rename_volume(lv_name, volume['name'])
except processutils.ProcessExecutionError as exc:
exception_message = (_("Failed to rename logical volume %(name)s, "
"error message was: %(err_msg)s")
% {'name': lv_name,
'err_msg': exc.stderr})
raise exception.VolumeBackendAPIException(
data=exception_message)
def manage_existing_object_get_size(self, existing_object, existing_ref,
object_type):
"""Return size of an existing LV for manage existing volume/snapshot.
existing_ref is a dictionary of the form:
{'source-name': <name of LV>}
"""
# Check that the reference is valid
if 'source-name' not in existing_ref:
reason = _('Reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
lv_name = existing_ref['source-name']
lv = self.vg.get_volume(lv_name)
# Raise an exception if we didn't find a suitable LV.
if not lv:
kwargs = {'existing_ref': lv_name,
'reason': 'Specified logical volume does not exist.'}
raise exception.ManageExistingInvalidReference(**kwargs)
# LV size is returned in gigabytes. Attempt to parse size as a float
# and round up to the next integer.
try:
lv_size = int(math.ceil(float(lv['size'])))
except ValueError:
exception_message = (_("Failed to manage existing %(type)s "
"%(name)s, because reported size %(size)s "
"was not a floating-point number.")
% {'type': object_type,
'name': lv_name,
'size': lv['size']})
raise exception.VolumeBackendAPIException(
data=exception_message)
return lv_size
def manage_existing_get_size(self, volume, existing_ref):
return self.manage_existing_object_get_size(volume, existing_ref,
"volume")
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
if not isinstance(existing_ref, dict):
existing_ref = {"source-name": existing_ref}
return self.manage_existing_object_get_size(snapshot, existing_ref,
"snapshot")
def manage_existing_snapshot(self, snapshot, existing_ref):
dest_name = self._escape_snapshot(snapshot['name'])
snapshot_temp = {"name": dest_name}
if not isinstance(existing_ref, dict):
existing_ref = {"source-name": existing_ref}
return self.manage_existing(snapshot_temp, existing_ref)
def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):
"""Optimize the migration if the destination is on the same server.
If the specified host is another back-end on the same server, and
the volume is not attached, we can do the migration locally without
going through iSCSI.
"""
false_ret = (False, None)
if volume['status'] != 'available':
return false_ret
if 'location_info' not in host['capabilities']:
return false_ret
info = host['capabilities']['location_info']
try:
(dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\
info.split(':')
lvm_mirrors = int(lvm_mirrors)
except ValueError:
return false_ret
if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname):
return false_ret
if dest_vg != self.vg.vg_name:
vg_list = volutils.get_all_volume_groups()
try:
next(vg for vg in vg_list if vg['name'] == dest_vg)
except StopIteration:
LOG.error(_LE("Destination Volume Group %s does not exist"),
dest_vg)
return false_ret
helper = utils.get_root_helper()
lvm_conf_file = self.configuration.lvm_conf_file
if lvm_conf_file.lower() == 'none':
lvm_conf_file = None
dest_vg_ref = lvm.LVM(dest_vg, helper,
lvm_type=lvm_type,
executor=self._execute,
lvm_conf=lvm_conf_file)
self._create_volume(volume['name'],
self._sizestr(volume['size']),
lvm_type,
lvm_mirrors,
dest_vg_ref)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
size_in_mb = int(volume['size']) * units.Ki
try:
volutils.copy_volume(self.local_path(volume),
self.local_path(volume, vg=dest_vg),
size_in_mb,
self.configuration.volume_dd_blocksize,
execute=self._execute,
sparse=self.sparse_copy_volume)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Volume migration failed due to "
"exception: %(reason)s."),
{'reason': six.text_type(e)}, resource=volume)
dest_vg_ref.delete(volume)
self._delete_volume(volume)
return (True, None)
else:
message = (_("Refusing to migrate volume ID: %(id)s. Please "
"check your configuration because source and "
"destination are the same Volume Group: %(name)s.") %
{'id': volume['id'], 'name': self.vg.vg_name})
LOG.exception(message)
raise exception.VolumeBackendAPIException(data=message)
def get_pool(self, volume):
return self.backend_name
# ####### Interface methods for DataPath (Target Driver) ########
def ensure_export(self, context, volume):
volume_path = "/dev/%s/%s" % (self.configuration.volume_group,
volume['name'])
model_update = \
self.target_driver.ensure_export(context, volume, volume_path)
return model_update
def create_export(self, context, volume, connector, vg=None):
if vg is None:
vg = self.configuration.volume_group
volume_path = "/dev/%s/%s" % (vg, volume['name'])
export_info = self.target_driver.create_export(
context,
volume,
volume_path)
return {'provider_location': export_info['location'],
'provider_auth': export_info['auth'], }
def remove_export(self, context, volume):
self.target_driver.remove_export(context, volume)
def initialize_connection(self, volume, connector):
return self.target_driver.initialize_connection(volume, connector)
def validate_connector(self, connector):
return self.target_driver.validate_connector(connector)
def terminate_connection(self, volume, connector, **kwargs):
return self.target_driver.terminate_connection(volume, connector,
**kwargs)
class LVMISCSIDriver(LVMVolumeDriver):
"""Empty class designation for LVMISCSI.
Since we've decoupled the inheritance of iSCSI and LVM we
don't really need this class any longer. We do however want
to keep it (at least for now) for back compat in driver naming.
"""
def __init__(self, *args, **kwargs):
super(LVMISCSIDriver, self).__init__(*args, **kwargs)
LOG.warning(_LW('LVMISCSIDriver is deprecated, you should '
'now just use LVMVolumeDriver and specify '
'iscsi_helper for the target driver you '
'wish to use.'))
class LVMISERDriver(LVMVolumeDriver):
"""Empty class designation for LVMISER.
Since we've decoupled the inheritance of data path in LVM we
don't really need this class any longer. We do however want
to keep it (at least for now) for back compat in driver naming.
"""
def __init__(self, *args, **kwargs):
super(LVMISERDriver, self).__init__(*args, **kwargs)
LOG.warning(_LW('LVMISERDriver is deprecated, you should '
'now just use LVMVolumeDriver and specify '
'iscsi_helper for the target driver you '
'wish to use. In order to enable iser, please '
'set iscsi_protocol with the value iser.'))
LOG.debug('Attempting to initialize LVM driver with the '
'following target_driver: '
'cinder.volume.targets.iser.ISERTgtAdm')
self.target_driver = importutils.import_object(
'cinder.volume.targets.iser.ISERTgtAdm',
configuration=self.configuration,
db=self.db,
executor=self._execute)
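# --- Editor's illustrative sketch (not part of the upstream driver) ---
# migrate_volume() above expects host['capabilities']['location_info'] to be a
# colon-separated string of the form
#   <driver>:<hostname>:<volume group>:<lvm_type>:<lvm_mirrors>
# The helper below only demonstrates how such a string is unpacked; the sample
# value is hypothetical.
def _example_parse_location_info(
        info='LVMVolumeDriver:host1:cinder-volumes:default:0'):
    dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors = info.split(':')
    return dest_type, dest_hostname, dest_vg, lvm_type, int(lvm_mirrors)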
|
apache-2.0
|
moyogo/robofontmechanic
|
src/lib/site-packages/requests/packages/chardet/cp949prober.py
|
2801
|
1782
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import CP949SMModel
class CP949Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(CP949SMModel)
        # NOTE: CP949 is a superset of EUC-KR, so the distribution should
        #       not be different.
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "CP949"
|
mit
|
MrSurly/micropython-esp32
|
tests/basics/namedtuple1.py
|
18
|
1495
|
try:
try:
from collections import namedtuple
except ImportError:
from ucollections import namedtuple
except ImportError:
print("SKIP")
raise SystemExit
T = namedtuple("Tup", ["foo", "bar"])
# CPython prints the fully qualified name, which we don't bother to do so far
#print(T)
for t in T(1, 2), T(bar=1, foo=2):
print(t)
print(t[0], t[1])
print(t.foo, t.bar)
print(len(t))
print(bool(t))
print(t + t)
print(t * 3)
print([f for f in t])
print(isinstance(t, tuple))
# Create using positional and keyword args
print(T(3, bar=4))
try:
t[0] = 200
except TypeError:
print("TypeError")
try:
t.bar = 200
except AttributeError:
print("AttributeError")
try:
t = T(1)
except TypeError:
print("TypeError")
try:
t = T(1, 2, 3)
except TypeError:
print("TypeError")
try:
t = T(foo=1)
except TypeError:
print("TypeError")
try:
t = T(1, foo=1)
except TypeError:
print("TypeError")
# enough args, but kw is wrong
try:
t = T(1, baz=3)
except TypeError:
print("TypeError")
# bad argument for member spec
try:
namedtuple('T', 1)
except TypeError:
print("TypeError")
# Try single string
T3 = namedtuple("TupComma", "foo bar")
t = T3(1, 2)
print(t.foo, t.bar)
# Try tuple
T4 = namedtuple("TupTuple", ("foo", "bar"))
t = T4(1, 2)
print(t.foo, t.bar)
# Try single string with comma field separator
# Not implemented so far
#T2 = namedtuple("TupComma", "foo,bar")
#t = T2(1, 2)
|
mit
|
StefanRijnhart/OpenUpgrade
|
addons/website_certification/__init__.py
|
385
|
1030
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import certification
import controllers
|
agpl-3.0
|
TurkuNLP/CAFA3
|
learning/submit.py
|
1
|
3231
|
import os, shutil
import subprocess
SLURMJobTemplate = """#!/bin/bash -l
##execution shell environment
## name of your job
#SBATCH -J %job
## system error message output file
#SBATCH -e %outDir/stderr.txt
## system message output file
#SBATCH -o %outDir/stdout.txt
## a per-process (soft) memory limit
## limit is specified in MB
## example: 1 GB is 1000
#SBATCH --mem-per-cpu=%memory
## how long a job takes, wallclock time hh:mm:ss
#SBATCH -t %wallTime
## number of processes
#SBATCH -n %cores
## partition
#SBATCH -p %partition
module load biopython-env
mkdir -p %outDir
cd %commandPath
%command
seff $SLURM_JOBID"""
def submit(command, outDir, job, memory=4000, cores=1, wallTime="48:00:00", partition="serial", dummy=False, clear=False):
global SLURMJobTemplate
if not dummy:
if os.path.exists(outDir):
if clear:
print "Removing output directory", outDir
shutil.rmtree(outDir)
else:
print "Output directory", outDir, "already exists"
raise Exception()
print "Making output directory", outDir
os.makedirs(outDir)
#command = command.replace("%outDir")
commandPath = os.path.abspath(os.getcwd())
template = SLURMJobTemplate
for param, value in [("%commandPath", commandPath), ("%command", command), ("%outDir", outDir), ("%job", job), ("%memory", memory), ("%cores", cores), ("%wallTime", wallTime), ("%partition", partition)]:
if value == None:
raise Exception("Undefined parameter '" + param + "'")
#print (param, value)
template = template.replace(param, str(value))
#print template
print "==========", "Template", "=========="
print template
print "===================================="
if not dummy:
with open(os.path.join(outDir, "template.txt"), "wt") as f:
f.write(template)
print "Submitting job", job
p = subprocess.Popen("sbatch", stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
print p.communicate(input=template)
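# --- Editor's illustrative usage sketch (hypothetical values) ---
# A dry run with dummy=True only prints the generated SLURM script; it does
# not create the output directory or call sbatch.
def _example_dummy_submit():
    submit(command="echo hello", outDir="/tmp/testjob", job="testjob",
           dummy=True)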
if __name__=="__main__":
from optparse import OptionParser
optparser = OptionParser(description="")
optparser.add_option('-c','--command', default=None, help='')
optparser.add_option("-o", "--outDir", default=None, help="")
optparser.add_option('-j','--job', default=None, help='')
optparser.add_option('-m','--memory', default=4, type=int, help='')
optparser.add_option('-r','--cores', default=1, type=int, help='')
optparser.add_option('-t','--time', default="48:00:00", help='')
optparser.add_option('-p','--partition', default="serial", help='')
optparser.add_option("--dummy", default=False, action="store_true", help="")
optparser.add_option("--clear", default=False, action="store_true", help="")
(options, args) = optparser.parse_args()
if options.job == None:
options.job = os.path.basename(options.outDir)
submit(command=options.command, outDir=options.outDir, job=options.job, memory=options.memory,
cores=options.cores, wallTime=options.time, partition=options.partition,
dummy=options.dummy, clear=options.clear)
|
lgpl-3.0
|
arleighdickerson/HomeworkODE
|
problem5.py
|
1
|
1626
|
from pylab import *
import numpy as np
import scipy.optimize as op
from scipy.integrate import odeint
from mpl_toolkits.mplot3d.axes3d import Axes3D
epsilon = 0.9
# Initial position in space
r0 = [0.0, 1-epsilon, np.sqrt((1+epsilon)/(1-epsilon)), 0.0]
def f(t, r):
(w, x, y, z) = r
dw_dt = -x/(x**2+z**2)**1.5
dx_dt = w
dy_dt = -z/(x**2+z**2)**1.5
dz_dt = y
return array([dw_dt, dx_dt, dy_dt, dz_dt])
# Implicit equation for the eccentric anomaly u at time t (Kepler's equation).
# Note that this reads the module-level variable t, which the integration
# loop below advances.
def function(u):
    return u - epsilon*sin(u) - t
def realF(t):
u = op.fsolve(function, 0)
w = sin(u)/(1-epsilon*cos(u))
x = cos(u)-epsilon
y = (np.sqrt(1-epsilon**2)*cos(u))/(1-epsilon*cos(u))
z = np.sqrt(1-epsilon**2)*sin(u)
return array([w,x,y,z])
tf = 50
t0=0.0
k = 0.005
def rk4(t, k, y, f):
k1 = k * f(t, y)
k2 = k * f(t + 0.5*k, y + 0.5*k1)
k3 = k * f(t + 0.5*k, y + 0.5*k2)
k4 = k * f(t + k, y + k3)
return t + k, y + (k1 + 2*(k2 + k3) + k4)/6.0
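# --- Editor's illustrative check (not part of the original script) ---
# Sanity check of rk4 on dy/dt = y with y(0) = 1, whose exact solution is
# exp(t); the step count, step size and tolerance are arbitrary illustrative
# values.
def _example_rk4_check(steps=1000, k=0.001):
    t, y = 0.0, array([1.0])
    for _ in range(steps):
        t, y = rk4(t, k, y, lambda t, y: y)
    return abs(y[0] - np.exp(t)) < 1e-9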
count = 0
t=t0
r=r0
rp = []
rp.append(r0)
tp = []
tp.append(t0)
realpoints = []
realpoints.append(r0)
u=0
while t<tf:
realpoints.append(realF(t))
t,r = rk4(t,k,r,f)
tp.append(t)
rp.append(r)
rp = array(rp)
x = rp[:, 1]
y = rp[:, 2]
z = rp[:, 3]
realpoints = array(realpoints)
xExact = realpoints[:, 1]
yExact = realpoints[:, 2]
zExact = realpoints[:, 3]
fig, ax = subplots(1, 3, sharex=True, sharey=True, figsize=(16,8))
ax[0].plot(tp, x, 'r', tp, xExact, 'b')
ax[0].set_title('x vs t')
ax[1].plot(tp, z, 'r', tp, zExact, 'b')
ax[1].set_title('z vs t')
ax[2].plot(x, z, 'r', xExact, zExact, 'b')
ax[2].set_title('X-Z cut')
show()
|
unlicense
|
tscohen/chainer
|
cupy/manipulation/join.py
|
17
|
4074
|
import numpy
import six
import cupy
def column_stack(tup):
"""Stacks 1-D and 2-D arrays as columns into a 2-D array.
A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays
are concatenated along the second axis.
Args:
tup (sequence of arrays): 1-D or 2-D arrays to be stacked.
Returns:
cupy.ndarray: A new 2-D array of stacked columns.
.. seealso:: :func:`numpy.column_stack`
"""
if any(not isinstance(a, cupy.ndarray) for a in tup):
raise TypeError('Only cupy arrays can be column stacked')
lst = list(tup)
for i, a in enumerate(lst):
if a.ndim == 1:
a = a[:, cupy.newaxis]
lst[i] = a
elif a.ndim != 2:
raise ValueError(
'Only 1 or 2 dimensional arrays can be column stacked')
return concatenate(lst, axis=1)
def concatenate(tup, axis=0):
"""Joins arrays along an axis.
Args:
        tup (sequence of arrays): Arrays to be joined. All of them must have
            the same number of dimensions, and their shapes may differ only
            along the specified axis.
axis (int): The axis to join arrays along.
Returns:
cupy.ndarray: Joined array.
.. seealso:: :func:`numpy.concatenate`
"""
ndim = None
shape = None
for a in tup:
if not isinstance(a, cupy.ndarray):
raise TypeError('Only cupy arrays can be concatenated')
if a.ndim == 0:
raise TypeError('zero-dimensional arrays cannot be concatenated')
if ndim is None:
ndim = a.ndim
shape = list(a.shape)
axis = _get_positive_axis(a.ndim, axis)
continue
if a.ndim != ndim:
raise ValueError(
'All arrays to concatenate must have the same ndim')
if any(i != axis and shape[i] != a.shape[i]
for i in six.moves.range(ndim)):
raise ValueError(
'All arrays must have same shape except the axis to '
'concatenate')
shape[axis] += a.shape[axis]
if ndim is None:
raise ValueError('Cannot concatenate from empty tuple')
dtype = numpy.find_common_type([a.dtype for a in tup], [])
ret = cupy.empty(shape, dtype=dtype)
skip = (slice(None),) * axis
i = 0
for a in tup:
aw = a.shape[axis]
ret[skip + (slice(i, i + aw),)] = a
i += aw
return ret
def dstack(tup):
"""Stacks arrays along the third axis.
Args:
tup (sequence of arrays): Arrays to be stacked. Each array is converted
by :func:`cupy.atleast_3d` before stacking.
Returns:
cupy.ndarray: Stacked array.
.. seealso:: :func:`numpy.dstack`
"""
return concatenate(cupy.atleast_3d(*tup), 2)
def hstack(tup):
"""Stacks arrays horizontally.
If an input array has one dimension, then the array is treated as a
horizontal vector and stacked along the first axis. Otherwise, the array is
stacked along the second axis.
Args:
tup (sequence of arrays): Arrays to be stacked.
Returns:
cupy.ndarray: Stacked array.
.. seealso:: :func:`numpy.hstack`
"""
arrs = [cupy.atleast_1d(a) for a in tup]
axis = 1
if arrs[0].ndim == 1:
axis = 0
    # Use the promoted (atleast_1d) arrays computed above.
    return concatenate(arrs, axis)
def vstack(tup):
"""Stacks arrays vertically.
If an input array has one dimension, then the array is treated as a
horizontal vector and stacked along the additional axis at the head.
Otherwise, the array is stacked along the first axis.
Args:
tup (sequence of arrays): Arrays to be stacked. Each array is converted
by :func:`cupy.atleast_2d` before stacking.
Returns:
cupy.ndarray: Stacked array.
    .. seealso:: :func:`numpy.vstack`
"""
return concatenate(cupy.atleast_2d(*tup), 0)
def _get_positive_axis(ndim, axis):
a = axis
if a < 0:
a += ndim
if a < 0 or a >= ndim:
raise IndexError('axis {} out of bounds [0, {})'.format(axis, ndim))
return a
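# --- Editor's illustrative usage sketch (not part of the upstream module) ---
# Joining two arrays along axis 0.  Running this requires a CUDA device and
# assumes this cupy version exposes cupy.array as in NumPy.
def _example_concatenate():
    a = cupy.array([[1, 2], [3, 4]])
    b = cupy.array([[5, 6]])
    return concatenate((a, b), axis=0)  # shape (3, 2)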
|
mit
|
trew/monlog
|
monlog/log/api/authentication.py
|
2
|
2770
|
from django.contrib.auth.models import User
from tastypie.models import ApiKey
from tastypie.http import HttpUnauthorized
from tastypie.authentication import Authentication
class CookieAuthentication(Authentication):
"""
Handles auth from Session cookie provided by the user.
"""
def _unauthorized(self):
return HttpUnauthorized()
def is_authenticated(self, request, **kwargs):
"""
User is authenticated if the variable ``_auth_user_id`` is
found in the session.
"""
from django.contrib.sessions.models import Session
if 'sessionid' in request.COOKIES:
s = Session.objects.get(pk=request.COOKIES['sessionid'])
if '_auth_user_id' in s.get_decoded():
user = User.objects.get(id=s.get_decoded()['_auth_user_id'])
request.user = user
return True
return self._unauthorized()
class MonlogAuthentication(Authentication):
"""
Handles API key auth, in which a user provides an API key.
Uses the ``ApiKey`` model that ships with tastypie.
"""
def _unauthorized(self):
return HttpUnauthorized()
def extract_apikey(self, request):
"""
        Extracts the API key from the request.
        If both the GET and POST dictionaries contain ``api_key``, the GET
        value takes precedence.
"""
return request.GET.get('api_key') or request.POST.get('api_key')
    def get_username_from_api_key(self, api_key):
        """
        Gets the username connected to an API key, or None if the key or
        its user does not exist.
        """
        try:
            key = ApiKey.objects.get(key=api_key)
            username = User.objects.get(username=key.user.username).username
        except (ApiKey.DoesNotExist, User.DoesNotExist):
            return None
        return username
def is_authenticated(self, request, **kwargs):
"""
Finds the user associated with an API key.
Returns either ``True`` if allowed, or ``HttpUnauthorized`` if not.
"""
api_key = self.extract_apikey(request)
if not api_key:
return self._unauthorized()
try:
key = ApiKey.objects.get(key=api_key)
user = User.objects.get(username=key.user.username)
        except (ApiKey.DoesNotExist, User.DoesNotExist):
return self._unauthorized()
request.user = user
return True
def get_identifier(self, request):
"""
Provides a unique string identifier for the requester.
This implementation returns the username of the user or ``nouser``
if something went wrong.
"""
api_key = self.extract_apikey(request)
username = self.get_username_from_api_key(api_key)
return username or 'nouser'
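# --- Editor's illustrative sketch (not part of the upstream module) ---
# extract_apikey() only needs GET/POST mappings with a .get() method, so a
# minimal stand-in request object is enough to show the lookup order (the GET
# value wins when both are present).
def _example_extract_apikey():
    class _FakeRequest(object):
        GET = {'api_key': 'key-from-get'}
        POST = {'api_key': 'key-from-post'}
    return MonlogAuthentication().extract_apikey(_FakeRequest())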
|
mit
|
bdfoster/blumate
|
blumate/components/statsd.py
|
1
|
1768
|
"""
A component which allows you to send data to StatsD.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/statsd/
"""
import logging
import blumate.util as util
from blumate.const import EVENT_STATE_CHANGED
from blumate.helpers import state as state_helper
_LOGGER = logging.getLogger(__name__)
DOMAIN = "statsd"
DEPENDENCIES = []
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8125
DEFAULT_PREFIX = 'hass'
DEFAULT_RATE = 1
REQUIREMENTS = ['python-statsd==1.7.2']
CONF_HOST = 'host'
CONF_PORT = 'port'
CONF_PREFIX = 'prefix'
CONF_RATE = 'rate'
def setup(hass, config):
"""Setup the StatsD component."""
from statsd.compat import NUM_TYPES
import statsd
conf = config[DOMAIN]
host = conf[CONF_HOST]
port = util.convert(conf.get(CONF_PORT), int, DEFAULT_PORT)
sample_rate = util.convert(conf.get(CONF_RATE), int, DEFAULT_RATE)
prefix = util.convert(conf.get(CONF_PREFIX), str, DEFAULT_PREFIX)
statsd_connection = statsd.Connection(
host=host,
port=port,
sample_rate=sample_rate,
disabled=False
)
meter = statsd.Gauge(prefix, statsd_connection)
def statsd_event_listener(event):
"""Listen for new messages on the bus and sends them to StatsD."""
state = event.data.get('new_state')
if state is None:
return
try:
_state = state_helper.state_as_number(state)
except ValueError:
return
if not isinstance(_state, NUM_TYPES):
return
_LOGGER.debug('Sending %s.%s', state.entity_id, _state)
meter.send(state.entity_id, _state)
hass.bus.listen(EVENT_STATE_CHANGED, statsd_event_listener)
return True
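# --- Editor's illustrative configuration sketch (values are examples only) ---
# The keys below mirror CONF_HOST/CONF_PORT/CONF_PREFIX/CONF_RATE defined
# above; in a blumate YAML configuration this component would be enabled
# roughly as:
#
#   statsd:
#     host: 192.168.1.10
#     port: 8125
#     prefix: hass
#     rate: 1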
|
mit
|
kartikgupta0909/build-mozharness
|
mozharness/base/config.py
|
4
|
20281
|
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
"""Generic config parsing and dumping, the way I remember it from scripts
gone by.
The config should be built from script-level defaults, overlaid by
config-file defaults, overlaid by command line options.
(For buildbot-analogues that would be factory-level defaults,
builder-level defaults, and build request/scheduler settings.)
The config should then be locked (set to read-only, to prevent runtime
alterations). Afterwards we should dump the config to a file that is
uploaded with the build, and can be used to debug or replicate the build
at a later time.
TODO:
* check_required_settings or something -- run at init, assert that
these settings are set.
"""
from copy import deepcopy
from optparse import OptionParser, Option, OptionGroup
import os
import sys
import urllib2
import socket
import time
try:
import simplejson as json
except ImportError:
import json
from mozharness.base.log import DEBUG, INFO, WARNING, ERROR, CRITICAL, FATAL
# optparse {{{1
class ExtendedOptionParser(OptionParser):
"""OptionParser, but with ExtendOption as the option_class.
"""
def __init__(self, **kwargs):
kwargs['option_class'] = ExtendOption
OptionParser.__init__(self, **kwargs)
class ExtendOption(Option):
"""from http://docs.python.org/library/optparse.html?highlight=optparse#adding-new-actions"""
ACTIONS = Option.ACTIONS + ("extend",)
STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)
def take_action(self, action, dest, opt, value, values, parser):
if action == "extend":
lvalue = value.split(",")
values.ensure_value(dest, []).extend(lvalue)
else:
Option.take_action(
self, action, dest, opt, value, values, parser)
def make_immutable(item):
if isinstance(item, list) or isinstance(item, tuple):
result = LockedTuple(item)
elif isinstance(item, dict):
result = ReadOnlyDict(item)
result.lock()
else:
result = item
return result
class LockedTuple(tuple):
def __new__(cls, items):
return tuple.__new__(cls, (make_immutable(x) for x in items))
def __deepcopy__(self, memo):
return [deepcopy(elem, memo) for elem in self]
# ReadOnlyDict {{{1
class ReadOnlyDict(dict):
def __init__(self, dictionary):
self._lock = False
self.update(dictionary.copy())
def _check_lock(self):
assert not self._lock, "ReadOnlyDict is locked!"
def lock(self):
for (k, v) in self.items():
self[k] = make_immutable(v)
self._lock = True
def __setitem__(self, *args):
self._check_lock()
return dict.__setitem__(self, *args)
def __delitem__(self, *args):
self._check_lock()
return dict.__delitem__(self, *args)
def clear(self, *args):
self._check_lock()
return dict.clear(self, *args)
def pop(self, *args):
self._check_lock()
return dict.pop(self, *args)
def popitem(self, *args):
self._check_lock()
return dict.popitem(self, *args)
def setdefault(self, *args):
self._check_lock()
return dict.setdefault(self, *args)
def update(self, *args):
self._check_lock()
dict.update(self, *args)
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, deepcopy(v, memo))
result._lock = False
for k, v in self.items():
result[k] = deepcopy(v, memo)
return result
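# --- Editor's illustrative sketch (not part of the upstream module) ---
# ReadOnlyDict behaves like a normal dict until lock() is called; afterwards
# any mutation trips the assertion in _check_lock().
def _example_read_only_dict():
    d = ReadOnlyDict({'work_dir': 'build'})
    d['log_level'] = 'debug'      # still mutable before lock()
    d.lock()
    try:
        d['log_level'] = 'info'   # raises AssertionError once locked
    except AssertionError:
        return True
    return False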
# parse_config_file {{{1
def parse_config_file(file_name, quiet=False, search_path=None,
config_dict_name="config"):
"""Read a config file and return a dictionary.
"""
file_path = None
if os.path.exists(file_name):
file_path = file_name
else:
if not search_path:
search_path = ['.', os.path.join(sys.path[0], '..', 'configs'),
os.path.join(sys.path[0], '..', '..', 'configs')]
for path in search_path:
if os.path.exists(os.path.join(path, file_name)):
file_path = os.path.join(path, file_name)
break
else:
raise IOError("Can't find %s in %s!" % (file_name, search_path))
if file_name.endswith('.py'):
global_dict = {}
local_dict = {}
execfile(file_path, global_dict, local_dict)
config = local_dict[config_dict_name]
elif file_name.endswith('.json'):
fh = open(file_path)
config = {}
json_config = json.load(fh)
config = dict(json_config)
fh.close()
else:
raise RuntimeError("Unknown config file type %s!" % file_name)
# TODO return file_path
return config
def download_config_file(url, file_name):
n = 0
attempts = 5
sleeptime = 60
max_sleeptime = 5 * 60
while True:
if n >= attempts:
print "Failed to download from url %s after %d attempts, quiting..." % (url, attempts)
raise SystemError(-1)
try:
contents = urllib2.urlopen(url, timeout=30).read()
break
except urllib2.URLError, e:
print "Error downloading from url %s: %s" % (url, str(e))
except socket.timeout, e:
print "Time out accessing %s: %s" % (url, str(e))
except socket.error, e:
print "Socket error when accessing %s: %s" % (url, str(e))
print "Sleeping %d seconds before retrying" % sleeptime
time.sleep(sleeptime)
sleeptime = sleeptime * 2
if sleeptime > max_sleeptime:
sleeptime = max_sleeptime
n += 1
try:
f = open(file_name, 'w')
f.write(contents)
f.close()
except IOError, e:
print "Error writing downloaded contents to file %s: %s" % (file_name, str(e))
raise SystemError(-1)
# BaseConfig {{{1
class BaseConfig(object):
"""Basic config setting/getting.
"""
def __init__(self, config=None, initial_config_file=None, config_options=None,
all_actions=None, default_actions=None,
volatile_config=None, option_args=None,
require_config_file=False, usage="usage: %prog [options]"):
self._config = {}
self.all_cfg_files_and_dicts = []
self.actions = []
self.config_lock = False
self.require_config_file = require_config_file
if all_actions:
self.all_actions = all_actions[:]
else:
self.all_actions = ['clobber', 'build']
if default_actions:
self.default_actions = default_actions[:]
else:
self.default_actions = self.all_actions[:]
if volatile_config is None:
self.volatile_config = {
'actions': None,
'add_actions': None,
'no_actions': None,
}
else:
self.volatile_config = deepcopy(volatile_config)
if config:
self.set_config(config)
if initial_config_file:
initial_config = parse_config_file(initial_config_file)
self.all_cfg_files_and_dicts.append(
(initial_config_file, initial_config)
)
self.set_config(initial_config)
if config_options is None:
config_options = []
self._create_config_parser(config_options, usage)
        # we allow manually passing option args for things like nosetests
self.parse_args(args=option_args)
def get_read_only_config(self):
return ReadOnlyDict(self._config)
def _create_config_parser(self, config_options, usage):
self.config_parser = ExtendedOptionParser(usage=usage)
self.config_parser.add_option(
"--work-dir", action="store", dest="work_dir",
type="string", default="build",
help="Specify the work_dir (subdir of base_work_dir)"
)
self.config_parser.add_option(
"--base-work-dir", action="store", dest="base_work_dir",
type="string", default=os.getcwd(),
help="Specify the absolute path of the parent of the working directory"
)
self.config_parser.add_option(
"-c", "--config-file", "--cfg", action="extend", dest="config_files",
type="string", help="Specify the config files"
)
self.config_parser.add_option(
"-C", "--opt-config-file", "--opt-cfg", action="extend",
dest="opt_config_files", type="string", default=[],
help="Specify the optional config files"
)
self.config_parser.add_option(
"--dump-config", action="store_true",
dest="dump_config",
help="List and dump the config generated from this run to "
"a JSON file."
)
self.config_parser.add_option(
"--dump-config-hierarchy", action="store_true",
dest="dump_config_hierarchy",
help="Like dump config but will list and dump which config "
"files were used making up the config and specify their own "
"keys/values that were not overwritten by another cfg -- "
"held the highest hierarchy."
)
# Logging
log_option_group = OptionGroup(self.config_parser, "Logging")
log_option_group.add_option(
"--log-level", action="store",
type="choice", dest="log_level", default=INFO,
choices=[DEBUG, INFO, WARNING, ERROR, CRITICAL, FATAL],
help="Set log level (debug|info|warning|error|critical|fatal)"
)
log_option_group.add_option(
"-q", "--quiet", action="store_false", dest="log_to_console",
default=True, help="Don't log to the console"
)
log_option_group.add_option(
"--append-to-log", action="store_true",
dest="append_to_log", default=False,
help="Append to the log"
)
log_option_group.add_option(
"--multi-log", action="store_const", const="multi",
dest="log_type", help="Log using MultiFileLogger"
)
log_option_group.add_option(
"--simple-log", action="store_const", const="simple",
dest="log_type", help="Log using SimpleFileLogger"
)
self.config_parser.add_option_group(log_option_group)
# Actions
action_option_group = OptionGroup(
self.config_parser, "Actions",
"Use these options to list or enable/disable actions."
)
action_option_group.add_option(
"--list-actions", action="store_true",
dest="list_actions",
help="List all available actions, then exit"
)
action_option_group.add_option(
"--add-action", action="extend",
dest="add_actions", metavar="ACTIONS",
help="Add action %s to the list of actions" % self.all_actions
)
action_option_group.add_option(
"--no-action", action="extend",
dest="no_actions", metavar="ACTIONS",
help="Don't perform action"
)
for action in self.all_actions:
action_option_group.add_option(
"--%s" % action, action="append_const",
dest="actions", const=action,
help="Add %s to the limited list of actions" % action
)
action_option_group.add_option(
"--no-%s" % action, action="append_const",
dest="no_actions", const=action,
help="Remove %s from the list of actions to perform" % action
)
self.config_parser.add_option_group(action_option_group)
# Child-specified options
# TODO error checking for overlapping options
if config_options:
for option in config_options:
self.config_parser.add_option(*option[0], **option[1])
# Initial-config-specified options
config_options = self._config.get('config_options', None)
if config_options:
for option in config_options:
self.config_parser.add_option(*option[0], **option[1])
def set_config(self, config, overwrite=False):
"""This is probably doable some other way."""
if self._config and not overwrite:
self._config.update(config)
else:
self._config = config
return self._config
def get_actions(self):
return self.actions
def verify_actions(self, action_list, quiet=False):
for action in action_list:
if action not in self.all_actions:
if not quiet:
print("Invalid action %s not in %s!" % (action,
self.all_actions))
raise SystemExit(-1)
return action_list
def verify_actions_order(self, action_list):
try:
indexes = [ self.all_actions.index(elt) for elt in action_list ]
sorted_indexes = sorted(indexes)
for i in range(len(indexes)):
if indexes[i] != sorted_indexes[i]:
print(("Action %s comes in different order in %s\n" +
"than in %s") % (action_list[i], action_list, self.all_actions))
raise SystemExit(-1)
except ValueError as e:
print("Invalid action found: " + str(e))
raise SystemExit(-1)
def list_actions(self):
print "Actions available:"
for a in self.all_actions:
print " " + ("*" if a in self.default_actions else " "), a
raise SystemExit(0)
def get_cfgs_from_files(self, all_config_files, parser):
""" returns a dict from a given list of config files.
this method can be overwritten in a subclassed BaseConfig to add extra
logic to the way that self.config is made up.
For eg:
Say you don't wish to update self.config with the entire contents
of a config file. You may have a config file that represents a dict
of branches. These branches could be a series of dicts. You could
then look for the presence of such a known config file and take the
branch dict you desire from it.
"""
all_cfg_files_and_dicts = []
for cf in all_config_files:
try:
if '://' in cf: # config file is an url
file_name = os.path.basename(cf)
file_path = os.path.join(os.getcwd(), file_name)
download_config_file(cf, file_path)
all_cfg_files_and_dicts.append(
(file_path, parse_config_file(file_path))
)
else:
all_cfg_files_and_dicts.append((cf, parse_config_file(cf)))
except Exception:
if cf in parser.opt_config_files:
print(
"WARNING: optional config file not found %s" % cf
)
else:
raise
return all_cfg_files_and_dicts
def parse_args(self, args=None):
"""Parse command line arguments in a generic way.
Return the parser object after adding the basic options, so
child objects can manipulate it.
"""
self.command_line = ' '.join(sys.argv)
if not args:
args = sys.argv[1:]
(options, args) = self.config_parser.parse_args(args)
defaults = self.config_parser.defaults.copy()
if not options.config_files:
if self.require_config_file:
if options.list_actions:
self.list_actions()
print("Required config file not set! (use --config-file option)")
raise SystemExit(-1)
else:
# this is what get_cfgs_from_files returns. It will represent each
            # config file name and its associated dict
# eg ('builds/branch_specifics.py', {'foo': 'bar'})
# let's store this to self for things like --interpret-config-files
self.all_cfg_files_and_dicts.extend(self.get_cfgs_from_files(
# append opt_config to allow them to overwrite previous configs
options.config_files + options.opt_config_files, parser=options
))
config = {}
for i, (c_file, c_dict) in enumerate(self.all_cfg_files_and_dicts):
config.update(c_dict)
# assign or update self._config depending on if it exists or not
# NOTE self._config will be passed to ReadOnlyConfig's init -- a
# dict subclass with immutable locking capabilities -- and serve
# as the keys/values that make up that instance. Ultimately,
# this becomes self.config during BaseScript's init
self.set_config(config)
for key in defaults.keys():
value = getattr(options, key)
if value is None:
continue
# Don't override config_file defaults with config_parser defaults
if key in defaults and value == defaults[key] and key in self._config:
continue
self._config[key] = value
# The idea behind the volatile_config is we don't want to save this
# info over multiple runs. This defaults to the action-specific
# config options, but can be anything.
for key in self.volatile_config.keys():
if self._config.get(key) is not None:
self.volatile_config[key] = self._config[key]
del(self._config[key])
"""Actions.
Seems a little complex, but the logic goes:
First, if default_actions is specified in the config, set our
default actions even if the script specifies other default actions.
Without any other action-specific options, run with default actions.
If we specify --ACTION or --only-ACTION once or multiple times,
we want to override the default_actions list with the one(s) we list.
Otherwise, if we specify --add-action ACTION, we want to add an
action to the list.
Finally, if we specify --no-ACTION, remove that from the list of
actions to perform.
"""
if self._config.get('default_actions'):
default_actions = self.verify_actions(self._config['default_actions'])
self.default_actions = default_actions
self.verify_actions_order(self.default_actions)
if options.list_actions:
self.list_actions()
self.actions = self.default_actions[:]
if self.volatile_config['actions']:
actions = self.verify_actions(self.volatile_config['actions'])
self.actions = actions
elif self.volatile_config['add_actions']:
actions = self.verify_actions(self.volatile_config['add_actions'])
self.actions.extend(actions)
if self.volatile_config['no_actions']:
actions = self.verify_actions(self.volatile_config['no_actions'])
for action in actions:
if action in self.actions:
self.actions.remove(action)
# Keep? This is for saving the volatile config in the dump_config
self._config['volatile_config'] = self.volatile_config
self.options = options
self.args = args
return (self.options, self.args)
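# --- Editor's illustrative usage sketch (not part of the upstream module) ---
# Minimal driver showing how a script-level config and command-line options
# are combined; the --branch option spec and the argument list are made up
# for illustration.
def _example_base_config(argv=None):
    config = BaseConfig(
        config={'log_level': INFO},
        config_options=[[["--branch"], {"dest": "branch",
                                        "help": "Branch name"}]],
        all_actions=['clobber', 'build'],
        option_args=argv or ['--build'],
    )
    return config.get_read_only_config()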
# __main__ {{{1
if __name__ == '__main__':
pass
|
mpl-2.0
|
donavoncade/namebench
|
nb_third_party/dns/tsig.py
|
215
|
7851
|
# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS TSIG support."""
import hmac
import struct
import dns.exception
import dns.rdataclass
import dns.name
class BadTime(dns.exception.DNSException):
"""Raised if the current time is not within the TSIG's validity time."""
pass
class BadSignature(dns.exception.DNSException):
"""Raised if the TSIG signature fails to verify."""
pass
class PeerError(dns.exception.DNSException):
"""Base class for all TSIG errors generated by the remote peer"""
pass
class PeerBadKey(PeerError):
"""Raised if the peer didn't know the key we used"""
pass
class PeerBadSignature(PeerError):
"""Raised if the peer didn't like the signature we sent"""
pass
class PeerBadTime(PeerError):
"""Raised if the peer didn't like the time we sent"""
pass
class PeerBadTruncation(PeerError):
"""Raised if the peer didn't like amount of truncation in the TSIG we sent"""
pass
default_algorithm = "HMAC-MD5.SIG-ALG.REG.INT"
BADSIG = 16
BADKEY = 17
BADTIME = 18
BADTRUNC = 22
def sign(wire, keyname, secret, time, fudge, original_id, error,
other_data, request_mac, ctx=None, multi=False, first=True,
algorithm=default_algorithm):
"""Return a (tsig_rdata, mac, ctx) tuple containing the HMAC TSIG rdata
for the input parameters, the HMAC MAC calculated by applying the
TSIG signature algorithm, and the TSIG digest context.
@rtype: (string, string, hmac.HMAC object)
@raises ValueError: I{other_data} is too long
@raises NotImplementedError: I{algorithm} is not supported
"""
(algorithm_name, digestmod) = get_algorithm(algorithm)
if first:
ctx = hmac.new(secret, digestmod=digestmod)
ml = len(request_mac)
if ml > 0:
ctx.update(struct.pack('!H', ml))
ctx.update(request_mac)
id = struct.pack('!H', original_id)
ctx.update(id)
ctx.update(wire[2:])
if first:
ctx.update(keyname.to_digestable())
ctx.update(struct.pack('!H', dns.rdataclass.ANY))
ctx.update(struct.pack('!I', 0))
long_time = time + 0L
upper_time = (long_time >> 32) & 0xffffL
lower_time = long_time & 0xffffffffL
time_mac = struct.pack('!HIH', upper_time, lower_time, fudge)
pre_mac = algorithm_name + time_mac
ol = len(other_data)
if ol > 65535:
raise ValueError('TSIG Other Data is > 65535 bytes')
post_mac = struct.pack('!HH', error, ol) + other_data
if first:
ctx.update(pre_mac)
ctx.update(post_mac)
else:
ctx.update(time_mac)
mac = ctx.digest()
mpack = struct.pack('!H', len(mac))
tsig_rdata = pre_mac + mpack + mac + id + post_mac
if multi:
ctx = hmac.new(secret)
ml = len(mac)
ctx.update(struct.pack('!H', ml))
ctx.update(mac)
else:
ctx = None
return (tsig_rdata, mac, ctx)
def hmac_md5(wire, keyname, secret, time, fudge, original_id, error,
other_data, request_mac, ctx=None, multi=False, first=True,
algorithm=default_algorithm):
return sign(wire, keyname, secret, time, fudge, original_id, error,
other_data, request_mac, ctx, multi, first, algorithm)
def validate(wire, keyname, secret, now, request_mac, tsig_start, tsig_rdata,
tsig_rdlen, ctx=None, multi=False, first=True):
"""Validate the specified TSIG rdata against the other input parameters.
@raises FormError: The TSIG is badly formed.
@raises BadTime: There is too much time skew between the client and the
server.
@raises BadSignature: The TSIG signature did not validate
@rtype: hmac.HMAC object"""
(adcount,) = struct.unpack("!H", wire[10:12])
if adcount == 0:
raise dns.exception.FormError
adcount -= 1
new_wire = wire[0:10] + struct.pack("!H", adcount) + wire[12:tsig_start]
current = tsig_rdata
(aname, used) = dns.name.from_wire(wire, current)
current = current + used
(upper_time, lower_time, fudge, mac_size) = \
struct.unpack("!HIHH", wire[current:current + 10])
time = ((upper_time + 0L) << 32) + (lower_time + 0L)
current += 10
mac = wire[current:current + mac_size]
current += mac_size
(original_id, error, other_size) = \
struct.unpack("!HHH", wire[current:current + 6])
current += 6
other_data = wire[current:current + other_size]
current += other_size
if current != tsig_rdata + tsig_rdlen:
raise dns.exception.FormError
if error != 0:
if error == BADSIG:
raise PeerBadSignature
elif error == BADKEY:
raise PeerBadKey
elif error == BADTIME:
raise PeerBadTime
elif error == BADTRUNC:
raise PeerBadTruncation
else:
raise PeerError('unknown TSIG error code %d' % error)
time_low = time - fudge
time_high = time + fudge
if now < time_low or now > time_high:
raise BadTime
(junk, our_mac, ctx) = sign(new_wire, keyname, secret, time, fudge,
original_id, error, other_data,
request_mac, ctx, multi, first, aname)
if (our_mac != mac):
raise BadSignature
return ctx
def get_algorithm(algorithm):
"""Returns the wire format string and the hash module to use for the
specified TSIG algorithm
@rtype: (string, hash constructor)
@raises NotImplementedError: I{algorithm} is not supported
"""
hashes = {}
try:
import hashlib
hashes[dns.name.from_text('hmac-sha224')] = hashlib.sha224
hashes[dns.name.from_text('hmac-sha256')] = hashlib.sha256
hashes[dns.name.from_text('hmac-sha384')] = hashlib.sha384
hashes[dns.name.from_text('hmac-sha512')] = hashlib.sha512
hashes[dns.name.from_text('hmac-sha1')] = hashlib.sha1
hashes[dns.name.from_text('HMAC-MD5.SIG-ALG.REG.INT')] = hashlib.md5
import sys
if sys.hexversion < 0x02050000:
# hashlib doesn't conform to PEP 247: API for
# Cryptographic Hash Functions, which hmac before python
# 2.5 requires, so add the necessary items.
class HashlibWrapper:
def __init__(self, basehash):
self.basehash = basehash
self.digest_size = self.basehash().digest_size
def new(self, *args, **kwargs):
return self.basehash(*args, **kwargs)
for name in hashes:
hashes[name] = HashlibWrapper(hashes[name])
except ImportError:
import md5, sha
hashes[dns.name.from_text('HMAC-MD5.SIG-ALG.REG.INT')] = md5.md5
hashes[dns.name.from_text('hmac-sha1')] = sha.sha
if isinstance(algorithm, (str, unicode)):
algorithm = dns.name.from_text(algorithm)
if algorithm in hashes:
return (algorithm.to_digestable(), hashes[algorithm])
raise NotImplementedError("TSIG algorithm " + str(algorithm) +
" is not supported")
|
apache-2.0
|
tensorflow/tensorflow
|
tensorflow/lite/experimental/mlir/testing/op_tests/cumsum.py
|
6
|
2030
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for cumsum."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_cumsum_tests(options):
"""Make a set of tests to do cumsum."""
test_parameters = [{
"shape": [(3, 6), (8, 9, 7)],
"dtype": [tf.int32, tf.int64, tf.float32],
"axis": [0, 1],
"exclusive": [True, False],
"reverse": [True, False],
}]
def build_graph(parameters):
"""Build the cumsum op testing graph."""
input1 = tf.compat.v1.placeholder(
dtype=parameters["dtype"], shape=parameters["shape"])
out = tf.math.cumsum(
input1,
parameters["axis"],
exclusive=parameters["exclusive"],
reverse=parameters["reverse"])
return [input1], [out]
def build_inputs(parameters, sess, inputs, outputs):
input1 = create_tensor_data(parameters["dtype"], parameters["shape"])
return [input1], sess.run(outputs, feed_dict=dict(zip(inputs, [input1])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
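# --- Editor's illustrative sketch (not part of the upstream test) ---
# The exclusive/reverse flags exercised above change the result as follows
# for a simple 1-D input (values chosen for illustration):
#   cumsum([1, 2, 3])                  -> [1, 3, 6]
#   cumsum([1, 2, 3], exclusive=True)  -> [0, 1, 3]
#   cumsum([1, 2, 3], reverse=True)    -> [6, 5, 3]
def _example_cumsum():
    x = tf.constant([1, 2, 3])
    return tf.math.cumsum(x, exclusive=True)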
|
apache-2.0
|
HellerCommaA/flask-angular
|
lib/python2.7/site-packages/pip/vendor/distlib/wheel.py
|
79
|
23490
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import base64
import codecs
import datetime
import distutils.util
from email import message_from_file
import hashlib
import imp
import json
import logging
import os
import posixpath
import re
import shutil
import sys
import tempfile
import zipfile
from . import DistlibException
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
from .database import DistributionPath, InstalledDistribution
from .metadata import Metadata
from .scripts import ScriptMaker
from .util import (FileOperator, convert_path, CSVReader, CSVWriter,
cached_property, get_cache_base)
logger = logging.getLogger(__name__)
if hasattr(sys, 'pypy_version_info'):
IMP_PREFIX = 'pp'
elif sys.platform.startswith('java'):
IMP_PREFIX = 'jy'
elif sys.platform == 'cli':
IMP_PREFIX = 'ip'
else:
IMP_PREFIX = 'cp'
VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
if not VER_SUFFIX: # pragma: no cover
VER_SUFFIX = '%s%s' % sys.version_info[:2]
PYVER = 'py' + VER_SUFFIX
IMPVER = IMP_PREFIX + VER_SUFFIX
ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_')
ABI = sysconfig.get_config_var('SOABI')
if ABI and ABI.startswith('cpython-'):
ABI = ABI.replace('cpython-', 'cp')
else:
ABI = 'none'
FILENAME_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?
-(?P<py>\w+\d+(\.\w+\d+)*)
-(?P<bi>\w+)
-(?P<ar>\w+)
\.whl$
''', re.IGNORECASE | re.VERBOSE)
NAME_VERSION_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?$
''', re.IGNORECASE | re.VERBOSE)
SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
if os.sep == '/':
to_posix = lambda o: o
else:
to_posix = lambda o: o.replace(os.sep, '/')
class Mounter(object):
def __init__(self):
self.impure_wheels = {}
self.libs = {}
def add(self, pathname, extensions):
self.impure_wheels[pathname] = extensions
self.libs.update(extensions)
def remove(self, pathname):
extensions = self.impure_wheels.pop(pathname)
for k, v in extensions:
if k in self.libs:
del self.libs[k]
def find_module(self, fullname, path=None):
if fullname in self.libs:
result = self
else:
result = None
return result
def load_module(self, fullname):
if fullname in sys.modules:
result = sys.modules[fullname]
else:
if fullname not in self.libs:
raise ImportError('unable to find extension for %s' % fullname)
result = imp.load_dynamic(fullname, self.libs[fullname])
result.__loader__ = self
result.__package__, _ = fullname.rsplit('.', 1)
return result
_hook = Mounter()
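# --- Editor's illustrative sketch (not part of the upstream module) ---
# FILENAME_RE splits a wheel file name into its PEP 427 components; the file
# name below is made up for illustration.
def _example_parse_wheel_name(name='distlib-0.1.1-1-py27-none-any.whl'):
    m = FILENAME_RE.match(name)
    return m.groupdict('') if m else None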
class Wheel(object):
"""
Class to build and install from Wheel files (PEP 427).
"""
wheel_version = (1, 0)
hash_kind = 'sha256'
def __init__(self, filename=None, sign=False, verify=False):
"""
Initialise an instance using a (valid) filename.
"""
self.sign = sign
self.verify = verify
self.buildver = ''
self.pyver = [PYVER]
self.abi = ['none']
self.arch = ['any']
self.dirname = os.getcwd()
if filename is None:
self.name = 'dummy'
self.version = '0.1'
self._filename = self.filename
else:
m = NAME_VERSION_RE.match(filename)
if m:
info = m.groupdict('')
self.name = info['nm']
self.version = info['vn']
self.buildver = info['bn']
self._filename = self.filename
else:
dirname, filename = os.path.split(filename)
m = FILENAME_RE.match(filename)
if not m:
raise DistlibException('Invalid name or '
'filename: %r' % filename)
if dirname:
self.dirname = os.path.abspath(dirname)
self._filename = filename
info = m.groupdict('')
self.name = info['nm']
self.version = info['vn']
self.buildver = info['bn']
self.pyver = info['py'].split('.')
self.abi = info['bi'].split('.')
self.arch = info['ar'].split('.')
@property
def filename(self):
"""
Build and return a filename from the various components.
"""
if self.buildver:
buildver = '-' + self.buildver
else:
buildver = ''
pyver = '.'.join(self.pyver)
abi = '.'.join(self.abi)
arch = '.'.join(self.arch)
return '%s-%s%s-%s-%s-%s.whl' % (self.name, self.version, buildver,
pyver, abi, arch)
@property
def tags(self):
for pyver in self.pyver:
for abi in self.abi:
for arch in self.arch:
yield pyver, abi, arch
@cached_property
def metadata(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
metadata_filename = posixpath.join(info_dir, 'METADATA')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(metadata_filename) as bf:
wf = wrapper(bf)
result = Metadata()
result.read_file(wf)
return result
@cached_property
def info(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
metadata_filename = posixpath.join(info_dir, 'WHEEL')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(metadata_filename) as bf:
wf = wrapper(bf)
message = message_from_file(wf)
result = dict(message)
return result
def process_shebang(self, data):
m = SHEBANG_RE.match(data)
if m:
data = b'#!python' + data[m.end():]
else:
cr = data.find(b'\r')
lf = data.find(b'\n')
if cr < 0 or cr > lf:
term = b'\n'
else:
if data[cr:cr + 2] == b'\r\n':
term = b'\r\n'
else:
term = b'\r'
data = b'#!python' + term + data
return data
def get_hash(self, data, hash_kind=None):
if hash_kind is None:
hash_kind = self.hash_kind
try:
hasher = getattr(hashlib, hash_kind)
except AttributeError:
raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
result = hasher(data).digest()
result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
return hash_kind, result
def write_record(self, records, record_path, base):
with CSVWriter(record_path) as writer:
for row in records:
writer.writerow(row)
p = to_posix(os.path.relpath(record_path, base))
writer.writerow((p, '', ''))
def build(self, paths, tags=None):
"""
Build a wheel from files in specified paths, and use any specified tags
when determining the name of the wheel.
"""
if tags is None:
tags = {}
libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
if libkey == 'platlib':
is_pure = 'false'
default_pyver = [IMPVER]
default_abi = [ABI]
default_arch = [ARCH]
else:
is_pure = 'true'
default_pyver = [PYVER]
default_abi = ['none']
default_arch = ['any']
self.pyver = tags.get('pyver', default_pyver)
self.abi = tags.get('abi', default_abi)
self.arch = tags.get('arch', default_arch)
libdir = paths[libkey]
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
archive_paths = []
# First, stuff which is not in site-packages
for key in ('data', 'headers', 'scripts'):
if key not in paths:
continue
path = paths[key]
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for fn in files:
p = fsdecode(os.path.join(root, fn))
rp = os.path.relpath(p, path)
ap = to_posix(os.path.join(data_dir, key, rp))
archive_paths.append((ap, p))
if key == 'scripts' and not p.endswith('.exe'):
with open(p, 'rb') as f:
data = f.read()
data = self.process_shebang(data)
with open(p, 'wb') as f:
f.write(data)
# Now, stuff which is in site-packages, other than the
# distinfo stuff.
path = libdir
distinfo = None
for root, dirs, files in os.walk(path):
if root == path:
# At the top level only, save distinfo for later
# and skip it for now
for i, dn in enumerate(dirs):
dn = fsdecode(dn)
if dn.endswith('.dist-info'):
distinfo = os.path.join(root, dn)
del dirs[i]
break
assert distinfo, '.dist-info directory expected, not found'
for fn in files:
# comment out next suite to leave .pyc files in
if fsdecode(fn).endswith(('.pyc', '.pyo')):
continue
p = os.path.join(root, fn)
rp = to_posix(os.path.relpath(p, path))
archive_paths.append((rp, p))
# Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
files = os.listdir(distinfo)
for fn in files:
if fn not in ('RECORD', 'INSTALLER', 'SHARED'):
p = fsdecode(os.path.join(distinfo, fn))
ap = to_posix(os.path.join(info_dir, fn))
archive_paths.append((ap, p))
import distlib
wheel_metadata = [
'Wheel-Version: %d.%d' % self.wheel_version,
'Generator: distlib %s' % distlib.__version__,
'Root-Is-Purelib: %s' % is_pure,
]
for pyver, abi, arch in self.tags:
wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
p = os.path.join(distinfo, 'WHEEL')
with open(p, 'w') as f:
f.write('\n'.join(wheel_metadata))
ap = to_posix(os.path.join(info_dir, 'WHEEL'))
archive_paths.append((ap, p))
# Now, at last, RECORD.
# Paths in here are archive paths - nothing else makes sense.
records = []
hasher = getattr(hashlib, self.hash_kind)
for ap, p in archive_paths:
with open(p, 'rb') as f:
data = f.read()
digest = '%s=%s' % self.get_hash(data)
size = os.path.getsize(p)
records.append((ap, digest, size))
p = os.path.join(distinfo, 'RECORD')
self.write_record(records, p, libdir)
ap = to_posix(os.path.join(info_dir, 'RECORD'))
archive_paths.append((ap, p))
# Now, ready to build the zip file
pathname = os.path.join(self.dirname, self.filename)
with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
for ap, p in archive_paths:
logger.debug('Wrote %s to %s in wheel', p, ap)
zf.write(p, ap)
return pathname
def install(self, paths, dry_run=False, executable=None, warner=None):
"""
Install a wheel to the specified paths. If ``executable`` is specified,
        it should be the Unicode absolute path to the executable written
into the shebang lines of any scripts installed. If ``warner`` is
specified, it should be a callable, which will be called with two
tuples indicating the wheel version of this software and the wheel
version in the file, if there is a discrepancy in the versions.
        This can be used to issue any warnings or raise any exceptions.
"""
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if (file_version != self.wheel_version) and warner:
warner(self.wheel_version, file_version)
if message['Root-Is-Purelib'] == 'true':
libdir = paths['purelib']
else:
libdir = paths['platlib']
records = {}
with zf.open(record_name) as bf:
with CSVReader(record_name, stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
data_pfx = posixpath.join(data_dir, '')
script_pfx = posixpath.join(data_dir, 'scripts', '')
fileop = FileOperator(dry_run=dry_run)
fileop.record = True # so we can rollback if needed
bc = not sys.dont_write_bytecode # Double negatives. Lovely!
outfiles = [] # for RECORD writing
# for script copying/shebang processing
workdir = tempfile.mkdtemp()
# set target dir later
# we default add_launchers to False, as the
# Python Launcher should be used instead
maker = ScriptMaker(workdir, None, fileop=fileop,
add_launchers=False)
maker.executable = executable
try:
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
is_script = (u_arcname.startswith(script_pfx)
and not u_arcname.endswith('.exe'))
if u_arcname.startswith(data_pfx):
_, where, rp = u_arcname.split('/', 2)
outfile = os.path.join(paths[where], convert_path(rp))
else:
# meant for site-packages.
if u_arcname in (wheel_metadata_name, record_name):
continue
outfile = os.path.join(libdir, convert_path(u_arcname))
if not is_script:
with zf.open(arcname) as bf:
fileop.copy_stream(bf, outfile)
outfiles.append(outfile)
# Double check the digest of the written file
if not dry_run and row[1]:
with open(outfile, 'rb') as bf:
data = bf.read()
_, newdigest = self.get_hash(data, kind)
if newdigest != digest:
raise DistlibException('digest mismatch '
'on write for '
'%s' % outfile)
if bc and outfile.endswith('.py'):
try:
pyc = fileop.byte_compile(outfile)
outfiles.append(pyc)
except Exception:
# Don't give up if byte-compilation fails,
# but log it and perhaps warn the user
logger.warning('Byte-compilation failed',
exc_info=True)
else:
fn = os.path.basename(convert_path(arcname))
workname = os.path.join(workdir, fn)
with zf.open(arcname) as bf:
fileop.copy_stream(bf, workname)
dn, fn = os.path.split(outfile)
maker.target_dir = dn
filenames = maker.make(fn)
fileop.set_executable_mode(filenames)
outfiles.extend(filenames)
p = os.path.join(libdir, info_dir)
dist = InstalledDistribution(p)
# Write SHARED
paths = dict(paths) # don't change passed in dict
del paths['purelib']
del paths['platlib']
paths['lib'] = libdir
p = dist.write_shared_locations(paths, dry_run)
outfiles.append(p)
# Write RECORD
dist.write_installed_files(outfiles, paths['prefix'],
dry_run)
return dist
except Exception as e: # pragma: no cover
logger.exception('installation failed.')
fileop.rollback()
raise
finally:
shutil.rmtree(workdir)
def _get_dylib_cache(self):
result = os.path.join(get_cache_base(), 'dylib-cache')
if not os.path.isdir(result):
os.makedirs(result)
return result
def _get_extensions(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
arcname = posixpath.join(info_dir, 'EXTENSIONS')
wrapper = codecs.getreader('utf-8')
result = []
with ZipFile(pathname, 'r') as zf:
try:
with zf.open(arcname) as bf:
wf = wrapper(bf)
extensions = json.load(wf)
cache_base = self._get_dylib_cache()
for name, relpath in extensions.items():
dest = os.path.join(cache_base, convert_path(relpath))
if not os.path.exists(dest):
extract = True
else:
file_time = os.stat(dest).st_mtime
file_time = datetime.datetime.fromtimestamp(file_time)
info = zf.getinfo(relpath)
wheel_time = datetime.datetime(*info.date_time)
extract = wheel_time > file_time
if extract:
zf.extract(relpath, cache_base)
result.append((name, dest))
except KeyError:
pass
return result
def mount(self, append=False):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if not is_compatible(self):
msg = 'Wheel %s not mountable in this Python.' % pathname
raise DistlibException(msg)
if pathname in sys.path:
logger.debug('%s already in path', pathname)
else:
if append:
sys.path.append(pathname)
else:
sys.path.insert(0, pathname)
extensions = self._get_extensions()
if extensions:
if _hook not in sys.meta_path:
sys.meta_path.append(_hook)
_hook.add(pathname, extensions)
def unmount(self):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if pathname not in sys.path:
logger.debug('%s not in path', pathname)
else:
sys.path.remove(pathname)
if pathname in _hook.impure_wheels:
_hook.remove(pathname)
if not _hook.impure_wheels:
if _hook in sys.meta_path:
sys.meta_path.remove(_hook)
def compatible_tags():
"""
Return (pyver, abi, arch) tuples compatible with this Python.
"""
versions = [VER_SUFFIX]
major = VER_SUFFIX[0]
for minor in range(sys.version_info[1] - 1, - 1, -1):
versions.append(''.join([major, str(minor)]))
abis = []
for suffix, _, _ in imp.get_suffixes():
if suffix.startswith('.abi'):
abis.append(suffix.split('.', 2)[1])
abis.sort()
if ABI != 'none':
abis.insert(0, ABI)
abis.append('none')
result = []
# Most specific - our Python version, ABI and arch
for abi in abis:
result.append((''.join((IMP_PREFIX, versions[0])), abi, ARCH))
# where no ABI / arch dependency, but IMP_PREFIX dependency
for i, version in enumerate(versions):
result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
if i == 0:
result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
# no IMP_PREFIX, ABI or arch dependency
for i, version in enumerate(versions):
result.append((''.join(('py', version)), 'none', 'any'))
if i == 0:
result.append((''.join(('py', version[0])), 'none', 'any'))
return result
COMPATIBLE_TAGS = compatible_tags()
del compatible_tags
def is_compatible(wheel, tags=None):
if not isinstance(wheel, Wheel):
wheel = Wheel(wheel) # assume it's a filename
result = False
if tags is None:
tags = COMPATIBLE_TAGS
for ver, abi, arch in tags:
if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch:
result = True
break
return result
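# Illustrative sketch (not part of distlib): parse a hypothetical wheel filename,
# expand its tag sets and check it against this interpreter's COMPATIBLE_TAGS.
if __name__ == '__main__':
    w = Wheel('example_pkg-1.0-py2.py3-none-any.whl')
    print(w.name)            # 'example_pkg'
    print(w.version)         # '1.0'
    print(list(w.tags))      # [('py2', 'none', 'any'), ('py3', 'none', 'any')]
    print(w.filename)        # rebuilt from the parsed components
    print(is_compatible(w))  # expected True: py2/py3, 'none', 'any' tags are universal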
|
mit
|
Eureka22/ASM_xf
|
PythonD/site_python/twisted/conch/ssh/service.py
|
2
|
1933
|
# Twisted, the Framework of Your Internet
# Copyright (C) 2001-2002 Matthew W. Lefkowitz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""The parent class for all the SSH services. Currently implemented services are: ssh-userauth and ssh-connection.
This module is unstable.
Maintainer: U{Paul Swartz<mailto:[email protected]>}
"""
from twisted.python import log
class SSHService:
name = None # this is the ssh name for the service
protocolMessages = {} # these map #'s -> protocol names
transport = None # gets set later
def serviceStarted(self):
"""
called when the service is active on the transport.
"""
def serviceStopped(self):
"""
called when the service is stopped, either by the connection ending
or by another service being started
"""
def packetReceived(self, messageType, packet):
"""
        called when we receive a packet on the transport
"""
#print self.protocolMessages
f = getattr(self,'ssh_%s' % self.protocolMessages[messageType][4:], None)
if f:
f(packet)
else:
log.msg("couldn't handle", messageType)
log.msg(repr(packet[1:]))
self.transport.sendUnimplemented()
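# Illustrative sketch (not part of Twisted): a minimal subclass showing how
# packetReceived strips the leading 'MSG_' from the name registered in
# protocolMessages and dispatches to the matching ssh_* method.  The message
# number and names below are hypothetical.
class ExampleService(SSHService):
    name = 'ssh-example'
    protocolMessages = {200: 'MSG_EXAMPLE_PING'}
    def ssh_EXAMPLE_PING(self, packet):
        log.msg('example ping, payload=%r' % (packet,))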
|
gpl-2.0
|
kensoc/kendroid_kernel
|
scripts/gdb/linux/lists.py
|
630
|
2897
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# list tools
#
# Copyright (c) Thiebaud Weksteen, 2015
#
# Authors:
# Thiebaud Weksteen <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import utils
list_head = utils.CachedType("struct list_head")
def list_check(head):
nb = 0
if (head.type == list_head.get_type().pointer()):
head = head.dereference()
elif (head.type != list_head.get_type()):
raise gdb.GdbError('argument must be of type (struct list_head [*])')
c = head
try:
gdb.write("Starting with: {}\n".format(c))
except gdb.MemoryError:
gdb.write('head is not accessible\n')
return
while True:
p = c['prev'].dereference()
n = c['next'].dereference()
try:
if p['next'] != c.address:
gdb.write('prev.next != current: '
'current@{current_addr}={current} '
'prev@{p_addr}={p}\n'.format(
current_addr=c.address,
current=c,
p_addr=p.address,
p=p,
))
return
except gdb.MemoryError:
gdb.write('prev is not accessible: '
'current@{current_addr}={current}\n'.format(
current_addr=c.address,
current=c
))
return
try:
if n['prev'] != c.address:
gdb.write('next.prev != current: '
'current@{current_addr}={current} '
'next@{n_addr}={n}\n'.format(
current_addr=c.address,
current=c,
n_addr=n.address,
n=n,
))
return
except gdb.MemoryError:
gdb.write('next is not accessible: '
'current@{current_addr}={current}\n'.format(
current_addr=c.address,
current=c
))
return
c = n
nb += 1
if c == head:
gdb.write("list is consistent: {} node(s)\n".format(nb))
return
class LxListChk(gdb.Command):
"""Verify a list consistency"""
def __init__(self):
super(LxListChk, self).__init__("lx-list-check", gdb.COMMAND_DATA,
gdb.COMPLETE_EXPRESSION)
def invoke(self, arg, from_tty):
argv = gdb.string_to_argv(arg)
if len(argv) != 1:
raise gdb.GdbError("lx-list-check takes one argument")
list_check(gdb.parse_and_eval(argv[0]))
LxListChk()
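# Usage inside a gdb session with these helpers loaded (illustrative; any
# expression evaluating to a struct list_head, or a pointer to one, works):
#   (gdb) lx-list-check init_task.tasks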
|
gpl-2.0
|
lenovor/scikit-learn
|
sklearn/decomposition/pca.py
|
192
|
23117
|
""" Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
    The dataset is assumed to be embedded in Gaussian noise of shape (n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
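# Note (illustrative, not part of scikit-learn): the ``spectrum`` passed to the
# two helpers above is the eigenvalue spectrum explained_variance_ = S**2 / n_samples
# computed in PCA._fit, and _infer_dimension_ simply returns the rank that
# maximises Minka's approximate log-likelihood.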
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
            # number of components for which the cumulative explained variance
            # percentage exceeds the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
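# Illustrative sketch (not part of scikit-learn): exercise both estimators on
# random data; the shapes, seed and component counts are arbitrary.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.randn(200, 5)
    pca = PCA(n_components='mle').fit(X_demo)
    print(pca.n_components_)              # rank chosen by Minka's MLE
    print(pca.explained_variance_ratio_)
    rpca = RandomizedPCA(n_components=2, random_state=0).fit(X_demo)
    print(rpca.explained_variance_ratio_)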
|
bsd-3-clause
|
Anonymouslemming/ansible
|
lib/ansible/modules/windows/win_wakeonlan.py
|
18
|
2191
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Dag Wieers <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_wakeonlan
version_added: '2.4'
short_description: Send a magic Wake-on-LAN (WoL) broadcast packet
description:
- The C(win_wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets.
options:
mac:
description:
- MAC address to send Wake-on-LAN broadcast packet for.
required: true
broadcast:
description:
- Network broadcast address to use for broadcasting magic Wake-on-LAN packet.
default: 255.255.255.255
port:
description:
- UDP port to use for magic Wake-on-LAN packet.
default: 7
author:
- Dag Wieers (@dagwieers)
todo:
- Does not have SecureOn password support
notes:
- This module sends a magic packet, without knowing whether it worked. It always reports a change.
- Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS).
- Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first).
'''
EXAMPLES = r'''
- name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66
win_wakeonlan:
mac: 00:00:5E:00:53:66
broadcast: 192.0.2.23
- name: Send a magic Wake-On-LAN packet on port 9 to 00-00-5E-00-53-66
win_wakeonlan:
mac: 00-00-5E-00-53-66
port: 9
delegate_to: remote_system
'''
RETURN = r'''
# Default return values
'''
|
gpl-3.0
|
ddy88958620/lib
|
Python/scrapy/naturebest/healthspan.py
|
2
|
2536
|
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from urllib import urlencode
import hashlib
import csv
from product_spiders.items import Product, ProductLoaderWithNameStrip\
as ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class HealthSpanSpider(BaseSpider):
name = 'healthspan.co.uk'
allowed_domains = ['www.healthspan.co.uk', 'healthspan.co.uk']
start_urls = ('http://www.healthspan.co.uk/products/',)
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# getting product links from A-Z product list
links = hxs.select('//td[@class="itemL"]/span/a/@href').extract()
for prod_url in links:
url = urljoin_rfc(get_base_url(response), prod_url)
yield Request(url)
# products
for product in self.parse_product(response):
yield product
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
name = hxs.select('//h1[@class="item"]/span/text()').extract()
if name:
url = response.url
url = urljoin_rfc(get_base_url(response), url)
loader = ProductLoader(item=Product(), selector=hxs)
loader.add_value('url', url)
loader.add_value('name', name[0])
items = hxs.select('//div[@class="sku-details"]')
for item in items:
loader = ProductLoader(item=Product(), selector=hxs)
loader.add_value('url', url)
#loader.add_value('name', name[0])
n = name[0].strip()
sku = ''.join(item.select('.//span[@class="sku-description"]//text()').extract())
if sku:
n += ' ' + sku.strip()
loader.add_value('name', n)
price = item.select('./span[@class="price"]/text()').extract()
if price:
loader.add_value('price', price[0])
else:
price = item.select('./span[@class="special-price"]/text()').extract()
loader.add_value('price', price[0])
yield loader.load_item()
|
apache-2.0
|
topxiaoke/myedx
|
common/djangoapps/user_api/middleware.py
|
9
|
1651
|
"""
Middleware for user api.
Adds user's tags to tracking event context.
"""
from track.contexts import COURSE_REGEX
from eventtracking import tracker
from user_api.models import UserCourseTag
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class UserTagsEventContextMiddleware(object):
"""Middleware that adds a user's tags to tracking event context."""
CONTEXT_NAME = 'user_tags_context'
def process_request(self, request):
"""
Add a user's tags to the tracking event context.
"""
match = COURSE_REGEX.match(request.build_absolute_uri())
course_id = None
if match:
course_id = match.group('course_id')
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
context = {}
if course_id:
context['course_id'] = course_id
if request.user.is_authenticated():
context['course_user_tags'] = dict(
UserCourseTag.objects.filter(
user=request.user.pk,
course_id=course_key,
).values_list('key', 'value')
)
else:
context['course_user_tags'] = {}
tracker.get_tracker().enter_context(
self.CONTEXT_NAME,
context
)
def process_response(self, request, response): # pylint: disable=unused-argument
"""Exit the context if it exists."""
try:
tracker.get_tracker().exit_context(self.CONTEXT_NAME)
except: # pylint: disable=bare-except
pass
return response
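# Illustrative note (not part of edX): for a request hitting a course URL the
# context entered above looks roughly like
#   {'course_id': 'org/course/run',
#    'course_user_tags': {'<tag key>': '<tag value>', ...}}
# and process_response exits it again once the response has been built.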
|
agpl-3.0
|
ospaceteam/outerspace
|
tests/common.py
|
2
|
6754
|
#!/usr/bin/env python2
#
# Copyright 2001 - 2016 Ludek Smid [http://www.ospace.net/]
#
# This file is part of Outer Space.
#
# Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import atexit
import logging as log
import os
import shutil
import signal
import subprocess
import tempfile
import time
log.basicConfig(level=log.INFO, format='%(levelname)-7s: %(message)s')
TEMP_DIR = None
SERVER_OUT = None
UTILS_OUT = None
AI_OUT = None
BUP_OUT = None
CODE_ROOT=os.path.realpath(os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'..'))
def killServer():
try:
with open(os.path.join(TEMP_DIR, 'server.pid'), 'r') as pid_file:
pid=int(pid_file.read())
log.warning('Cleaning up server process {0}'.format(pid))
os.kill(pid, signal.SIGKILL)
except:
pass
def startServer(upgrade=False):
args=[os.path.join(CODE_ROOT, 'outerspace.py'),
'server',
'--configdir=' + TEMP_DIR,
'--mode=0']
if upgrade:
log.info('Starting server with upgrade')
args.append('--upgrade')
else:
log.info('Starting server')
subprocess.Popen(args, stdout=SERVER_OUT, stderr=subprocess.STDOUT)
time.sleep(1) # give server process enough time to initialize
start_time=time.time()
while True:
args=[os.path.join(CODE_ROOT, 'tools', 'osclient_cli.py'),
'--ping',
'--configdir=' + TEMP_DIR]
if subprocess.call(args, stdout=BUP_OUT,
stderr=subprocess.STDOUT) == 0:
break
try:
with open(os.path.join(TEMP_DIR, 'server.pid'), 'r') as pid_file:
pid=int(pid_file.read())
os.kill(pid, 0)
if time.time() - start_time > 60:
log.error('Server takes too long to initialize')
killServer()
return False
log.debug('Waiting for server to initialize')
time.sleep(5)
except (OSError, IOError):
log.error('Server failed to initialize')
return False
log.info('Server has been initialized')
return True
def stopServer():
log.info('Stopping server')
args=[os.path.join(CODE_ROOT, 'tools', 'osclient_cli.py'),
'--shutdown',
'--configdir=' + TEMP_DIR]
if subprocess.call(args, stdout=UTILS_OUT, stderr=subprocess.STDOUT) != 0:
log.error('Server failed to stop')
return False
start_time=time.time()
while os.path.isfile(os.path.join(TEMP_DIR, 'server.pid')):
if time.time() - start_time > 60:
log.error('Server takes too long to stop')
return False
log.debug('Waiting for server to stop')
time.sleep(5)
log.info('Server has been stopped')
return True
def startServerTime():
args=[os.path.join(CODE_ROOT, 'tools', 'osclient_cli.py'),
'--starttime',
'--configdir=' + TEMP_DIR]
if subprocess.call(args, stdout=UTILS_OUT, stderr=subprocess.STDOUT) != 0:
log.error('Time failed to start')
return False
log.info('Time has been started')
return True
def doTurns(amount, skip, slow=False, verbose=True):
args_osclient=[os.path.join(CODE_ROOT, 'tools', 'osclient_cli.py'),
'--turns={0}'.format(skip),
'--configdir=' + TEMP_DIR]
args_ai=[os.path.join(CODE_ROOT, 'outerspace.py'),
'ai-pool',
'--configdir=' + TEMP_DIR,
'--local']
if slow:
args_ai.append('--procs=1')
for turn in range(0, amount, skip):
subprocess.call(args_ai, stdout=AI_OUT,
stderr=subprocess.STDOUT)
subprocess.call(args_osclient, stdout=UTILS_OUT,
stderr=subprocess.STDOUT)
if verbose:
log.info('Turn {0}'.format(turn + skip))
def createGalaxy(galaxy_type, galaxy_name = None):
start_time = time.time()
if galaxy_name is None:
galaxy_name = galaxy_type
args=[os.path.join(CODE_ROOT, 'tools', 'osclient_cli.py'),
'--newgalaxy={0}'.format(galaxy_name),'{0}'.format(galaxy_type),
'--configdir=' + TEMP_DIR]
if subprocess.call(args, stdout=UTILS_OUT, stderr=subprocess.STDOUT) != 0:
log.error('Galaxy {0} has not been created'.format(galaxy_type))
else:
name_string = ''
if galaxy_name:
name_string = "named {0}".format(galaxy_name)
log.info('Galaxy {0} {1} created ({2:.2g}s)'.format(galaxy_type, name_string, time.time() - start_time))
def deleteGalaxy(galaxy_id):
args=[os.path.join(CODE_ROOT, 'tools', 'osclient_cli.py'),
'--deletegalaxy={0}'.format(galaxy_id),
'--configdir=' + TEMP_DIR]
if subprocess.call(args, stdout=UTILS_OUT, stderr=subprocess.STDOUT) != 0:
log.error('Galaxy {0} has not been deleted'.format(galaxy_id))
else:
log.info('Galaxy {0} deleted'.format(galaxy_id))
def makeScreenshots(history_dir):
try:
os.makedirs(history_dir)
except os.error:
pass
args=[os.path.join(CODE_ROOT, 'tools', 'osclient_cli.py'),
'--chronicler',
'--chronicler_dir=' + history_dir,
'--configdir=' + TEMP_DIR]
if subprocess.call(args, stdout=UTILS_OUT, stderr=subprocess.STDOUT) != 0:
log.error('Failed to make a screenshot.')
def closeLogs():
SERVER_OUT.close()
UTILS_OUT.close()
AI_OUT.close()
BUP_OUT.close()
def initPaths(configDir = None):
global TEMP_DIR, SERVER_OUT, UTILS_OUT, AI_OUT, BUP_OUT
TEMP_DIR=tempfile.mkdtemp()
if configDir is not None:
os.rmdir(TEMP_DIR)
shutil.copytree(configDir, TEMP_DIR)
SERVER_OUT=open(os.path.join(TEMP_DIR, 'server.out'), 'a')
UTILS_OUT=open(os.path.join(TEMP_DIR, 'utils.out'), 'a')
AI_OUT=open(os.path.join(TEMP_DIR, 'ai.out'), 'a')
BUP_OUT=open(os.path.join(TEMP_DIR, 'bup.out'), 'a')
log.info('Location of logs: ' + str(TEMP_DIR))
atexit.register(closeLogs)
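# Illustrative sketch (not part of the test suite): a typical driver composed
# from the helpers above.  The galaxy type string is hypothetical.
if __name__ == '__main__':
    initPaths()
    atexit.register(killServer)
    if startServer():
        createGalaxy('Circle42P')
        startServerTime()
        doTurns(10, 5)
        stopServer()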
|
gpl-2.0
|
rghe/ansible
|
lib/ansible/modules/cloud/scaleway/scaleway_server_facts.py
|
6
|
6064
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2018, Yanis Guenane <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: scaleway_server_facts
short_description: Gather facts about the Scaleway servers available.
description:
- Gather facts about the Scaleway servers available.
version_added: "2.7"
author:
- "Yanis Guenane (@Spredzy)"
- "Remy Leone (@sieben)"
extends_documentation_fragment: scaleway
'''
EXAMPLES = r'''
- name: Gather Scaleway servers facts
scaleway_server_facts:
'''
RETURN = r'''
---
scaleway_server_facts:
description: Response from Scaleway API
returned: success
type: complex
contains:
"scaleway_server_facts": [
{
"arch": "x86_64",
"boot_type": "local",
"bootscript": {
"architecture": "x86_64",
"bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
"default": true,
"dtb": "",
"id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
"initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
"kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
"organization": "11111111-1111-4111-8111-111111111111",
"public": true,
"title": "x86_64 mainline 4.4.127 rev1"
},
"commercial_type": "START1-XS",
"creation_date": "2018-08-14T21:36:56.271545+00:00",
"dynamic_ip_required": false,
"enable_ipv6": false,
"extra_networks": [],
"hostname": "scw-e0d256",
"id": "12f19bc7-108c-4517-954c-e6b3d0311363",
"image": {
"arch": "x86_64",
"creation_date": "2018-04-26T12:42:21.619844+00:00",
"default_bootscript": {
"architecture": "x86_64",
"bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
"default": true,
"dtb": "",
"id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
"initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
"kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
"organization": "11111111-1111-4111-8111-111111111111",
"public": true,
"title": "x86_64 mainline 4.4.127 rev1"
},
"extra_volumes": [],
"from_server": null,
"id": "67375eb1-f14d-4f02-bb42-6119cecbde51",
"modification_date": "2018-04-26T12:49:07.573004+00:00",
"name": "Ubuntu Xenial",
"organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
"public": true,
"root_volume": {
"id": "020b8d61-3867-4a0e-84a4-445c5393e05d",
"name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
"size": 25000000000,
"volume_type": "l_ssd"
},
"state": "available"
},
"ipv6": null,
"location": {
"cluster_id": "5",
"hypervisor_id": "412",
"node_id": "2",
"platform_id": "13",
"zone_id": "par1"
},
"maintenances": [],
"modification_date": "2018-08-14T21:37:28.630882+00:00",
"name": "scw-e0d256",
"organization": "3f709602-5e6c-4619-b80c-e841c89734af",
"private_ip": "10.14.222.131",
"protected": false,
"public_ip": {
"address": "163.172.170.197",
"dynamic": false,
"id": "ea081794-a581-4495-8451-386ddaf0a451"
},
"security_group": {
"id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e",
"name": "Default security group"
},
"state": "running",
"state_detail": "booted",
"tags": [],
"volumes": {
"0": {
"creation_date": "2018-08-14T21:36:56.271545+00:00",
"export_uri": "device://dev/vda",
"id": "68386fae-4f55-4fbf-aabb-953036a85872",
"modification_date": "2018-08-14T21:36:56.271545+00:00",
"name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
"organization": "3f709602-5e6c-4619-b80c-e841c89734af",
"server": {
"id": "12f19bc7-108c-4517-954c-e6b3d0311363",
"name": "scw-e0d256"
},
"size": 25000000000,
"state": "available",
"volume_type": "l_ssd"
}
}
}
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.scaleway import (
Scaleway, ScalewayException, scaleway_argument_spec
)
class ScalewayServerFacts(Scaleway):
def __init__(self, module):
super(ScalewayServerFacts, self).__init__(module)
self.name = 'servers'
def main():
module = AnsibleModule(
argument_spec=scaleway_argument_spec(),
supports_check_mode=True,
)
try:
module.exit_json(
ansible_facts={'scaleway_server_facts': ScalewayServerFacts(module).get_resources()}
)
except ScalewayException as exc:
module.fail_json(msg=exc.message)
if __name__ == '__main__':
main()
|
gpl-3.0
|
autrilla/servo
|
tests/wpt/harness/wptrunner/wptmanifest/node.py
|
190
|
4304
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
class NodeVisitor(object):
def visit(self, node):
# This is ugly as hell, but we don't have multimethods and
# they aren't trivial to fake without access to the class
# object from the class body
func = getattr(self, "visit_%s" % (node.__class__.__name__))
return func(node)
class Node(object):
def __init__(self, data=None):
self.data = data
self.parent = None
self.children = []
def append(self, other):
other.parent = self
self.children.append(other)
def remove(self):
self.parent.children.remove(self)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.data)
def __str__(self):
rv = [repr(self)]
for item in self.children:
rv.extend(" %s" % line for line in str(item).split("\n"))
return "\n".join(rv)
def __eq__(self, other):
if not (self.__class__ == other.__class__ and
self.data == other.data and
len(self.children) == len(other.children)):
return False
for child, other_child in zip(self.children, other.children):
if not child == other_child:
return False
return True
def copy(self):
new = self.__class__(self.data)
for item in self.children:
new.append(item.copy())
return new
class DataNode(Node):
def append(self, other):
# Append that retains the invariant that child data nodes
# come after child nodes of other types
other.parent = self
if isinstance(other, DataNode):
self.children.append(other)
else:
index = len(self.children)
while index > 0 and isinstance(self.children[index - 1], DataNode):
index -= 1
for i in xrange(index):
assert other.data != self.children[i].data
self.children.insert(index, other)
class KeyValueNode(Node):
def append(self, other):
# Append that retains the invariant that conditional nodes
# come before unconditional nodes
other.parent = self
if isinstance(other, ValueNode):
if self.children:
assert not isinstance(self.children[-1], ValueNode)
self.children.append(other)
else:
if self.children and isinstance(self.children[-1], ValueNode):
self.children.insert(len(self.children) - 1, other)
else:
self.children.append(other)
class ListNode(Node):
def append(self, other):
other.parent = self
self.children.append(other)
class ValueNode(Node):
def append(self, other):
raise TypeError
class AtomNode(ValueNode):
pass
class ConditionalNode(Node):
pass
class UnaryExpressionNode(Node):
def __init__(self, operator, operand):
Node.__init__(self)
self.append(operator)
self.append(operand)
def append(self, other):
Node.append(self, other)
assert len(self.children) <= 2
def copy(self):
new = self.__class__(self.children[0].copy(),
self.children[1].copy())
return new
class BinaryExpressionNode(Node):
def __init__(self, operator, operand_0, operand_1):
Node.__init__(self)
self.append(operator)
self.append(operand_0)
self.append(operand_1)
def append(self, other):
Node.append(self, other)
assert len(self.children) <= 3
def copy(self):
new = self.__class__(self.children[0].copy(),
self.children[1].copy(),
self.children[2].copy())
return new
class UnaryOperatorNode(Node):
def append(self, other):
raise TypeError
class BinaryOperatorNode(Node):
def append(self, other):
raise TypeError
class IndexNode(Node):
pass
class VariableNode(Node):
pass
class StringNode(Node):
pass
class NumberNode(ValueNode):
pass
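# Illustrative sketch (not part of wptrunner): build a tiny tree to show the
# append invariants -- a KeyValueNode keeps ConditionalNode children ahead of
# its unconditional ValueNode, and DataNode children sort after other kinds.
if __name__ == '__main__':
    root = DataNode("example.ini")
    kv = KeyValueNode("expected")
    kv.append(ValueNode("PASS"))
    kv.append(ConditionalNode())
    root.append(kv)
    root.append(DataNode("subsection"))
    print(root)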
|
mpl-2.0
|
LabMagUBO/StoneX
|
concepts/1Dfunction.py
|
1
|
1470
|
#!/usr/bin/ipython-2.7 --pylab
# -*- coding: utf-8 -*-
import numpy as np
import pylab as pl
from scipy import optimize
from time import sleep
#function to plot
def plot_state():
pl.close()
pl.plot(x, f(x), 'b-', linewidth=2)
pl.grid()
pl.plot(r_pos, f(r_pos), 'ro')
pl.show()
def print_state():
x = r_pos[r_pos.size - 1]
print("Current position : ( %f , %f )" % (x , f(x)) )
def update_plot():
x = new_pos[0]
print "updating plot", x, f(x)
line1.set_ydata(x, f(x))
fig.canvas.draw()
# Definition of the function
def f(x):
return x**2 + 10*np.sin(x)
# initialisation of the variables
x = np.linspace(-5,5, 64)
last_pos = 2
r_pos = np.array([last_pos])
# initial position
pl.ion()
fig = pl.figure()
ax1 = pl.subplot(2, 1, 1)
ax2 = pl.subplot(2,1,2)
ax1.grid()
ax2.grid()
ax1.set_xlabel('x')
ax1.set_ylabel('E(x)')
ax2.set_xlabel('Iterations')
ax2.set_ylabel('x(i)')
ax1.plot(x, f(x), 'b-', linewidth=3)
ax1.plot(last_pos, f(last_pos), 'ro')
ax2.plot(0, last_pos, 'ro')
pl.draw()
print_state()
sleep(1)
i = 0
while True:
i = i+1
new_pos = optimize.fmin_bfgs(f, last_pos, maxiter=1, disp=False, epsilon=0.01)
r_pos = np.append(r_pos, new_pos)
ax1.plot(new_pos, f(new_pos), 'ro')
ax1.plot(r_pos, f(r_pos), 'r-')
ax2.plot(i, new_pos, 'ro')
ax2.plot(r_pos, 'r-')
pl.draw()
print_state()
sleep(0.5)
if new_pos == last_pos:
break
else:
last_pos = new_pos
|
gpl-3.0
|
Balannen/LSMASOMM
|
atom3/Kernel/ButtonsModels/createButtons.py
|
1
|
2506
|
# _ createButtons.py ____________________________________________________________________________
# createButtons : a class that subclasses GraphGrammar. File generated automatically by ATOM3.
# ___________________________________________________________________________________________
from GraphGrammar import *
from buttonFromEntity import *
from buttonFromRelationship import *
class createButtons (GraphGrammar):
def __init__ (self, parent):
GraphGrammar.__init__(self, [buttonFromEntity(parent) , buttonFromRelationship(parent)])
def initialAction(self, graph):
self.rewritingSystem.name = self.rewritingSystem.parent.ASGroot.keyword_.toString()
self.rewritingSystem.NButtons = 0
fileName = self.rewritingSystem.name+".py"
cgd = self.rewritingSystem.parent.codeGenDir
self.rewritingSystem.file = open(cgd+"/"+fileName,"w+t")
file = self.rewritingSystem.file
file.write("from ASG_Buttons import *\n")
file.write("from ButtonConfig import *\n")
file.write("from ATOM3Enum import *\n")
file.write("from ATOM3List import *\n")
file.write("from ATOM3Float import *\n")
file.write("from ATOM3Integer import *\n")
file.write("from ATOM3Attribute import *\n")
file.write("from ATOM3Constraint import *\n")
file.write("from ATOM3String import *\n")
file.write("from ATOM3BottomType import *\n")
file.write("from ATOM3Boolean import *\n")
file.write("from ATOM3Appearance import *\n")
file.write("from ATOM3Link import *\n")
file.write("def "+self.rewritingSystem.name+"(self, rootNode):\n")
file.write(" rootNode.Formalism_Name.setValue('"+self.rewritingSystem.name+"')\n")
file.write(" rootNode.RowSize.setValue(4)\n")
file.write(" rootNode.Formalism_File.setValue('"+cgd+"/"+self.rewritingSystem.name+"_MM.py')\n")
for nt in graph.listNodes.keys():
for node in graph.listNodes[nt]:
node.visited = 0
def finalAction(self, graph):
file = self.rewritingSystem.file
file.write("newfunction = "+self.rewritingSystem.name+"\n")
file.write("loadedMMName = 'Buttons'\n")
for nt in graph.listNodes.keys():
for node in graph.listNodes[nt]:
del node.visited
del self.rewritingSystem.file
del self.rewritingSystem.NButtons
|
gpl-3.0
|
Frouk/zulip
|
zproject/local_settings_template.py
|
1
|
15172
|
# Settings for Zulip Voyager
### MANDATORY SETTINGS
#
# These settings MUST be set in production. In a development environment,
# sensible default values will be used.
# The user-accessible Zulip hostname for this installation, e.g.
# zulip.example.com
EXTERNAL_HOST = 'zulip.example.com'
# The email address for the person or team who maintain the Zulip
# Voyager installation. Will also get support emails. (e.g. [email protected])
ZULIP_ADMINISTRATOR = '[email protected]'
# The domain for your organization, e.g. example.com
ADMIN_DOMAIN = 'example.com'
# Enable at least one of the following authentication backends.
AUTHENTICATION_BACKENDS = (
# 'zproject.backends.EmailAuthBackend', # Email and password; see SMTP setup below
# 'zproject.backends.ZulipRemoteUserBackend', # Local SSO
# 'zproject.backends.GoogleMobileOauth2Backend', # Google Apps, setup below
# 'zproject.backends.ZulipLDAPAuthBackend', # LDAP, setup below
)
# Google OAuth requires a bit of configuration; you will need to
# do the following:
#
# (1) Visit https://console.developers.google.com, setup an
# Oauth2 client ID that allows redirects to
# e.g. https://zulip.example.com/accounts/login/google/done/.
#
# (2) Then click into the APIs and Auth section (in the sidebar on the
# left side of the page), APIs, then under "Social APIs" click on
# "Google+ API" and click the button to enable the API.
#
# (3) put your client secret as "google_oauth2_client_secret" in
# zulip-secrets.conf, and your client ID right here:
# GOOGLE_OAUTH2_CLIENT_ID=<your client ID from Google>
# If you are using the ZulipRemoteUserBackend authentication backend,
# set this to your domain (e.g. if REMOTE_USER is "username" and the
# corresponding email address is "[email protected]", set
# SSO_APPEND_DOMAIN = "example.com")
SSO_APPEND_DOMAIN = None
# Configure the outgoing SMTP server below. For testing, you can skip
# sending emails entirely by commenting out EMAIL_HOST, but you will
# want to configure this to support email address confirmation emails,
# missed message emails, onboarding follow-up emails, etc. To
# configure SMTP, you will need to complete the following steps:
#
# (1) Fill out the outgoing email sending configuration below.
#
# (2) Put the SMTP password for EMAIL_HOST_USER in
# /etc/zulip/zulip-secrets.conf as email_password.
#
# (3) If you are using a gmail account to send outgoing email, you
# will likely need to read this Google support answer and configure
# that account as "less secure":
# https://support.google.com/mail/answer/14257.
#
# You can quickly test your sending email configuration using:
# ./manage.py send_test_email [email protected]
#
# A common problem is hosting providers that block outgoing SMTP traffic.
#
# With the exception of reading EMAIL_HOST_PASSWORD from
# email_password in the Zulip secrets file, Zulip uses Django's
# standard EmailBackend, so if you're having issues, you may want to
# search for documentation on using your email provider with Django.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = ''
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# The email From address to be used for automatically generated emails
DEFAULT_FROM_EMAIL = "Zulip <[email protected]>"
# The noreply address to be used as Reply-To for certain generated emails.
# Messages sent to this address should not be delivered anywhere.
NOREPLY_EMAIL_ADDRESS = "[email protected]"
# A list of strings representing the host/domain names that this
# Django site can serve. You should reset it to be a list of
# domains/IP addresses for your site. This is a security measure to
# prevent an attacker from poisoning caches and triggering password
# reset emails with links to malicious hosts by submitting requests
# with a fake HTTP Host header. You must include 'localhost' here.
ALLOWED_HOSTS = ['*']
### OPTIONAL SETTINGS
# Controls whether session cookies expire when the browser closes
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# Session cookie expiry in seconds after the last page load
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # 2 weeks
# Controls whether or not there is a feedback button in the UI.
ENABLE_FEEDBACK = False
# By default, the feedback button will submit feedback to the Zulip
# developers. If you set FEEDBACK_EMAIL to be an email address
# (e.g. ZULIP_ADMINISTRATOR), feedback sent by your users will instead
# be sent to that email address.
FEEDBACK_EMAIL = ZULIP_ADMINISTRATOR
# Controls whether or not error reports are sent to Zulip. Error
# reports are used to improve the quality of the product and do not
# include message contents; please contact Zulip support with any
# questions.
ERROR_REPORTING = True
# Controls whether or not Zulip will provide inline image preview when
# a link to an image is referenced in a message.
INLINE_IMAGE_PREVIEW = True
# By default, files uploaded by users and user avatars are stored
# directly on the Zulip server. If file storage in Amazon S3 is
# desired, you can configure that as follows:
#
# (1) Set s3_key and s3_secret_key in /etc/zulip/zulip-secrets.conf to
# be the S3 access and secret keys that you want to use, and set
# the S3_AUTH_UPLOADS_BUCKET and S3_AVATAR_BUCKET to be the S3 buckets
# you've created to store file uploads and user avatars, respectively.
# Then restart Zulip (scripts/restart-zulip).
#
# (2) Edit /etc/nginx/sites-available/zulip-enterprise to comment out
# the nginx configuration for /user_uploads and /user_avatars (see
# https://github.com/zulip/zulip/issues/291 for discussion of a better
# solution that won't be automatically reverted by the Zulip upgrade
# script), and then restart nginx.
LOCAL_UPLOADS_DIR = "/home/zulip/uploads"
#S3_AUTH_UPLOADS_BUCKET = ""
#S3_AVATAR_BUCKET = ""
# Maximum allowed size of uploaded files, in megabytes. DO NOT SET
# ABOVE 80MB. The file upload implementation doesn't support chunked
# uploads, so browsers will crash if you try uploading larger files.
MAX_FILE_UPLOAD_SIZE = 25
# Controls whether name changes are completely disabled for this installation
# This is useful in settings where you're syncing names from an integrated LDAP/Active Directory
NAME_CHANGES_DISABLED = False
# Controls whether users who have not uploaded an avatar will receive an avatar
# from gravatar.com.
ENABLE_GRAVATAR = True
# To override the default avatar image if ENABLE_GRAVATAR is False, place your
# custom default avatar image at /home/zulip/local-static/default-avatar.png
# and uncomment the following line.
#DEFAULT_AVATAR_URI = '/local-static/default-avatar.png'
# To access an external postgres database you should define the host name in
# REMOTE_POSTGRES_HOST, you can define the password in the secrets file in the
# property postgres_password, and the SSL connection mode in REMOTE_POSTGRES_SSLMODE
# Different options are:
# disable: I don't care about security, and I don't want to pay the overhead of encryption.
# allow: I don't care about security, but I will pay the overhead of encryption if the server insists on it.
# prefer: I don't care about encryption, but I wish to pay the overhead of encryption if the server supports it.
# require: I want my data to be encrypted, and I accept the overhead. I trust that the network will make sure I always connect to the server I want.
# verify-ca: I want my data encrypted, and I accept the overhead. I want to be sure that I connect to a server that I trust.
# verify-full: I want my data encrypted, and I accept the overhead. I want to be sure that I connect to a server I trust, and that it's the one I specify.
#REMOTE_POSTGRES_HOST = 'dbserver.example.com'
#REMOTE_POSTGRES_SSLMODE = 'require'
### TWITTER INTEGRATION
# Zulip supports showing inline Tweet previews when a tweet is linked
# to in a message. To support this, Zulip must have access to the
# Twitter API via OAuth. To obtain the various access tokens needed
# below, you must register a new application under your Twitter
# account by doing the following:
#
# 1. Log in to http://dev.twitter.com.
# 2. In the menu under your username, click My Applications. From this page, create a new application.
# 3. Click on the application you created and click "create my access token".
# 4. Fill in the values for twitter_consumer_key, twitter_consumer_secret, twitter_access_token_key,
# and twitter_access_token_secret in /etc/zulip/zulip-secrets.conf.
### EMAIL GATEWAY INTEGRATION
# The Email gateway integration supports sending messages into Zulip
# by sending an email. This is useful for receiving notifications
# from third-party services that only send outgoing notifications via
# email. Once this integration is configured, each stream will have
# an email address documented on the stream settings page, and emails
# sent to that address will be delivered into the stream.
#
# There are two ways to configure email mirroring in Zulip:
# 1. Local delivery: An MTA runs locally and passes mail directly to Zulip
# 2. Polling: Checks an IMAP inbox every minute for new messages.
#
# The local delivery configuration is preferred for production because
# it supports nicer looking email addresses and has no cron delay,
# while the polling mechanism is better for testing/developing this
# feature because it doesn't require a public-facing IP/DNS setup.
#
# The main email mirror setting is the email address pattern, where
# you specify the email address format you'd like the integration to
# use. It should be one of the following:
# %[email protected] (for local delivery)
# username+%[email protected] (for polling if [email protected])
EMAIL_GATEWAY_PATTERN = ""
#
# If you are using local delivery, EMAIL_GATEWAY_PATTERN is all you need
# to change in this file. You will also need to enable the Zulip postfix
# configuration to support local delivery by adding
# , zulip::postfix_localmail
# to puppet_classes in /etc/zulip/zulip.conf and then running
# `scripts/zulip-puppet-apply -f` to do the installation.
#
# If you are using polling, you will need to set up an IMAP email
# account dedicated to Zulip email gateway messages. The model is
# that users will send emails to that account via an address of the
# form username+%[email protected] (which is what you will set as
# EMAIL_GATEWAY_PATTERN); your email provider should deliver those
# emails to the [email protected] inbox. Then you run in a cron
# job `./manage.py email-mirror` (see puppet/zulip/files/cron.d/email-mirror),
# which will check that inbox and batch-process any new messages.
#
# You will need to configure authentication for the email mirror
# command to access the IMAP mailbox below and in zulip-secrets.conf.
#
# The IMAP login; username here and password as email_gateway_login in
# zulip-secrets.conf.
EMAIL_GATEWAY_LOGIN = ""
# The IMAP server & port to connect to
EMAIL_GATEWAY_IMAP_SERVER = ""
EMAIL_GATEWAY_IMAP_PORT = 993
# The IMAP folder name to check for emails. All emails sent to EMAIL_GATEWAY_PATTERN above
# must be delivered to this folder
EMAIL_GATEWAY_IMAP_FOLDER = "INBOX"
### LDAP integration configuration
# Zulip supports retrieving information about users via LDAP, and
# optionally using LDAP as an authentication mechanism.
#
# In either configuration, you will need to do the following:
#
# * Fill in the LDAP configuration options below so that Zulip can
# connect to your LDAP server
#
# * Set up the mapping between email addresses (used as login names in
# Zulip) and LDAP usernames. There are two supported ways to setup
# the username mapping:
#
# (A) If users' email addresses are in LDAP, set
# LDAP_APPEND_DOMAIN = None
# AUTH_LDAP_USER_SEARCH to lookup users by email address
#
# (B) If LDAP only has usernames but email addresses are of the form
# [email protected], you should set:
# LDAP_APPEND_DOMAIN = example.com and
# AUTH_LDAP_USER_SEARCH to lookup users by username
#
# You can quickly test whether your configuration works by running:
# ./manage.py query_ldap [email protected]
# From the root of your Zulip installation; if your configuration is working
# that will output the full name for your user.
#
# -------------------------------------------------------------
#
# If you are using LDAP for authentication, you will need to enable
# the zproject.backends.ZulipLDAPAuthBackend auth backend in
# AUTHENTICATION_BACKENDS above. After doing so, you should be able
# to login to Zulip by entering your email address and LDAP password
# on the Zulip login form.
#
# If you are using LDAP to populate names in Zulip, once you finish
# configuring this integration, you will need to run:
# ./manage.py sync_ldap_user_data
# To sync names for existing users; you may want to run this in a cron
# job to pick up name changes made on your LDAP server.
import ldap
from django_auth_ldap.config import LDAPSearch, GroupOfNamesType
# URI of your LDAP server. If set, LDAP is used to prepopulate a user's name in
# Zulip. Example: "ldaps://ldap.example.com"
AUTH_LDAP_SERVER_URI = ""
# This DN will be used to bind to your server. If unset, anonymous
# binds are performed. If set, you need to specify the password as
# 'auth_ldap_bind_password' in zulip-secrets.conf.
AUTH_LDAP_BIND_DN = ""
# Specify the search base and the property to filter on that corresponds to the
# username.
AUTH_LDAP_USER_SEARCH = LDAPSearch("ou=users,dc=example,dc=com",
ldap.SCOPE_SUBTREE, "(uid=%(user)s)")
# If the value of a user's "uid" (or similar) property is not their email
# address, specify the domain to append here.
LDAP_APPEND_DOMAIN = None
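# For illustration only (placeholder values, not defaults): an option (B) setup
# as described in the notes above would combine this setting with the search
# above, e.g.:
# LDAP_APPEND_DOMAIN = "example.com"
# AUTH_LDAP_USER_SEARCH = LDAPSearch("ou=users,dc=example,dc=com",
#                                    ldap.SCOPE_SUBTREE, "(uid=%(user)s)")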
# This map defines how to populate attributes of a Zulip user from LDAP.
AUTH_LDAP_USER_ATTR_MAP = {
# Populate the Django user's name from the LDAP directory.
"full_name": "cn",
}
# The default CAMO_URI of '/external_content/' is served by the camo
# setup in the default Voyager nginx configuration. Setting CAMO_URI
# to '' will disable the Camo integration.
CAMO_URI = '/external_content/'
# RabbitMQ configuration
#
# By default, Zulip connects to rabbitmq running locally on the machine,
# but Zulip also supports connecting to RabbitMQ over the network;
# to use a remote RabbitMQ instance, set RABBITMQ_HOST here.
# RABBITMQ_HOST = "localhost"
# To use another rabbitmq user than the default 'zulip', set RABBITMQ_USERNAME here.
# RABBITMQ_USERNAME = 'zulip'
# Memcached configuration
#
# By default, Zulip connects to memcached running locally on the machine,
# but Zulip also supports connecting to memcached over the network;
# to use a remote Memcached instance, set MEMCACHED_LOCATION here.
# Format HOST:PORT
# MEMCACHED_LOCATION = 127.0.0.1:11211
# Redis configuration
#
# By default, Zulip connects to redis running locally on the machine,
# but Zulip also supports connecting to redis over the network;
# to use a remote Redis instance, set REDIS_HOST here.
# REDIS_HOST = '127.0.0.1'
# For a different redis port set the REDIS_PORT here.
# REDIS_PORT = 6379
# Controls whether Zulip will rate-limit user requests.
# RATE_LIMITING = True
|
apache-2.0
|
nicfit/mishmash
|
tests/conftest.py
|
1
|
2839
|
import uuid
import shutil
import tempfile
import pytest
import sqlalchemy_utils
from uuid import uuid4
from pathlib import Path
from collections import namedtuple
from tempfile import NamedTemporaryFile
import mishmash.orm
import mishmash.database
from mishmash.__main__ import MishMash
from .factories import Mp3AudioFileFactory, TagFactory, LibraryFactory
TestDatabase = namedtuple("TestDatabase", ["url", "engine", "SessionMaker"])
@pytest.fixture(scope="function",
params=["sqlite", "postgresql"])
def database(request, pg_server):
db_file = None
if request.param == "sqlite":
db_file = NamedTemporaryFile(suffix=".sqlite", delete=False)
db_file.close()
db_url = f"sqlite:///{db_file.name}"
elif request.param == "postgresql":
db_url = "postgresql://{user}:{password}@{host}:{port}/{db}"\
.format(db=str(uuid4()), **pg_server["params"])
sqlalchemy_utils.create_database(db_url)
else:
        pytest.fail("unhandled db: " + request.param)
engine, SessionMaker, connection = mishmash.database.init(db_url)
# Outermost transaction that is always rolled back
trans = connection.begin()
yield TestDatabase(url=db_url, engine=engine, SessionMaker=SessionMaker)
# ... teardown
trans.rollback()
connection.close()
if request.param == "sqlite":
assert db_file
Path(db_file.name).unlink()
else:
mishmash.database.dropAll(db_url)
@pytest.fixture
def session(database):
session = database.SessionMaker()
yield session
# ... teardown
session.rollback()
session.close()
@pytest.fixture(scope="session", autouse=True)
def mishmash_tempdir():
global TEMP_DIR
temp_d = tempfile.TemporaryDirectory()
TEMP_DIR = temp_d
yield Path(temp_d.name)
if Path(temp_d.name).exists():
temp_d.cleanup()
@pytest.fixture(scope="function")
def mp3audiofile(mishmash_tempdir):
mp3_file = Mp3AudioFileFactory(tag=TagFactory())
yield mp3_file
Path(mp3_file.path).unlink()
def _tempCopy(src, dest_dir):
testfile = Path(str(dest_dir)) / "{}.mp3".format(uuid.uuid4())
shutil.copyfile(str(src), str(testfile))
return testfile
@pytest.fixture(scope="function")
def db_library(session):
lib = LibraryFactory()
session.add(lib)
session.commit()
yield lib
# ... teardown
# the session is getting popped, so do nothing
@pytest.fixture(scope="session")
def mishmash_cmd():
def func(args, expected_retval=0, db_url=None):
        if db_url:
args = ["--database", db_url] + args
app = MishMash()
try:
retval = app._run(args)
except SystemExit as exit:
retval = exit.code
assert retval == expected_retval
return func
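# Usage sketch (hypothetical test module, not part of this conftest): a test only
# needs to request the `session` fixture; it then runs once per backend
# ("sqlite" and "postgresql") because `session` depends on the parametrized
# `database` fixture above.
#
#   def test_add_library(session):
#       lib = LibraryFactory()
#       session.add(lib)
#       session.commit()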
|
gpl-3.0
|
kevinr/750book-web
|
750book-web-env/lib/python2.7/site-packages/django/db/models/expressions.py
|
81
|
3283
|
from datetime import datetime
from django.utils import tree
from django.utils.copycompat import deepcopy
class ExpressionNode(tree.Node):
"""
Base class for all query expressions.
"""
# Arithmetic connectors
ADD = '+'
SUB = '-'
MUL = '*'
DIV = '/'
MOD = '%%' # This is a quoted % operator - it is quoted
# because it can be used in strings that also
# have parameter substitution.
# Bitwise operators
AND = '&'
OR = '|'
def __init__(self, children=None, connector=None, negated=False):
if children is not None and len(children) > 1 and connector is None:
raise TypeError('You have to specify a connector.')
super(ExpressionNode, self).__init__(children, connector, negated)
def _combine(self, other, connector, reversed, node=None):
if reversed:
obj = ExpressionNode([other], connector)
obj.add(node or self, connector)
else:
obj = node or ExpressionNode([self], connector)
obj.add(other, connector)
return obj
###################
# VISITOR METHODS #
###################
def prepare(self, evaluator, query, allow_joins):
return evaluator.prepare_node(self, query, allow_joins)
def evaluate(self, evaluator, qn, connection):
return evaluator.evaluate_node(self, qn, connection)
#############
# OPERATORS #
#############
def __add__(self, other):
return self._combine(other, self.ADD, False)
def __sub__(self, other):
return self._combine(other, self.SUB, False)
def __mul__(self, other):
return self._combine(other, self.MUL, False)
def __div__(self, other):
return self._combine(other, self.DIV, False)
def __mod__(self, other):
return self._combine(other, self.MOD, False)
def __and__(self, other):
return self._combine(other, self.AND, False)
def __or__(self, other):
return self._combine(other, self.OR, False)
def __radd__(self, other):
return self._combine(other, self.ADD, True)
def __rsub__(self, other):
return self._combine(other, self.SUB, True)
def __rmul__(self, other):
return self._combine(other, self.MUL, True)
def __rdiv__(self, other):
return self._combine(other, self.DIV, True)
def __rmod__(self, other):
return self._combine(other, self.MOD, True)
def __rand__(self, other):
return self._combine(other, self.AND, True)
def __ror__(self, other):
return self._combine(other, self.OR, True)
def prepare_database_save(self, unused):
return self
class F(ExpressionNode):
"""
An expression representing the value of the given field.
"""
def __init__(self, name):
super(F, self).__init__(None, None, False)
self.name = name
def __deepcopy__(self, memodict):
obj = super(F, self).__deepcopy__(memodict)
obj.name = self.name
return obj
def prepare(self, evaluator, query, allow_joins):
return evaluator.prepare_leaf(self, query, allow_joins)
def evaluate(self, evaluator, qn, connection):
return evaluator.evaluate_leaf(self, qn, connection)
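# Minimal illustration (added commentary, not part of Django): the operator
# overloads on ExpressionNode mean that combining F instances builds an
# expression tree which the backend later walks via prepare()/evaluate(), e.g.:
#
#   expr = F('price') * F('quantity')    # ExpressionNode with connector MUL ('*')
#   expr2 = F('hits') + 1                # plain values can be combined in as well
#
# and in queries this is typically used as, for a hypothetical Entry model:
#   Entry.objects.update(n_comments=F('n_comments') + 1)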
|
mit
|
edwann13/weatherscript
|
python-weather-api-read-only/examples/get-weather.py
|
29
|
3351
|
#!/usr/bin/env python3
#Copyright (c) 2010 Dimitris Leventeas <[email protected]>
#Permission is hereby granted, free of charge, to any person
#obtaining a copy of this software and associated documentation
#files (the "Software"), to deal in the Software without
#restriction, including without limitation the rights to use,
#copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the
#Software is furnished to do so, subject to the following
#conditions:
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
#WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
#OTHER DEALINGS IN THE SOFTWARE.
from optparse import OptionParser
from xml.etree.cElementTree import ElementTree, Element
import pywapi
def write_everything_from_yahoo_to_xml(country, cities, outfile='weather.xml'):
""" Write all the results from google to an xml file """
weather_reports = pywapi.get_everything_from_yahoo(country, cities)
xml_output = Element('Weather')
for city, report in weather_reports.items():
try:
xml_city = Element('town')
xml_name = Element('name')
xml_name.text = city
xml_city.append(xml_name)
xml_temperature = Element('temperature')
temp_c = report['wind']['chill']
temp_unit = report['units']['temperature']
temp_cond = ''.join([temp_c, ' ', temp_unit])
xml_temperature.text = temp_cond
xml_city.append(xml_temperature)
xml_humidity = Element('humidity')
xml_humidity.text = report['atmosphere']['humidity']
xml_city.append(xml_humidity)
xml_condition = Element('condition')
xml_condition.text = report['condition']['text']
xml_city.append(xml_condition)
xml_wind = Element('wind')
beaufort = pywapi.wind_beaufort_scale(report['wind']['speed'])
direction = pywapi.wind_direction(report['wind']['direction'])
wind_cond = ''.join([beaufort, ' ', direction])
xml_wind.text = wind_cond
xml_city.append(xml_wind)
xml_output.append(xml_city)
except KeyError:
pass
ElementTree(xml_output).write(outfile, 'UTF-8')
def main():
parser = OptionParser(\
usage='Collect information about the weather in Greece.')
parser.add_option("-f", "--file", dest="filename", default="weather.xml",\
help="write directory contents to FILE (default: weather.xml)",\
metavar="FILE")
(options, args) = parser.parse_args()
# Greece (GRXX) has 81 cities available with data
write_everything_from_yahoo_to_xml('GRXX', 81, outfile=options.filename)
if __name__ == '__main__':
main()
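# Example invocation (hypothetical output path), per the optparse setup above:
#   ./get-weather.py --file greece-weather.xml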
|
mit
|
NexusIS/tempest
|
tempest/api/compute/admin/test_instance_usage_audit_log.py
|
17
|
2319
|
# Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from six.moves.urllib import parse as urllib
from tempest.api.compute import base
from tempest import test
class InstanceUsageAuditLogTestJSON(base.BaseV2ComputeAdminTest):
@classmethod
def setup_clients(cls):
super(InstanceUsageAuditLogTestJSON, cls).setup_clients()
cls.adm_client = cls.os_adm.instance_usages_audit_log_client
@test.idempotent_id('25319919-33d9-424f-9f99-2c203ee48b9d')
def test_list_instance_usage_audit_logs(self):
# list instance usage audit logs
body = self.adm_client.list_instance_usage_audit_logs()
expected_items = ['total_errors', 'total_instances', 'log',
'num_hosts_running', 'num_hosts_done',
'num_hosts', 'hosts_not_run', 'overall_status',
'period_ending', 'period_beginning',
'num_hosts_not_run']
for item in expected_items:
self.assertIn(item, body)
@test.idempotent_id('6e40459d-7c5f-400b-9e83-449fbc8e7feb')
def test_get_instance_usage_audit_log(self):
# Get instance usage audit log before specified time
now = datetime.datetime.now()
body = self.adm_client.show_instance_usage_audit_log(
urllib.quote(now.strftime("%Y-%m-%d %H:%M:%S")))
expected_items = ['total_errors', 'total_instances', 'log',
'num_hosts_running', 'num_hosts_done', 'num_hosts',
'hosts_not_run', 'overall_status', 'period_ending',
'period_beginning', 'num_hosts_not_run']
for item in expected_items:
self.assertIn(item, body)
|
apache-2.0
|
Stefan-Korner/SpacePyLibrary
|
CS/CNCclient.py
|
1
|
7170
|
#******************************************************************************
# (C) 2018, Stefan Korner, Austria *
# *
# The Space Python Library is free software; you can redistribute it and/or *
# modify it under under the terms of the MIT License as published by the *
# Massachusetts Institute of Technology. *
# *
# The Space Python Library is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License *
# for more details. *
#******************************************************************************
# CNC client for connection to SCOE *
# implements CAIT-03474-ASTR_issue_3_EGSE_IRD.pdf *
#******************************************************************************
from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR
import CCSDS.PACKET
import EGSE.CNC, EGSE.IF
import MC.IF
import PUS.PACKET
import UTIL.TASK
###########
# classes #
###########
# =============================================================================
class TCclient(EGSE.CNC.TCclient):
"""Subclass of EGSE.CNC.TCclient"""
# this client sends CnC commands
# and receives automatically ACK/NAK CnC responses
# ---------------------------------------------------------------------------
def __init__(self):
"""Initialise attributes only"""
EGSE.CNC.TCclient.__init__(self)
# ---------------------------------------------------------------------------
def connected(self):
"""hook for derived classes"""
LOG_INFO("TCclient.connected", "CNC")
EGSE.IF.s_cncClientConfiguration.connected = True
UTIL.TASK.s_processingTask.notifyCNCconnected()
# ---------------------------------------------------------------------------
def disconnected(self):
"""hook for derived classes"""
LOG_WARNING("TCclient.disconnected", "CNC")
EGSE.IF.s_cncClientConfiguration.connected = False
UTIL.TASK.s_processingTask.notifyCNCdisconnected()
# ---------------------------------------------------------------------------
def pushTCpacket(self, tcPacketDu):
"""Consumes a telecommand packet"""
# the CCSDS TC packet is not checked but directly send
self.sendCNCpacket(tcPacketDu.getBuffer())
# ---------------------------------------------------------------------------
def notifyCNCresponse(self, cncAckNakDU):
"""CnC response received: overloaded from EGSE.CNC.TCclient"""
LOG_INFO("notifyCNCresponse: message = " + cncAckNakDU.getCNCmessage(), "CNC")
MC.IF.s_tmModel.pushTMpacket(cncAckNakDU, None)
# ---------------------------------------------------------------------------
def notifyCCSDSresponse(self, tcAckNakDU):
"""TC response received: overloaded from EGSE.CNC.TCclient"""
LOG_INFO("notifyCCSDSresponse: status = " + tcAckNakDU.getStatus(), "CNC")
MC.IF.s_tmModel.pushTMpacket(tcAckNakDU, None)
# =============================================================================
class TMclient(EGSE.CNC.TMclient):
"""Subclass of EGSE.CNC.TMclient"""
# this client only receives CCSDS TM packets
# ---------------------------------------------------------------------------
def __init__(self):
"""Initialise attributes only"""
EGSE.CNC.TMclient.__init__(self)
# ---------------------------------------------------------------------------
def connected(self):
"""hook for derived classes"""
LOG_INFO("TMclient.connected", "CNC")
EGSE.IF.s_cncClientConfiguration.connected2 = True
UTIL.TASK.s_processingTask.notifyCNC2connected()
# ---------------------------------------------------------------------------
def disconnected(self):
"""hook for derived classes"""
LOG_WARNING("TMclient.disconnected", "CNC")
EGSE.IF.s_cncClientConfiguration.connected2 = False
UTIL.TASK.s_processingTask.notifyCNC2disconnected()
# ---------------------------------------------------------------------------
def notifyTMpacket(self, tmPacket):
"""TM packet received: overloaded from EGSE.CNC.TMclient"""
if PUS.PACKET.isPUSpacket(tmPacket):
# PUS packet
tmPacketDu = PUS.PACKET.TMpacket(tmPacket)
LOG_INFO("PUS TM packet extracted", "CNC")
else:
# CCSDS packet
tmPacketDu = CCSDS.PACKET.TMpacket(tmPacket)
LOG_INFO("CCSDS TM packet extracted", "CNC")
MC.IF.s_tmModel.pushTMpacket(tmPacketDu, None)
####################
# global variables #
####################
# CNC clients are singletons
s_client = None
s_client2 = None
#############
# functions #
#############
# functions to encapsulate access to s_client and s_client2
# -----------------------------------------------------------------------------
def createClients():
"""create the EGSE clients"""
global s_client, s_client2
cncHost = EGSE.IF.s_cncClientConfiguration.cncHost
if cncHost == "":
LOG_INFO("no CNC connection configured", "CNC")
return
s_client = TCclient()
s_client2 = TMclient()
# -----------------------------------------------------------------------------
def connectCNC():
"""Connect CNC TC link"""
LOG_INFO("Connect CNC TC link", "CNC")
cncHost = EGSE.IF.s_cncClientConfiguration.cncHost
cncPort = EGSE.IF.s_cncClientConfiguration.cncPort
if cncHost == "" or cncPort == "-1":
LOG_ERROR("no CNC TC link configured", "CNC")
return
if not s_client.connectToServer(cncHost, int(cncPort)):
LOG_ERROR("Connect TC link failed", "CNC")
# -----------------------------------------------------------------------------
def disconnectCNC():
"""Disonnect CNC TC link"""
LOG_INFO("Disonnect CNC TC link", "CNC")
cncHost = EGSE.IF.s_cncClientConfiguration.cncHost
cncPort = EGSE.IF.s_cncClientConfiguration.cncPort
if cncHost == "" or cncPort == "-1":
LOG_ERROR("no CNC TC link configured", "CNC")
return
s_client.disconnectFromServer()
# -----------------------------------------------------------------------------
def connectCNC2():
"""Connect CNC TM link"""
LOG_INFO("Connect CNC TM link", "CNC")
cncHost = EGSE.IF.s_cncClientConfiguration.cncHost
cncPort2 = EGSE.IF.s_cncClientConfiguration.cncPort2
if cncHost == "" or cncPort2 == "-1":
LOG_ERROR("no CNC TM link configured", "CNC")
return
if not s_client2.connectToServer(cncHost, int(cncPort2)):
LOG_ERROR("Connect TM link failed", "CNC")
# -----------------------------------------------------------------------------
def disconnectCNC2():
"""Disonnect CNC TM link"""
LOG_INFO("Disonnect CNC TM link", "CNC")
cncHost = EGSE.IF.s_cncClientConfiguration.cncHost
cncPort2 = EGSE.IF.s_cncClientConfiguration.cncPort2
if cncHost == "" or cncPort2 == "-1":
LOG_ERROR("no CNC TM link configured", "CNC")
return
s_client2.disconnectFromServer()
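# Usage sketch (assumed call order, not taken from the original sources): the
# client singletons must be created before the links can be (dis)connected:
#
#   createClients()    # reads EGSE.IF.s_cncClientConfiguration
#   connectCNC()       # TC link: CnC commands plus ACK/NAK responses
#   connectCNC2()      # TM link: CCSDS/PUS telemetry packets
#   ...
#   disconnectCNC()
#   disconnectCNC2()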
|
mit
|
ceramos/micropython
|
tests/bytecode/pylib-tests/dummy_threading.py
|
210
|
2815
|
"""Faux ``threading`` version using ``dummy_thread`` instead of ``thread``.
The module ``_dummy_threading`` is added to ``sys.modules`` in order
to not have ``threading`` considered imported. Had ``threading`` been
directly imported it would have made all subsequent imports succeed
regardless of whether ``_thread`` was available which is not desired.
"""
from sys import modules as sys_modules
import _dummy_thread
# Declaring now so as to not have to nest ``try``s to get proper clean-up.
holding_thread = False
holding_threading = False
holding__threading_local = False
try:
# Could have checked if ``_thread`` was not in sys.modules and gone
# a different route, but decided to mirror technique used with
# ``threading`` below.
if '_thread' in sys_modules:
held_thread = sys_modules['_thread']
holding_thread = True
# Must have some module named ``_thread`` that implements its API
# in order to initially import ``threading``.
sys_modules['_thread'] = sys_modules['_dummy_thread']
if 'threading' in sys_modules:
# If ``threading`` is already imported, might as well prevent
# trying to import it more than needed by saving it if it is
# already imported before deleting it.
held_threading = sys_modules['threading']
holding_threading = True
del sys_modules['threading']
if '_threading_local' in sys_modules:
# If ``_threading_local`` is already imported, might as well prevent
# trying to import it more than needed by saving it if it is
# already imported before deleting it.
held__threading_local = sys_modules['_threading_local']
holding__threading_local = True
del sys_modules['_threading_local']
import threading
# Need a copy of the code kept somewhere...
sys_modules['_dummy_threading'] = sys_modules['threading']
del sys_modules['threading']
sys_modules['_dummy__threading_local'] = sys_modules['_threading_local']
del sys_modules['_threading_local']
from _dummy_threading import *
from _dummy_threading import __all__
finally:
# Put back ``threading`` if we overwrote earlier
if holding_threading:
sys_modules['threading'] = held_threading
del held_threading
del holding_threading
# Put back ``_threading_local`` if we overwrote earlier
if holding__threading_local:
sys_modules['_threading_local'] = held__threading_local
del held__threading_local
del holding__threading_local
# Put back ``thread`` if we overwrote, else del the entry we made
if holding_thread:
sys_modules['_thread'] = held_thread
del held_thread
else:
del sys_modules['_thread']
del holding_thread
del _dummy_thread
del sys_modules
|
mit
|
mmcdermo/helpinghand
|
server/venv/lib/python2.7/site-packages/django/contrib/admindocs/views.py
|
106
|
15138
|
import inspect
import os
import re
from django import template
from django.template import RequestContext
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.db import models
from django.shortcuts import render_to_response
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import Http404
from django.core import urlresolvers
from django.contrib.admindocs import utils
from django.contrib.sites.models import Site
from django.utils.importlib import import_module
from django.utils._os import upath
from django.utils import six
from django.utils.translation import ugettext as _
# Exclude methods starting with these strings from documentation
MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_')
class GenericSite(object):
domain = 'example.com'
name = 'my site'
@staff_member_required
def doc_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
return render_to_response('admin_doc/index.html', {
'root_path': urlresolvers.reverse('admin:index'),
}, context_instance=RequestContext(request))
@staff_member_required
def bookmarklets(request):
admin_root = urlresolvers.reverse('admin:index')
return render_to_response('admin_doc/bookmarklets.html', {
'root_path': admin_root,
'admin_url': "%s://%s%s" % ('https' if request.is_secure() else 'http', request.get_host(), admin_root),
}, context_instance=RequestContext(request))
@staff_member_required
def template_tag_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
load_all_installed_template_libraries()
tags = []
app_libs = list(six.iteritems(template.libraries))
builtin_libs = [(None, lib) for lib in template.builtins]
for module_name, library in builtin_libs + app_libs:
for tag_name, tag_func in library.tags.items():
title, body, metadata = utils.parse_docstring(tag_func.__doc__)
if title:
title = utils.parse_rst(title, 'tag', _('tag:') + tag_name)
if body:
body = utils.parse_rst(body, 'tag', _('tag:') + tag_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name)
if library in template.builtins:
tag_library = ''
else:
tag_library = module_name.split('.')[-1]
tags.append({
'name': tag_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
return render_to_response('admin_doc/template_tag_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'tags': tags
}, context_instance=RequestContext(request))
@staff_member_required
def template_filter_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
load_all_installed_template_libraries()
filters = []
app_libs = list(six.iteritems(template.libraries))
builtin_libs = [(None, lib) for lib in template.builtins]
for module_name, library in builtin_libs + app_libs:
for filter_name, filter_func in library.filters.items():
title, body, metadata = utils.parse_docstring(filter_func.__doc__)
if title:
title = utils.parse_rst(title, 'filter', _('filter:') + filter_name)
if body:
body = utils.parse_rst(body, 'filter', _('filter:') + filter_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name)
if library in template.builtins:
tag_library = ''
else:
tag_library = module_name.split('.')[-1]
filters.append({
'name': filter_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
return render_to_response('admin_doc/template_filter_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'filters': filters
}, context_instance=RequestContext(request))
@staff_member_required
def view_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
if settings.ADMIN_FOR:
settings_modules = [import_module(m) for m in settings.ADMIN_FOR]
else:
settings_modules = [settings]
views = []
for settings_mod in settings_modules:
urlconf = import_module(settings_mod.ROOT_URLCONF)
view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns)
if Site._meta.installed:
site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
else:
site_obj = GenericSite()
for (func, regex) in view_functions:
views.append({
'full_name': '%s.%s' % (func.__module__, getattr(func, '__name__', func.__class__.__name__)),
'site_id': settings_mod.SITE_ID,
'site': site_obj,
'url': simplify_regex(regex),
})
return render_to_response('admin_doc/view_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'views': views
}, context_instance=RequestContext(request))
@staff_member_required
def view_detail(request, view):
if not utils.docutils_is_available:
return missing_docutils_page(request)
mod, func = urlresolvers.get_mod_func(view)
try:
view_func = getattr(import_module(mod), func)
except (ImportError, AttributeError):
raise Http404
title, body, metadata = utils.parse_docstring(view_func.__doc__)
if title:
title = utils.parse_rst(title, 'view', _('view:') + view)
if body:
body = utils.parse_rst(body, 'view', _('view:') + view)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view)
return render_to_response('admin_doc/view_detail.html', {
'root_path': urlresolvers.reverse('admin:index'),
'name': view,
'summary': title,
'body': body,
'meta': metadata,
}, context_instance=RequestContext(request))
@staff_member_required
def model_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
m_list = [m._meta for m in models.get_models()]
return render_to_response('admin_doc/model_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'models': m_list
}, context_instance=RequestContext(request))
@staff_member_required
def model_detail(request, app_label, model_name):
if not utils.docutils_is_available:
return missing_docutils_page(request)
# Get the model class.
try:
app_mod = models.get_app(app_label)
except ImproperlyConfigured:
raise Http404(_("App %r not found") % app_label)
model = None
for m in models.get_models(app_mod):
if m._meta.model_name == model_name:
model = m
break
if model is None:
raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % {'model_name': model_name, 'app_label': app_label})
opts = model._meta
# Gather fields/field descriptions.
fields = []
for field in opts.fields:
# ForeignKey is a special case since the field will actually be a
# descriptor that returns the other object
if isinstance(field, models.ForeignKey):
data_type = field.rel.to.__name__
app_label = field.rel.to._meta.app_label
verbose = utils.parse_rst((_("the related `%(app_label)s.%(data_type)s` object") % {'app_label': app_label, 'data_type': data_type}), 'model', _('model:') + data_type)
else:
data_type = get_readable_field_data_type(field)
verbose = field.verbose_name
fields.append({
'name': field.name,
'data_type': data_type,
'verbose': verbose,
'help_text': field.help_text,
})
# Gather many-to-many fields.
for field in opts.many_to_many:
data_type = field.rel.to.__name__
app_label = field.rel.to._meta.app_label
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': app_label, 'object_name': data_type}
fields.append({
'name': "%s.all" % field.name,
"data_type": 'List',
'verbose': utils.parse_rst(_("all %s") % verbose , 'model', _('model:') + opts.model_name),
})
fields.append({
'name' : "%s.count" % field.name,
'data_type' : 'Integer',
'verbose' : utils.parse_rst(_("number of %s") % verbose , 'model', _('model:') + opts.model_name),
})
# Gather model methods.
for func_name, func in model.__dict__.items():
if (inspect.isfunction(func) and len(inspect.getargspec(func)[0]) == 1):
try:
for exclude in MODEL_METHODS_EXCLUDE:
if func_name.startswith(exclude):
raise StopIteration
except StopIteration:
continue
verbose = func.__doc__
if verbose:
verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.model_name)
fields.append({
'name': func_name,
'data_type': get_return_data_type(func_name),
'verbose': verbose,
})
# Gather related objects
for rel in opts.get_all_related_objects() + opts.get_all_related_many_to_many_objects():
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': rel.opts.app_label, 'object_name': rel.opts.object_name}
accessor = rel.get_accessor_name()
fields.append({
'name' : "%s.all" % accessor,
'data_type' : 'List',
'verbose' : utils.parse_rst(_("all %s") % verbose , 'model', _('model:') + opts.model_name),
})
fields.append({
'name' : "%s.count" % accessor,
'data_type' : 'Integer',
'verbose' : utils.parse_rst(_("number of %s") % verbose , 'model', _('model:') + opts.model_name),
})
return render_to_response('admin_doc/model_detail.html', {
'root_path': urlresolvers.reverse('admin:index'),
'name': '%s.%s' % (opts.app_label, opts.object_name),
# Translators: %s is an object type name
'summary': _("Attributes on %s objects") % opts.object_name,
'description': model.__doc__,
'fields': fields,
}, context_instance=RequestContext(request))
@staff_member_required
def template_detail(request, template):
templates = []
for site_settings_module in settings.ADMIN_FOR:
settings_mod = import_module(site_settings_module)
if Site._meta.installed:
site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
else:
site_obj = GenericSite()
for dir in settings_mod.TEMPLATE_DIRS:
template_file = os.path.join(dir, template)
templates.append({
'file': template_file,
'exists': os.path.exists(template_file),
                'contents': lambda template_file=template_file: open(template_file).read() if os.path.exists(template_file) else '',
'site_id': settings_mod.SITE_ID,
'site': site_obj,
'order': list(settings_mod.TEMPLATE_DIRS).index(dir),
})
return render_to_response('admin_doc/template_detail.html', {
'root_path': urlresolvers.reverse('admin:index'),
'name': template,
'templates': templates,
}, context_instance=RequestContext(request))
####################
# Helper functions #
####################
def missing_docutils_page(request):
"""Display an error message for people without docutils"""
return render_to_response('admin_doc/missing_docutils.html')
def load_all_installed_template_libraries():
# Load/register all template tag libraries from installed apps.
for module_name in template.get_templatetags_modules():
mod = import_module(module_name)
try:
libraries = [
os.path.splitext(p)[0]
for p in os.listdir(os.path.dirname(upath(mod.__file__)))
if p.endswith('.py') and p[0].isalpha()
]
except OSError:
libraries = []
for library_name in libraries:
try:
lib = template.get_library(library_name)
except template.InvalidTemplateLibrary:
pass
def get_return_data_type(func_name):
"""Return a somewhat-helpful data type given a function name"""
if func_name.startswith('get_'):
if func_name.endswith('_list'):
return 'List'
elif func_name.endswith('_count'):
return 'Integer'
return ''
def get_readable_field_data_type(field):
"""Returns the description for a given field type, if it exists,
Fields' descriptions can contain format strings, which will be interpolated
against the values of field.__dict__ before being output."""
return field.description % field.__dict__
def extract_views_from_urlpatterns(urlpatterns, base=''):
"""
Return a list of views from a list of urlpatterns.
Each object in the returned list is a two-tuple: (view_func, regex)
"""
views = []
for p in urlpatterns:
if hasattr(p, 'url_patterns'):
try:
patterns = p.url_patterns
except ImportError:
continue
views.extend(extract_views_from_urlpatterns(patterns, base + p.regex.pattern))
elif hasattr(p, 'callback'):
try:
views.append((p.callback, base + p.regex.pattern))
except ViewDoesNotExist:
continue
else:
raise TypeError(_("%s does not appear to be a urlpattern object") % p)
return views
named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)')
non_named_group_matcher = re.compile(r'\(.*?\)')
def simplify_regex(pattern):
"""
Clean up urlpattern regexes into something somewhat readable by Mere Humans:
turns something like "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
into "<sport_slug>/athletes/<athlete_slug>/"
"""
# handle named groups first
pattern = named_group_matcher.sub(lambda m: m.group(1), pattern)
# handle non-named groups
pattern = non_named_group_matcher.sub("<var>", pattern)
# clean up any outstanding regex-y characters.
pattern = pattern.replace('^', '').replace('$', '').replace('?', '').replace('//', '/').replace('\\', '')
if not pattern.startswith('/'):
pattern = '/' + pattern
return pattern
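# Worked example (added commentary, derived from the substitutions above):
#   simplify_regex(r'^articles/(\d{4})/(?P<slug>[\w-]+)/$')
#   returns '/articles/<var>/<slug>/'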
|
mit
|
darina/omim
|
tools/python/maps_generator/generator/stages_declaration.py
|
2
|
14605
|
""""
This file contains possible stages that maps_generator can run.
Some algorithms suppose a maps genration processes looks like:
stage1, ..., stage_mwm[country_stage_1, ..., country_stage_M], ..., stageN
Only stage_mwm can contain country_
"""
import datetime
import json
import logging
import multiprocessing
import os
import shutil
import tarfile
from collections import defaultdict
from multiprocessing.pool import ThreadPool
from typing import AnyStr
from typing import Type
import maps_generator.generator.diffs as diffs
import maps_generator.generator.stages_tests as st
from descriptions.descriptions_downloader import check_and_get_checker
from descriptions.descriptions_downloader import download_from_wikidata_tags
from descriptions.descriptions_downloader import download_from_wikipedia_tags
from maps_generator.generator import coastline
from maps_generator.generator import settings
from maps_generator.generator import steps
from maps_generator.generator.env import Env
from maps_generator.generator.env import PathProvider
from maps_generator.generator.env import WORLD_COASTS_NAME
from maps_generator.generator.env import WORLD_NAME
from maps_generator.generator.exceptions import BadExitStatusError
from maps_generator.generator.gen_tool import run_gen_tool
from maps_generator.generator.stages import InternalDependency as D
from maps_generator.generator.stages import Stage
from maps_generator.generator.stages import Test
from maps_generator.generator.stages import country_stage
from maps_generator.generator.stages import depends_from_internal
from maps_generator.generator.stages import helper_stage_for
from maps_generator.generator.stages import mwm_stage
from maps_generator.generator.stages import outer_stage
from maps_generator.generator.stages import production_only
from maps_generator.generator.stages import stages
from maps_generator.generator.stages import test_stage
from maps_generator.generator.statistics import get_stages_info
from maps_generator.utils.file import download_files
from maps_generator.utils.file import is_verified
from post_generation.hierarchy_to_countries import hierarchy_to_countries
from post_generation.inject_promo_ids import inject_promo_ids
from post_generation.localads_mwm_to_csv import create_csv
logger = logging.getLogger("maps_generator")
def is_accepted(env: Env, stage: Type[Stage]) -> bool:
return env.is_accepted_stage(stage)
@outer_stage
class StageDownloadAndConvertPlanet(Stage):
def apply(self, env: Env, force_download: bool = True, **kwargs):
if force_download or not is_verified(env.paths.planet_o5m):
steps.step_download_and_convert_planet(
env, force_download=force_download, **kwargs
)
@outer_stage
class StageUpdatePlanet(Stage):
def apply(self, env: Env, **kwargs):
steps.step_update_planet(env, **kwargs)
@outer_stage
class StageCoastline(Stage):
def apply(self, env: Env, use_old_if_fail=True):
coasts_geom = "WorldCoasts.geom"
coasts_rawgeom = "WorldCoasts.rawgeom"
try:
coastline.make_coastline(env)
except BadExitStatusError as e:
if not use_old_if_fail:
raise e
logger.info("Build costs failed. Try to download the costs...")
download_files(
{
settings.PLANET_COASTS_GEOM_URL: os.path.join(
env.paths.coastline_path, coasts_geom
),
settings.PLANET_COASTS_RAWGEOM_URL: os.path.join(
env.paths.coastline_path, coasts_rawgeom
),
}
)
for f in [coasts_geom, coasts_rawgeom]:
path = os.path.join(env.paths.coastline_path, f)
shutil.copy2(path, env.paths.intermediate_data_path)
@outer_stage
class StagePreprocess(Stage):
def apply(self, env: Env, **kwargs):
steps.step_preprocess(env, **kwargs)
@outer_stage
@depends_from_internal(
D(settings.HOTELS_URL, PathProvider.hotels_path, "p"),
D(settings.PROMO_CATALOG_CITIES_URL, PathProvider.promo_catalog_cities_path, "p"),
D(settings.POPULARITY_URL, PathProvider.popularity_path, "p"),
D(settings.FOOD_URL, PathProvider.food_paths, "p"),
D(settings.FOOD_TRANSLATIONS_URL, PathProvider.food_translations_path, "p"),
)
@test_stage(Test(st.make_test_booking_data(max_days=7), lambda e, _: e.production, True))
class StageFeatures(Stage):
def apply(self, env: Env):
extra = {}
if is_accepted(env, StageDescriptions):
extra.update({"idToWikidata": env.paths.id_to_wikidata_path})
if env.production:
extra.update(
{
"booking_data": env.paths.hotels_path,
"promo_catalog_cities": env.paths.promo_catalog_cities_path,
"popular_places_data": env.paths.popularity_path,
"brands_data": env.paths.food_paths,
"brands_translations_data": env.paths.food_translations_path,
}
)
if is_accepted(env, StageCoastline):
extra.update({"emit_coasts": True})
if is_accepted(env, StageIsolinesInfo):
extra.update({"isolines_path": PathProvider.isolines_path()})
steps.step_features(env, **extra)
if os.path.exists(env.paths.packed_polygons_path):
shutil.copy2(env.paths.packed_polygons_path, env.paths.mwm_path)
@outer_stage
@production_only
@helper_stage_for("StageDescriptions")
class StageDownloadDescriptions(Stage):
def apply(self, env: Env):
run_gen_tool(
env.gen_tool,
out=env.get_subprocess_out(),
err=env.get_subprocess_out(),
intermediate_data_path=env.paths.intermediate_data_path,
cache_path=env.paths.cache_path,
user_resource_path=env.paths.user_resource_path,
dump_wikipedia_urls=env.paths.wiki_url_path,
idToWikidata=env.paths.id_to_wikidata_path,
threads_count=settings.THREADS_COUNT,
)
langs = ("en", "ru", "es", "fr", "de")
checker = check_and_get_checker(env.paths.popularity_path)
download_from_wikipedia_tags(
env.paths.wiki_url_path, env.paths.descriptions_path, langs, checker
)
download_from_wikidata_tags(
env.paths.id_to_wikidata_path, env.paths.descriptions_path, langs, checker
)
@outer_stage
@mwm_stage
class StageMwm(Stage):
def apply(self, env: Env):
with ThreadPool(settings.THREADS_COUNT) as pool:
pool.map(
lambda c: StageMwm.make_mwm(c, env),
env.get_tmp_mwm_names(),
chunksize=1,
)
@staticmethod
def make_mwm(country: AnyStr, env: Env):
world_stages = {
WORLD_NAME: [StageIndex, StageCitiesIdsWorld, StageRoutingWorld, StageMwmStatistics],
WORLD_COASTS_NAME: [StageIndex, StageMwmStatistics],
}
mwm_stages = [
StageIndex,
StageUgc,
StagePopularity,
StageSrtm,
StageIsolinesInfo,
StageDescriptions,
StageRouting,
StageRoutingTransit,
StageMwmDiffs,
StageMwmStatistics,
]
for stage in world_stages.get(country, mwm_stages):
stage(country=country)(env)
env.finish_mwm(country)
@country_stage
@depends_from_internal(
D(settings.UK_POSTCODES_URL, PathProvider.uk_postcodes_path, "p"),
D(settings.US_POSTCODES_URL, PathProvider.us_postcodes_path, "p"),
)
class StageIndex(Stage):
def apply(self, env: Env, country, **kwargs):
if country == WORLD_NAME:
steps.step_index_world(env, country, **kwargs)
elif country == WORLD_COASTS_NAME:
steps.step_coastline_index(env, country, **kwargs)
else:
if env.production:
kwargs.update(
{
"uk_postcodes_dataset": env.paths.uk_postcodes_path,
"us_postcodes_dataset": env.paths.us_postcodes_path,
}
)
steps.step_index(env, country, **kwargs)
@country_stage
@production_only
class StageCitiesIdsWorld(Stage):
def apply(self, env: Env, country, **kwargs):
steps.step_cities_ids_world(env, country, **kwargs)
@country_stage
@depends_from_internal(D(settings.WORLDROADS_URL, PathProvider.worldroads_path),)
class StageRoutingWorld(Stage):
def apply(self, env: Env, country, **kwargs):
steps.step_routing_world(env, country, **kwargs)
@country_stage
@depends_from_internal(D(settings.UGC_URL, PathProvider.ugc_path),)
@production_only
class StageUgc(Stage):
def apply(self, env: Env, country, **kwargs):
steps.step_ugc(env, country, **kwargs)
@country_stage
@production_only
class StagePopularity(Stage):
def apply(self, env: Env, country, **kwargs):
steps.step_popularity(env, country, **kwargs)
@country_stage
@production_only
class StageSrtm(Stage):
def apply(self, env: Env, country, **kwargs):
steps.step_srtm(env, country, **kwargs)
@country_stage
@production_only
class StageIsolinesInfo(Stage):
def apply(self, env: Env, country, **kwargs):
steps.step_isolines_info(env, country, **kwargs)
@country_stage
@production_only
class StageDescriptions(Stage):
def apply(self, env: Env, country, **kwargs):
steps.step_description(env, country, **kwargs)
@country_stage
class StageRouting(Stage):
def apply(self, env: Env, country, **kwargs):
steps.step_routing(env, country, **kwargs)
@country_stage
@depends_from_internal(
D(settings.SUBWAY_URL, PathProvider.subway_path),
D(settings.TRANSIT_URL, PathProvider.transit_path_experimental),
)
class StageRoutingTransit(Stage):
def apply(self, env: Env, country, **kwargs):
steps.step_routing_transit(env, country, **kwargs)
@country_stage
@production_only
class StageMwmDiffs(Stage):
def apply(self, env: Env, country, logger, **kwargs):
data_dir = diffs.DataDir(
mwm_name=env.build_name, new_version_dir=env.build_path,
old_version_root_dir=settings.DATA_ARCHIVE_DIR
)
diffs.mwm_diff_calculation(
data_dir, logger, depth=settings.DIFF_VERSION_DEPTH
)
@country_stage
@helper_stage_for("StageStatistics")
class StageMwmStatistics(Stage):
def apply(self, env: Env, country, **kwargs):
steps.step_statistics(env, country, **kwargs)
@outer_stage
@depends_from_internal(
D(
settings.PROMO_CATALOG_COUNTRIES_URL,
PathProvider.promo_catalog_countries_path,
"p",
)
)
class StageCountriesTxt(Stage):
def apply(self, env: Env):
countries = hierarchy_to_countries(
env.paths.old_to_new_path,
env.paths.borders_to_osm_path,
env.paths.countries_synonyms_path,
env.paths.hierarchy_path,
env.paths.mwm_path,
env.paths.mwm_version,
)
if env.production:
countries_json = json.loads(countries)
inject_promo_ids(
countries_json,
env.paths.promo_catalog_cities_path,
env.paths.promo_catalog_countries_path,
env.paths.mwm_path,
env.paths.types_path,
env.paths.mwm_path,
)
with open(env.paths.counties_txt_path, "w") as f:
json.dump(countries_json, f, ensure_ascii=True, indent=1)
@outer_stage
class StageExternalResources(Stage):
def apply(self, env: Env):
black_list = {"00_roboto_regular.ttf"}
resources = [
os.path.join(env.paths.user_resource_path, file)
for file in os.listdir(env.paths.user_resource_path)
if file.endswith(".ttf") and file not in black_list
]
for ttf_file in resources:
shutil.copy2(ttf_file, env.paths.mwm_path)
for file in os.listdir(env.paths.mwm_path):
if file.startswith(WORLD_NAME) and file.endswith(".mwm"):
resources.append(os.path.join(env.paths.mwm_path, file))
resources.sort()
with open(env.paths.external_resources_path, "w") as f:
for resource in resources:
fd = os.open(resource, os.O_RDONLY)
f.write(f"{os.path.basename(resource)} {os.fstat(fd).st_size}\n")
@outer_stage
@production_only
class StageLocalAds(Stage):
def apply(self, env: Env):
create_csv(
env.paths.localads_path,
env.paths.mwm_path,
env.paths.mwm_path,
env.mwm_version,
multiprocessing.cpu_count(),
)
with tarfile.open(f"{env.paths.localads_path}.tar.gz", "w:gz") as tar:
for filename in os.listdir(env.paths.localads_path):
tar.add(
os.path.join(env.paths.localads_path, filename), arcname=filename
)
@outer_stage
class StageStatistics(Stage):
def apply(self, env: Env):
steps_info = get_stages_info(env.paths.log_path, {"statistics"})
stats = defaultdict(lambda: defaultdict(dict))
stats["steps"] = steps_info["steps"]
for country in env.get_tmp_mwm_names():
with open(os.path.join(env.paths.stats_path, f"{country}.json")) as f:
stats["countries"][country] = {
"types": json.load(f),
"steps": steps_info["countries"][country],
}
def default(o):
if isinstance(o, datetime.timedelta):
return str(o)
with open(os.path.join(env.paths.stats_path, "stats.json"), "w") as f:
json.dump(
stats, f, ensure_ascii=False, sort_keys=True, indent=2, default=default
)
@outer_stage
class StageCleanup(Stage):
def apply(self, env: Env):
logger.info(
f"osm2ft files will be moved from {env.paths.mwm_path} "
f"to {env.paths.osm2ft_path}."
)
for x in os.listdir(env.paths.mwm_path):
p = os.path.join(env.paths.mwm_path, x)
if os.path.isfile(p) and x.endswith(".mwm.osm2ft"):
shutil.move(p, os.path.join(env.paths.osm2ft_path, x))
logger.info(f"{env.paths.draft_path} will be removed.")
shutil.rmtree(env.paths.draft_path)
stages.init()
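# Hedged sketch (not the project's actual implementation): the @country_stage,
# @production_only and @depends_from_internal decorators used above can be read as
# plain class decorators that register a Stage and gate its apply(). Only
# `env.production` is taken from the code above; everything else below is assumed,
# which is why the sketch is kept in comments.
#
#   def production_only(stage_cls):
#       original_apply = stage_cls.apply
#
#       def apply(self, env, *args, **kwargs):
#           if not env.production:
#               return  # skip this stage outside production builds
#           return original_apply(self, env, *args, **kwargs)
#
#       stage_cls.apply = apply
#       return stage_cls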
|
apache-2.0
|
matslindh/4096
|
4096/engine.py
|
1
|
4159
|
import random
class Engine:
def __init__(self):
self.board = [[0 for i in range(4)] for i in range(4)]
self.score = 0
self.add_random_block()
self.add_random_block()
def left(self):
self.next(1)
def right(self):
self.next(3)
def up(self):
self.next(2)
def down(self):
self.next()
def rotate_board(self, board, count):
for c in range(0, count):
rotated = [[0 for i in range(len(board))] for i in range(len(board[0]))]
rows = len(board)
for row_idx in range(0, rows):
columns = len(board[row_idx])
for el_idx in range(0, columns):
rotated[columns - el_idx - 1][row_idx] = board[row_idx][el_idx]
board = rotated
return rotated
def next(self, rotate_count = 0):
board = self.board
if rotate_count:
board = self.rotate_board(board, rotate_count)
merged = [[0 for i in range(len(board[0]))] for i in range(len(board))]
for row_idx in range(0, len(board) - 1):
for el_idx in range(0, len(board[row_idx])):
if merged[row_idx][el_idx]:
continue
current_cell = board[row_idx][el_idx]
next_cell = board[row_idx+1][el_idx]
if not current_cell:
continue
if current_cell == next_cell:
board[row_idx+1][el_idx] *= 2
board[row_idx][el_idx] = 0
merged[row_idx+1][el_idx] = 1
self.score += self.score_bonus_value(current_cell)
elif not next_cell:
board[row_idx+1][el_idx] = current_cell
board[row_idx][el_idx] = 0
if rotate_count:
board = self.rotate_board(board, 4 - rotate_count)
self.board = board
self.add_random_block()
def add_random_block(self, val=None):
avail = self.available_spots()
if avail:
(row, column) = avail[random.randint(0, len(avail) - 1)]
            self.board[row][column] = val if val is not None else (4 if random.randint(0, 8) == 8 else 2)
self.score += self.board[row][column]
def available_spots(self):
spots = []
for (row_idx, row) in enumerate(self.board):
for (el_idx, el) in enumerate(row):
if el == 0:
spots.append((row_idx, el_idx))
return spots
def is_board_locked(self):
if self.available_spots():
return False
board = self.board
for row_idx in range(0, len(board) - 1):
for el_idx in range(0, len(board[row_idx])):
if board[row_idx][el_idx] == board[row_idx+1][el_idx]:
return False
for row_idx in range(0, len(board)):
for el_idx in range(0, len(board[row_idx]) - 1):
if board[row_idx][el_idx] == board[row_idx][el_idx+1]:
return False
return True
def score_bonus_value(self, val):
score = {
2: 2,
4: 5,
8: 10,
16: 25,
32: 50,
64: 125,
128: 250,
256: 500,
512: 1000,
1024: 2000,
2048: 4000,
4096: 8000,
8192: 16000,
16384: 32500,
}
if val in score:
return score[val]
        # too high, let's just be happy for them.
return val*2
def to_string(self):
s = ""
for row in self.board:
s += ' '.join(map(str, row)) + "\n"
s += "== " + str(self.score) + "\n"
return s
"""eng = Engine()
eng.add_random_block()
eng.add_random_block()
print(eng.to_string())
while True:
key = input('u / d / l / r: ').strip()
if key == 'u':
eng.up()
elif key == 'd':
eng.down()
elif key == 'l':
eng.left()
elif key == 'r':
eng.right()
else:
continue
    print(eng.to_string())
"""
|
mit
|
emdyp/bl_pass
|
blockchainpassport/wsgi/blockchainpassport/blockchainpassport/urls.py
|
3
|
1320
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib.auth.decorators import login_required
from registers.views import idview, idListView, pdfview
from citizens.views import indexView, CitizenCreateView
#basic patterns
urlpatterns = patterns('',
url(r'^$', indexView),
url(r'^admin/', include(admin.site.urls)),
url(r'^id/(?P<idNumber>.*)$', idview),
url(r'^pdf/(?P<idNumber>.*)$', pdfview),
url(r'^registrate/', CitizenCreateView.as_view()),
url(r'^attendees/$', idListView.as_view(), name='id-list'),
)
if 'grappelli' in settings.INSTALLED_APPS:
urlpatterns += patterns('', url(r'grappelli/', include('grappelli.urls')),)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
#urlpatterns += patterns('ext_encuesta.views', (r'^registers', 'main'))
#if settings.DEBUG:
#urlpatterns += static(settings.STATIC_URL,
#document_root=settings.STATIC_ROOT)
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
##static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
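# Illustrative request routing for the patterns above (the id values are hypothetical):
#   GET /               -> citizens.views.indexView
#   GET /id/ABC123      -> registers.views.idview(request, idNumber='ABC123')
#   GET /pdf/ABC123     -> registers.views.pdfview(request, idNumber='ABC123')
#   GET /registrate/    -> CitizenCreateView.as_view()
#   GET /attendees/     -> idListView.as_view()  (named 'id-list', so reverse('id-list') resolves here)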
|
gpl-2.0
|
rintoj/ai
|
demo/reinforcement/featureExtractors.py
|
5
|
2981
|
# featureExtractors.py
# --------------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero ([email protected]) and Dan Klein ([email protected]).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
"Feature extractors for Pacman game states"
from game import Directions, Actions
import util
class FeatureExtractor:
def getFeatures(self, state, action):
"""
Returns a dict from features to counts
Usually, the count will just be 1.0 for
indicator functions.
"""
util.raiseNotDefined()
class IdentityExtractor(FeatureExtractor):
def getFeatures(self, state, action):
feats = util.Counter()
feats[(state,action)] = 1.0
return feats
def closestFood(pos, food, walls):
"""
closestFood -- this is similar to the function that we have
    worked on in the search project; here it's all in one place
"""
fringe = [(pos[0], pos[1], 0)]
expanded = set()
while fringe:
pos_x, pos_y, dist = fringe.pop(0)
if (pos_x, pos_y) in expanded:
continue
expanded.add((pos_x, pos_y))
# if we find a food at this location then exit
if food[pos_x][pos_y]:
return dist
# otherwise spread out from the location to its neighbours
nbrs = Actions.getLegalNeighbors((pos_x, pos_y), walls)
for nbr_x, nbr_y in nbrs:
fringe.append((nbr_x, nbr_y, dist+1))
# no food found
return None
class SimpleExtractor(FeatureExtractor):
"""
Returns simple features for a basic reflex Pacman:
- whether food will be eaten
- how far away the next food is
- whether a ghost collision is imminent
- whether a ghost is one step away
"""
def getFeatures(self, state, action):
# extract the grid of food and wall locations and get the ghost locations
food = state.getFood()
walls = state.getWalls()
ghosts = state.getGhostPositions()
features = util.Counter()
features["bias"] = 1.0
# compute the location of pacman after he takes the action
x, y = state.getPacmanPosition()
dx, dy = Actions.directionToVector(action)
next_x, next_y = int(x + dx), int(y + dy)
# count the number of ghosts 1-step away
features["#-of-ghosts-1-step-away"] = sum((next_x, next_y) in Actions.getLegalNeighbors(g, walls) for g in ghosts)
# if there is no danger of ghosts then add the food feature
if not features["#-of-ghosts-1-step-away"] and food[next_x][next_y]:
features["eats-food"] = 1.0
dist = closestFood((next_x, next_y), food, walls)
if dist is not None:
# make the distance a number less than one otherwise the update
# will diverge wildly
features["closest-food"] = float(dist) / (walls.width * walls.height)
features.divideAll(10.0)
return features
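# Standalone sketch of the closestFood() search above, using plain nested lists in
# place of the Pacman Grid/walls objects. It is illustrative only and is not used
# by the extractors; running this file still requires the Pacman `game` and `util`
# modules imported at the top.
if __name__ == '__main__':
    food = [[False, False, True],
            [False, False, False],
            [False, False, False]]

    def bfs_closest(start, food_grid):
        # breadth-first search over 4-connected cells, mirroring closestFood()
        fringe, expanded = [(start[0], start[1], 0)], set()
        while fringe:
            x, y, dist = fringe.pop(0)
            if (x, y) in expanded:
                continue
            expanded.add((x, y))
            if food_grid[x][y]:
                return dist
            for nbr_x, nbr_y in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
                if 0 <= nbr_x < len(food_grid) and 0 <= nbr_y < len(food_grid[0]):
                    fringe.append((nbr_x, nbr_y, dist + 1))
        return None

    print(bfs_closest((0, 0), food))  # -> 2 (food is two steps away, at (0, 2))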
|
mit
|
shennushi/electron
|
script/lib/github.py
|
200
|
2214
|
#!/usr/bin/env python
import json
import os
import re
import sys
REQUESTS_DIR = os.path.abspath(os.path.join(__file__, '..', '..', '..',
'vendor', 'requests'))
sys.path.append(os.path.join(REQUESTS_DIR, 'build', 'lib'))
sys.path.append(os.path.join(REQUESTS_DIR, 'build', 'lib.linux-x86_64-2.7'))
import requests
GITHUB_URL = 'https://api.github.com'
GITHUB_UPLOAD_ASSET_URL = 'https://uploads.github.com'
class GitHub:
def __init__(self, access_token):
self._authorization = 'token %s' % access_token
pattern = '^/repos/{0}/{0}/releases/{1}/assets$'.format('[^/]+', '[0-9]+')
self._releases_upload_api_pattern = re.compile(pattern)
def __getattr__(self, attr):
return _Callable(self, '/%s' % attr)
def send(self, method, path, **kw):
if not 'headers' in kw:
kw['headers'] = dict()
headers = kw['headers']
headers['Authorization'] = self._authorization
headers['Accept'] = 'application/vnd.github.manifold-preview'
# Switch to a different domain for the releases uploading API.
if self._releases_upload_api_pattern.match(path):
url = '%s%s' % (GITHUB_UPLOAD_ASSET_URL, path)
else:
url = '%s%s' % (GITHUB_URL, path)
# Data are sent in JSON format.
if 'data' in kw:
kw['data'] = json.dumps(kw['data'])
r = getattr(requests, method)(url, **kw).json()
if 'message' in r:
raise Exception(json.dumps(r, indent=2, separators=(',', ': ')))
return r
class _Executable:
def __init__(self, gh, method, path):
self._gh = gh
self._method = method
self._path = path
def __call__(self, **kw):
return self._gh.send(self._method, self._path, **kw)
class _Callable(object):
def __init__(self, gh, name):
self._gh = gh
self._name = name
def __call__(self, *args):
if len(args) == 0:
return self
name = '%s/%s' % (self._name, '/'.join([str(arg) for arg in args]))
return _Callable(self._gh, name)
def __getattr__(self, attr):
if attr in ['get', 'put', 'post', 'patch', 'delete']:
return _Executable(self._gh, attr, self._name)
name = '%s/%s' % (self._name, attr)
return _Callable(self._gh, name)
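# Illustrative sketch of the chained-attribute API implemented above (the token and
# repository names are hypothetical; no HTTP request is made until .get()/.post()):
#
#   gh = GitHub('my-access-token')
#   endpoint = gh.repos('electron')('electron').releases
#   # endpoint._name is now '/repos/electron/electron/releases'
#   # releases = endpoint.get()  # would GET https://api.github.com/repos/electron/electron/releases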
|
mit
|
MrNuggles/HeyBoet-Telegram-Bot
|
temboo/Library/Google/Picasa/DeleteTag.py
|
5
|
5429
|
# -*- coding: utf-8 -*-
###############################################################################
#
# DeleteTag
# Removes a tag from a specified photo in Google Picasa.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DeleteTag(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the DeleteTag Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(DeleteTag, self).__init__(temboo_session, '/Library/Google/Picasa/DeleteTag')
def new_input_set(self):
return DeleteTagInputSet()
def _make_result_set(self, result, path):
return DeleteTagResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return DeleteTagChoreographyExecution(session, exec_id, path)
class DeleteTagInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the DeleteTag
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
"""
super(DeleteTagInputSet, self)._set_input('AccessToken', value)
def set_AlbumId(self, value):
"""
Set the value of the AlbumId input for this Choreo. ((required, integer) The id for the album which has the tagged photo. Note that this can be retrieved by running the ListAlbums Choreo.)
"""
super(DeleteTagInputSet, self)._set_input('AlbumId', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
"""
super(DeleteTagInputSet, self)._set_input('ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
"""
super(DeleteTagInputSet, self)._set_input('ClientSecret', value)
def set_PhotoID(self, value):
"""
Set the value of the PhotoID input for this Choreo. ((required, integer) The id for the photo that has a tag to delete.)
"""
super(DeleteTagInputSet, self)._set_input('PhotoID', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth Refresh Token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
"""
super(DeleteTagInputSet, self)._set_input('RefreshToken', value)
def set_TagID(self, value):
"""
Set the value of the TagID input for this Choreo. ((required, string) The Id (or tag name) for the tag that you want to delete.)
"""
super(DeleteTagInputSet, self)._set_input('TagID', value)
def set_UserID(self, value):
"""
Set the value of the UserID input for this Choreo. ((optional, string) Google Picasa username. Defaults to 'default' which means the server will use the UserID of the user whose access token was specified.)
"""
super(DeleteTagInputSet, self)._set_input('UserID', value)
class DeleteTagResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the DeleteTag Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from Google Picasa.)
"""
return self._output.get('Response', None)
def get_NewAccessToken(self):
"""
Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
"""
return self._output.get('NewAccessToken', None)
class DeleteTagChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return DeleteTagResultSet(response, path)
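# Hedged usage sketch (assumes the standard Temboo SDK session class and uses
# placeholder credentials/IDs; not part of the generated file):
#
#   from temboo.core.session import TembooSession
#
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = DeleteTag(session)
#   inputs = choreo.new_input_set()
#   inputs.set_AccessToken('oauth-access-token')
#   inputs.set_AlbumId('123456')
#   inputs.set_PhotoID('789012')
#   inputs.set_TagID('sunset')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())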
|
gpl-3.0
|
pavelchristof/gomoku-ai
|
tensorflow/contrib/labeled_tensor/python/ops/sugar_test.py
|
157
|
4205
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import ops
from tensorflow.contrib.labeled_tensor.python.ops import sugar
from tensorflow.contrib.labeled_tensor.python.ops import test_util
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class Base(test_util.Base):
def setUp(self):
super(Base, self).setUp()
self.small_lt = core.LabeledTensor(constant_op.constant([1]), [('x', 1)])
class ReshapeCoderTest(Base):
def setUp(self):
super(ReshapeCoderTest, self).setUp()
self.batch_size = 8
self.num_rows = 50
self.num_columns = 100
self.channels = ['red', 'green', 'blue']
self.masks = [False, True]
tensor = math_ops.range(0,
self.batch_size * self.num_rows * self.num_columns *
len(self.channels) * len(self.masks))
tensor = array_ops.reshape(tensor, [
self.batch_size, self.num_rows, self.num_columns, len(self.channels),
len(self.masks)
])
self.batch_axis = ('batch', range(self.batch_size))
self.row_axis = ('row', range(self.num_rows))
self.column_axis = ('column', range(self.num_columns))
self.channel_axis = ('channel', self.channels)
self.mask_axis = ('mask', self.masks)
axes = [
self.batch_axis, self.row_axis, self.column_axis, self.channel_axis,
self.mask_axis
]
self.masked_image_lt = core.LabeledTensor(tensor, axes)
def test_name(self):
rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
encode_lt = rc.encode(self.masked_image_lt)
decode_lt = rc.decode(encode_lt)
self.assertIn('lt_reshape_encode', encode_lt.name)
self.assertIn('lt_reshape_decode', decode_lt.name)
def test_bijection_flat(self):
rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
encode_lt = rc.encode(self.masked_image_lt)
golden_axes = core.Axes([
self.batch_axis, self.row_axis, self.column_axis,
('depth', len(self.channels) * len(self.masks))
])
self.assertEqual(encode_lt.axes, golden_axes)
decode_lt = rc.decode(encode_lt)
self.assertLabeledTensorsEqual(decode_lt, self.masked_image_lt)
def test_bijection_with_labels(self):
depth_axis = core.Axis('depth', range(len(self.channels) * len(self.masks)))
rc = sugar.ReshapeCoder(['channel', 'mask'],
[depth_axis, ('other', ['label'])])
encode_lt = rc.encode(self.masked_image_lt)
golden_axes = core.Axes([
self.batch_axis, self.row_axis, self.column_axis, depth_axis,
('other', ['label'])
])
self.assertEqual(encode_lt.axes, golden_axes)
decode_lt = rc.decode(encode_lt)
self.assertLabeledTensorsEqual(decode_lt, self.masked_image_lt)
def test_invalid_input(self):
with self.assertRaises(ValueError):
rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
rc.decode(self.masked_image_lt)
with self.assertRaises(ValueError):
rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
rc.encode(self.masked_image_lt)
rc.encode(ops.select(self.masked_image_lt, {'channel': 'red'}))
if __name__ == '__main__':
test.main()
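# Conceptual sketch of what the tests above exercise (mirrors test_bijection_flat):
#
#   rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
#   encoded = rc.encode(masked_image_lt)  # axes: batch, row, column, depth (3 * 2 = 6)
#   rc.decode(encoded)                    # restores channel/mask and equals masked_image_lt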
|
apache-2.0
|
jvkops/django
|
tests/template_tests/filter_tests/test_escape.py
|
324
|
1495
|
from django.template.defaultfilters import escape
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class EscapeTests(SimpleTestCase):
"""
The "escape" filter works the same whether autoescape is on or off,
but it has no effect on strings already marked as safe.
"""
@setup({'escape01': '{{ a|escape }} {{ b|escape }}'})
def test_escape01(self):
output = self.engine.render_to_string('escape01', {"a": "x&y", "b": mark_safe("x&y")})
        self.assertEqual(output, "x&amp;y x&y")
@setup({'escape02': '{% autoescape off %}{{ a|escape }} {{ b|escape }}{% endautoescape %}'})
def test_escape02(self):
output = self.engine.render_to_string('escape02', {"a": "x&y", "b": mark_safe("x&y")})
        self.assertEqual(output, "x&amp;y x&y")
# It is only applied once, regardless of the number of times it
# appears in a chain.
@setup({'escape03': '{% autoescape off %}{{ a|escape|escape }}{% endautoescape %}'})
def test_escape03(self):
output = self.engine.render_to_string('escape03', {"a": "x&y"})
        self.assertEqual(output, "x&amp;y")
@setup({'escape04': '{{ a|escape|escape }}'})
def test_escape04(self):
output = self.engine.render_to_string('escape04', {"a": "x&y"})
        self.assertEqual(output, "x&amp;y")
class FunctionTests(SimpleTestCase):
def test_non_string_input(self):
self.assertEqual(escape(123), '123')
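# For reference (illustrative, not an additional test): escape() HTML-escapes the
# special characters, e.g. escape("x&y") == "x&amp;y" and escape("<b>") == "&lt;b&gt;".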
|
bsd-3-clause
|
jjmleiro/hue
|
desktop/core/ext-py/Django-1.6.10/django/contrib/flatpages/tests/test_csrf.py
|
106
|
3433
|
import os
from django.contrib.auth.models import User
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.test import TestCase, Client
from django.test.utils import override_settings
@override_settings(
LOGIN_URL='/accounts/login/',
MIDDLEWARE_CLASSES=(
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
),
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(__file__), 'templates'),
),
SITE_ID=1,
)
class FlatpageCSRFTests(TestCase):
fixtures = ['sample_flatpages', 'example_site']
urls = 'django.contrib.flatpages.tests.urls'
def setUp(self):
self.client = Client(enforce_csrf_checks=True)
def test_view_flatpage(self):
"A flatpage can be served through a view, even when the middleware is in use"
response = self.client.get('/flatpage_root/flatpage/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view, even when the middleware is in use"
response = self.client.get('/flatpage_root/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
@skipIfCustomUser
def test_view_authenticated_flatpage(self):
"A flatpage served through a view can require authentication"
response = self.client.get('/flatpage_root/sekrit/')
self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')
User.objects.create_user('testuser', '[email protected]', 's3krit')
self.client.login(username='testuser',password='s3krit')
response = self.client.get('/flatpage_root/sekrit/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage(self):
"A flatpage can be served by the fallback middlware"
response = self.client.get('/flatpage/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_fallback_non_existent_flatpage(self):
"A non-existent flatpage raises a 404 when served by the fallback middlware"
response = self.client.get('/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_post_view_flatpage(self):
"POSTing to a flatpage served through a view will raise a CSRF error if no token is provided (Refs #14156)"
response = self.client.post('/flatpage_root/flatpage/')
self.assertEqual(response.status_code, 403)
def test_post_fallback_flatpage(self):
"POSTing to a flatpage served by the middleware will raise a CSRF error if no token is provided (Refs #14156)"
response = self.client.post('/flatpage/')
self.assertEqual(response.status_code, 403)
def test_post_unknown_page(self):
"POSTing to an unknown page isn't caught as a 403 CSRF error"
response = self.client.post('/no_such_page/')
self.assertEqual(response.status_code, 404)
|
apache-2.0
|
xztor/EulerProject
|
problems/common/utils.py
|
1
|
1303
|
# Common routines to deal with primes, factors et al.
import operator
import itertools
def isqrt(n):
x = n
y = (x + 1) // 2
while y < x:
x = y
y = (x + n // x) // 2
return x
def static_vars(**kwargs):
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
@static_vars(m_known=10, primes_known=set([2,3,5,7]))
def primes_until(m):
if m <= primes_until.m_known:
return set(x for x in primes_until.primes_known if x <= m)
else:
p = reduce(operator.sub,
(set(range(2*k,m+1,k))
for k in primes_until(isqrt(m))), set([2]+range(3,m,2)))
primes_until.m_known=m
primes_until.primes_known = p
return p
def isprime(n):
s = isqrt(n)
    return all(n % p != 0 for p in primes_until(s))
def factors(n):
if n == 1:
return set([1])
p = sorted(primes_until(isqrt(n)), reverse=True)
for f in sorted(primes_until(isqrt(n)), reverse=True):
if n % f == 0:
ff = factors(n/f)
fff = set()
for x in ff:
fff.add(f*x)
return ff | fff | set([f, n])
return set([1, n])
def modexp(b, x, m):
r = 1
for i in bin(x)[2:]:
r = r * r
if i == '1':
r = r * b
r = r % m
return r
def gcd_(a, b):
return a if b == 0 else gcd_(b, a%b)
def gcd(a,b):
return gcd_(abs(a), abs(b))
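# Worked examples added for illustration. The module is Python 2 only (it relies on
# the reduce() builtin and on range() returning a list), so run these under Python 2.
if __name__ == '__main__':
    assert isqrt(99) == 9
    assert primes_until(20) == set([2, 3, 5, 7, 11, 13, 17, 19])
    assert isprime(97)
    assert factors(12) == set([1, 2, 3, 4, 6, 12])
    assert modexp(3, 13, 7) == 3  # 3**13 == 1594323 == 7 * 227760 + 3
    assert gcd(-12, 18) == 6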
|
mit
|
SantosDevelopers/sborganicos
|
venv/lib/python3.5/site-packages/pip/_vendor/html5lib/_trie/datrie.py
|
1301
|
1178
|
from __future__ import absolute_import, division, unicode_literals
from datrie import Trie as DATrie
from pip._vendor.six import text_type
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
chars = set()
for key in data.keys():
if not isinstance(key, text_type):
raise TypeError("All keys must be strings")
for char in key:
chars.add(char)
self._data = DATrie("".join(chars))
for key, value in data.items():
self._data[key] = value
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
raise NotImplementedError()
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
return self._data.keys(prefix)
def has_keys_with_prefix(self, prefix):
return self._data.has_keys_with_prefix(prefix)
def longest_prefix(self, prefix):
return self._data.longest_prefix(prefix)
def longest_prefix_item(self, prefix):
return self._data.longest_prefix_item(prefix)
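# Illustrative usage sketch (requires the optional `datrie` package; keys must be
# text strings, per the check in __init__):
#
#   t = Trie({u"abc": 1, u"abd": 2})
#   u"abc" in t                    # -> True
#   t.has_keys_with_prefix(u"ab")  # -> True
#   t.longest_prefix(u"abcde")     # -> u"abc"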
|
mit
|
mozilla/MozDef
|
tests/mq/plugins/test_ldap_fixup.py
|
1
|
4316
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
from mq.plugins.ldap_fixup import message
class TestLdapFixupPlugin():
def setup(self):
self.plugin = message()
def test_ldap_fixup_plugin(self):
msg = {
'summary': 'LDAP-Humanizer:45582:1.1.1.1',
'hostname': 'random.host.com',
'category': 'ldap',
'details': {
'tls': 'true',
'authenticated': 'true',
'actor': 'o=com,[email protected],dc=mozilla'
}
}
(retmessage, retmeta) = self.plugin.onMessage(msg, {})
expected_message = {
'summary': 'LDAP-Humanizer:45582:1.1.1.1',
'hostname': 'random.host.com',
'category': 'ldap',
'source': 'ldap',
'details': {
'tls_encrypted': 'true',
'authenticated': 'true',
'email': '[email protected]',
'username': 'tester',
'actor': 'o=com,[email protected],dc=mozilla'
}
}
assert retmessage == expected_message
assert retmeta == {}
def test_ldap_fixup_complex_actor_format(self):
msg = {
'summary': 'LDAP-Humanizer:45582:1.1.1.1',
'hostname': 'random.host.com',
'category': 'ldap',
'details': {
'tls': 'true',
'authenticated': 'true',
'actor': 'dc=mozilla [email protected],o=com,dc=mozilla '
'IP=123.45.67.89:46740 conn=180255',
}
}
expected = {
'summary': 'LDAP-Humanizer:45582:1.1.1.1',
'hostname': 'random.host.com',
'category': 'ldap',
'source': 'ldap',
'details': {
'tls_encrypted': 'true',
'authenticated': 'true',
'email': '[email protected]',
'username': 'tester',
'actor': 'dc=mozilla [email protected],o=com,dc=mozilla '
'IP=123.45.67.89:46740 conn=180255',
}
}
(retmessage, retmeta) = self.plugin.onMessage(msg, {})
assert retmessage == expected
assert retmeta == {}
def test_ldap_fixup_missing_actor(self):
msg = {
'summary': 'LDAP-Humanizer:45582:1.1.1.1',
'hostname': 'random.host.com',
'category': 'ldap',
'details': {
'tls': 'true',
'authenticated': 'true',
}
}
(retmessage, retmeta) = self.plugin.onMessage(msg, {})
assert retmessage['details'].get('email') is None
assert retmessage['details'].get('username') is None
def test_ldap_fixup_poorly_formatted_actor(self):
msgs = [
{
'summary': 'LDAP-Humanizer:45582:1.1.1.1',
'hostname': 'random.host.com',
'category': 'ldap',
'details': {
'tls': 'true',
'authenticated': 'true',
'actor': 'o=com=extra,[email protected]=extra2',
}
},
{
'summary': 'LDAP-Humanizer:45582:1.1.1.1',
'hostname': 'random.host.com',
'category': 'ldap',
'details': {
'tls': 'true',
'authenticated': 'true',
'actor': 'o=com,',
}
},
{
'summary': 'LDAP-Humanizer:45582:1.1.1.1',
'hostname': 'random.host.com',
'category': 'ldap',
'details': {
'tls': 'true',
'authenticated': 'true',
'actor': 'o,mail',
}
}
]
for msg in msgs:
(retmessage, retmeta) = self.plugin.onMessage(msg, {})
assert retmessage['details'].get('email') is None
assert retmessage['details'].get('username') is None
|
mpl-2.0
|
mgit-at/ansible
|
lib/ansible/modules/cloud/openstack/os_security_group_rule.py
|
26
|
12013
|
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_security_group_rule
short_description: Add/Delete rule from an existing security group
author: "Benno Joy (@bennojoy)"
extends_documentation_fragment: openstack
version_added: "2.0"
description:
- Add or Remove rule from an existing security group
options:
security_group:
description:
- Name or ID of the security group
required: true
protocol:
description:
- IP protocols TCP UDP ICMP 112 (VRRP)
choices: ['tcp', 'udp', 'icmp', '112', None]
port_range_min:
description:
- Starting port
port_range_max:
description:
- Ending port
remote_ip_prefix:
description:
- Source IP address(es) in CIDR notation (exclusive with remote_group)
remote_group:
description:
- Name or ID of the Security group to link (exclusive with
remote_ip_prefix)
ethertype:
description:
- Must be IPv4 or IPv6, and addresses represented in CIDR must
match the ingress or egress rules. Not all providers support IPv6.
choices: ['IPv4', 'IPv6']
default: IPv4
direction:
description:
- The direction in which the security group rule is applied. Not
all providers support egress.
choices: ['egress', 'ingress']
default: ingress
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
project:
description:
- Unique name or ID of the project.
required: false
version_added: "2.7"
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements: ["openstacksdk"]
'''
EXAMPLES = '''
# Create a security group rule
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: tcp
port_range_min: 80
port_range_max: 80
remote_ip_prefix: 0.0.0.0/0
# Create a security group rule for ping
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: icmp
remote_ip_prefix: 0.0.0.0/0
# Another way to create the ping rule
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: icmp
port_range_min: -1
port_range_max: -1
remote_ip_prefix: 0.0.0.0/0
# Create a TCP rule covering all ports
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: tcp
port_range_min: 1
port_range_max: 65535
remote_ip_prefix: 0.0.0.0/0
# Another way to create the TCP rule above (defaults to all ports)
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: tcp
remote_ip_prefix: 0.0.0.0/0
# Create a rule for VRRP with numbered protocol 112
- os_security_group_rule:
security_group: loadbalancer_sg
protocol: 112
remote_group: loadbalancer-node_sg
# Create a security group rule for a given project
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: icmp
remote_ip_prefix: 0.0.0.0/0
project: myproj
'''
RETURN = '''
id:
description: Unique rule UUID.
type: string
returned: state == present
direction:
description: The direction in which the security group rule is applied.
type: string
sample: 'egress'
returned: state == present
ethertype:
description: One of IPv4 or IPv6.
type: string
sample: 'IPv4'
returned: state == present
port_range_min:
description: The minimum port number in the range that is matched by
the security group rule.
type: int
sample: 8000
returned: state == present
port_range_max:
description: The maximum port number in the range that is matched by
the security group rule.
type: int
sample: 8000
returned: state == present
protocol:
description: The protocol that is matched by the security group rule.
type: string
sample: 'tcp'
returned: state == present
remote_ip_prefix:
description: The remote IP prefix to be associated with this security group rule.
type: string
sample: '0.0.0.0/0'
returned: state == present
security_group_id:
description: The security group ID to associate with this security group rule.
type: string
returned: state == present
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _ports_match(protocol, module_min, module_max, rule_min, rule_max):
"""
Capture the complex port matching logic.
The port values coming in for the module might be -1 (for ICMP),
which will work only for Nova, but this is handled by sdk. Likewise,
they might be None, which works for Neutron, but not Nova. This too is
handled by sdk. Since sdk will consistently return these port
values as None, we need to convert any -1 values input to the module
to None here for comparison.
For TCP and UDP protocols, None values for both min and max are
represented as the range 1-65535 for Nova, but remain None for
Neutron. sdk returns the full range when Nova is the backend (since
that is how Nova stores them), and None values for Neutron. If None
values are input to the module for both values, then we need to adjust
for comparison.
"""
# Check if the user is supplying -1 for ICMP.
if protocol == 'icmp':
if module_min and int(module_min) == -1:
module_min = None
if module_max and int(module_max) == -1:
module_max = None
    # Check if the user is supplying -1 or None values for the full TCP/UDP port range.
if protocol in ['tcp', 'udp'] or protocol is None:
if module_min and module_max and int(module_min) == int(module_max) == -1:
module_min = None
module_max = None
if ((module_min is None and module_max is None) and
(rule_min and int(rule_min) == 1 and
rule_max and int(rule_max) == 65535)):
# (None, None) == (1, 65535)
return True
# Sanity check to make sure we don't have type comparison issues.
if module_min:
module_min = int(module_min)
if module_max:
module_max = int(module_max)
if rule_min:
rule_min = int(rule_min)
if rule_max:
rule_max = int(rule_max)
return module_min == rule_min and module_max == rule_max
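# Worked examples of the matching rules implemented above (hypothetical values, kept
# in comments so the module's import-time behaviour is unchanged):
#   _ports_match('tcp', -1, -1, 1, 65535)      -> True   (-1/-1 is treated as the full range)
#   _ports_match('tcp', None, None, 1, 65535)  -> True   (None/None equals Nova's 1-65535)
#   _ports_match('icmp', -1, -1, None, None)   -> True   (-1 is normalised to None for ICMP)
#   _ports_match('tcp', 80, 80, 80, 80)        -> True
#   _ports_match('tcp', 80, 443, 80, 80)       -> False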
def _find_matching_rule(module, secgroup, remotegroup):
"""
Find a rule in the group that matches the module parameters.
:returns: The matching rule dict, or None if no matches.
"""
protocol = module.params['protocol']
remote_ip_prefix = module.params['remote_ip_prefix']
ethertype = module.params['ethertype']
direction = module.params['direction']
remote_group_id = remotegroup['id']
for rule in secgroup['security_group_rules']:
if (protocol == rule['protocol']
and remote_ip_prefix == rule['remote_ip_prefix']
and ethertype == rule['ethertype']
and direction == rule['direction']
and remote_group_id == rule['remote_group_id']
and _ports_match(protocol,
module.params['port_range_min'],
module.params['port_range_max'],
rule['port_range_min'],
rule['port_range_max'])):
return rule
return None
def _system_state_change(module, secgroup, remotegroup):
state = module.params['state']
if secgroup:
rule_exists = _find_matching_rule(module, secgroup, remotegroup)
else:
return False
if state == 'present' and not rule_exists:
return True
if state == 'absent' and rule_exists:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
security_group=dict(required=True),
# NOTE(Shrews): None is an acceptable protocol value for
# Neutron, but Nova will balk at this.
protocol=dict(default=None,
choices=[None, 'tcp', 'udp', 'icmp', '112']),
port_range_min=dict(required=False, type='int'),
port_range_max=dict(required=False, type='int'),
remote_ip_prefix=dict(required=False, default=None),
remote_group=dict(required=False, default=None),
ethertype=dict(default='IPv4',
choices=['IPv4', 'IPv6']),
direction=dict(default='ingress',
choices=['egress', 'ingress']),
state=dict(default='present',
choices=['absent', 'present']),
project=dict(default=None),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['remote_ip_prefix', 'remote_group'],
]
)
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
state = module.params['state']
security_group = module.params['security_group']
remote_group = module.params['remote_group']
project = module.params['project']
changed = False
sdk, cloud = openstack_cloud_from_module(module)
try:
if project is not None:
proj = cloud.get_project(project)
if proj is None:
module.fail_json(msg='Project %s could not be found' % project)
project_id = proj['id']
else:
project_id = cloud.current_project_id
if project_id:
filters = {'tenant_id': project_id}
else:
filters = None
secgroup = cloud.get_security_group(security_group, filters=filters)
if remote_group:
remotegroup = cloud.get_security_group(remote_group,
filters=filters)
else:
remotegroup = {'id': None}
if module.check_mode:
module.exit_json(changed=_system_state_change(module, secgroup, remotegroup))
if state == 'present':
if not secgroup:
module.fail_json(msg='Could not find security group %s' %
security_group)
rule = _find_matching_rule(module, secgroup, remotegroup)
if not rule:
kwargs = {}
if project_id:
kwargs['project_id'] = project_id
rule = cloud.create_security_group_rule(
secgroup['id'],
port_range_min=module.params['port_range_min'],
port_range_max=module.params['port_range_max'],
protocol=module.params['protocol'],
remote_ip_prefix=module.params['remote_ip_prefix'],
remote_group_id=remotegroup['id'],
direction=module.params['direction'],
ethertype=module.params['ethertype'],
**kwargs
)
changed = True
module.exit_json(changed=changed, rule=rule, id=rule['id'])
if state == 'absent' and secgroup:
rule = _find_matching_rule(module, secgroup, remotegroup)
if rule:
cloud.delete_security_group_rule(rule['id'])
changed = True
module.exit_json(changed=changed)
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
gpl-3.0
|
phenix3443/shadowsocks
|
shadowsocks/udprelay.py
|
924
|
11154
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# SOCKS5 UDP Request
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# SOCKS5 UDP Response
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# shadowsocks UDP Request (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Response (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Request and Response (after encrypted)
# +-------+--------------+
# | IV | PAYLOAD |
# +-------+--------------+
# | Fixed | Variable |
# +-------+--------------+
# HOW TO NAME THINGS
# ------------------
# `dest` means destination server, which is from DST fields in the SOCKS5
# request
# `local` means local server of shadowsocks
# `remote` means remote server of shadowsocks
# `client` means UDP clients that connects to other servers
# `server` means the UDP server that handles user requests
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import logging
import struct
import errno
import random
from shadowsocks import encrypt, eventloop, lru_cache, common, shell
from shadowsocks.common import parse_header, pack_addr
BUF_SIZE = 65536
def client_key(source_addr, server_af):
# notice this is server af, not dest af
return '%s:%s:%d' % (source_addr[0], source_addr[1], server_af)
class UDPRelay(object):
def __init__(self, config, dns_resolver, is_local, stat_callback=None):
self._config = config
if is_local:
self._listen_addr = config['local_address']
self._listen_port = config['local_port']
self._remote_addr = config['server']
self._remote_port = config['server_port']
else:
self._listen_addr = config['server']
self._listen_port = config['server_port']
self._remote_addr = None
self._remote_port = None
self._dns_resolver = dns_resolver
self._password = common.to_bytes(config['password'])
self._method = config['method']
self._timeout = config['timeout']
self._is_local = is_local
self._cache = lru_cache.LRUCache(timeout=config['timeout'],
close_callback=self._close_client)
self._client_fd_to_server_addr = \
lru_cache.LRUCache(timeout=config['timeout'])
self._dns_cache = lru_cache.LRUCache(timeout=300)
self._eventloop = None
self._closed = False
self._sockets = set()
if 'forbidden_ip' in config:
self._forbidden_iplist = config['forbidden_ip']
else:
self._forbidden_iplist = None
addrs = socket.getaddrinfo(self._listen_addr, self._listen_port, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" %
(self._listen_addr, self._listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.bind((self._listen_addr, self._listen_port))
server_socket.setblocking(False)
self._server_socket = server_socket
self._stat_callback = stat_callback
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
def _close_client(self, client):
if hasattr(client, 'close'):
self._sockets.remove(client.fileno())
self._eventloop.remove(client)
client.close()
else:
# just an address
pass
def _handle_server(self):
server = self._server_socket
data, r_addr = server.recvfrom(BUF_SIZE)
if not data:
logging.debug('UDP handle_server: data is empty')
if self._stat_callback:
self._stat_callback(self._listen_port, len(data))
if self._is_local:
frag = common.ord(data[2])
if frag != 0:
logging.warn('drop a message since frag is not 0')
return
else:
data = data[3:]
else:
data = encrypt.encrypt_all(self._password, self._method, 0, data)
# decrypt data
if not data:
logging.debug('UDP handle_server: data is empty after decrypt')
return
header_result = parse_header(data)
if header_result is None:
return
addrtype, dest_addr, dest_port, header_length = header_result
if self._is_local:
server_addr, server_port = self._get_a_server()
else:
server_addr, server_port = dest_addr, dest_port
addrs = self._dns_cache.get(server_addr, None)
if addrs is None:
addrs = socket.getaddrinfo(server_addr, server_port, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if not addrs:
# drop
return
else:
self._dns_cache[server_addr] = addrs
af, socktype, proto, canonname, sa = addrs[0]
key = client_key(r_addr, af)
client = self._cache.get(key, None)
if not client:
# TODO async getaddrinfo
if self._forbidden_iplist:
if common.to_str(sa[0]) in self._forbidden_iplist:
logging.debug('IP %s is in forbidden list, drop' %
common.to_str(sa[0]))
# drop
return
client = socket.socket(af, socktype, proto)
client.setblocking(False)
self._cache[key] = client
self._client_fd_to_server_addr[client.fileno()] = r_addr
self._sockets.add(client.fileno())
self._eventloop.add(client, eventloop.POLL_IN, self)
if self._is_local:
data = encrypt.encrypt_all(self._password, self._method, 1, data)
if not data:
return
else:
data = data[header_length:]
if not data:
return
try:
client.sendto(data, (server_addr, server_port))
except IOError as e:
err = eventloop.errno_from_exception(e)
if err in (errno.EINPROGRESS, errno.EAGAIN):
pass
else:
shell.print_exception(e)
def _handle_client(self, sock):
data, r_addr = sock.recvfrom(BUF_SIZE)
if not data:
logging.debug('UDP handle_client: data is empty')
return
if self._stat_callback:
self._stat_callback(self._listen_port, len(data))
if not self._is_local:
addrlen = len(r_addr[0])
if addrlen > 255:
# drop
return
data = pack_addr(r_addr[0]) + struct.pack('>H', r_addr[1]) + data
response = encrypt.encrypt_all(self._password, self._method, 1,
data)
if not response:
return
else:
data = encrypt.encrypt_all(self._password, self._method, 0,
data)
if not data:
return
header_result = parse_header(data)
if header_result is None:
return
# addrtype, dest_addr, dest_port, header_length = header_result
response = b'\x00\x00\x00' + data
client_addr = self._client_fd_to_server_addr.get(sock.fileno())
if client_addr:
self._server_socket.sendto(response, client_addr)
else:
            # this packet is from an address we don't know about
# simply drop that packet
pass
def add_to_loop(self, loop):
if self._eventloop:
raise Exception('already add to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
server_socket = self._server_socket
self._eventloop.add(server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR, self)
loop.add_periodic(self.handle_periodic)
def handle_event(self, sock, fd, event):
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
logging.error('UDP server_socket err')
self._handle_server()
elif sock and (fd in self._sockets):
if event & eventloop.POLL_ERR:
logging.error('UDP client_socket err')
self._handle_client(sock)
def handle_periodic(self):
if self._closed:
if self._server_socket:
self._server_socket.close()
self._server_socket = None
for sock in self._sockets:
sock.close()
logging.info('closed UDP port %d', self._listen_port)
self._cache.sweep()
self._client_fd_to_server_addr.sweep()
def close(self, next_tick=False):
logging.debug('UDP close')
self._closed = True
if not next_tick:
if self._eventloop:
self._eventloop.remove_periodic(self.handle_periodic)
self._eventloop.remove(self._server_socket)
self._server_socket.close()
for client in list(self._cache.values()):
client.close()
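# Minimal sketch of the plaintext shadowsocks UDP payload documented at the top of
# this file (ATYP | DST.ADDR | DST.PORT | DATA), built with helpers this module
# already imports. The values are illustrative and the exact return types of
# parse_header() depend on the shadowsocks version in use.
#
#   payload = pack_addr(b'8.8.8.8') + struct.pack('>H', 53) + b'dns-query-bytes'
#   addrtype, dest_addr, dest_port, header_length = parse_header(payload)
#   # dest_port == 53 and payload[header_length:] is the DATA part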
|
apache-2.0
|
Belxjander/Kirito
|
Python-3.5.0-main/Tools/pybench/Lookups.py
|
92
|
15254
|
from pybench import Test
class SpecialClassAttribute(Test):
version = 2.0
operations = 5*(12 + 12)
rounds = 100000
def test(self):
class c:
pass
for i in range(self.rounds):
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
def calibrate(self):
class c:
pass
for i in range(self.rounds):
pass
class NormalClassAttribute(Test):
version = 2.0
operations = 5*(12 + 12)
rounds = 100000
def test(self):
class c:
pass
for i in range(self.rounds):
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
def calibrate(self):
class c:
pass
for i in range(self.rounds):
pass
class SpecialInstanceAttribute(Test):
version = 2.0
operations = 5*(12 + 12)
rounds = 100000
def test(self):
class c:
pass
o = c()
for i in range(self.rounds):
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
def calibrate(self):
class c:
pass
o = c()
for i in range(self.rounds):
pass
class NormalInstanceAttribute(Test):
version = 2.0
operations = 5*(12 + 12)
rounds = 100000
def test(self):
class c:
pass
o = c()
for i in range(self.rounds):
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
def calibrate(self):
class c:
pass
o = c()
for i in range(self.rounds):
pass
class BuiltinMethodLookup(Test):
version = 2.0
operations = 5*(3*5 + 3*5)
rounds = 70000
def test(self):
l = []
d = {}
for i in range(self.rounds):
l.append
l.append
l.append
l.append
l.append
l.insert
l.insert
l.insert
l.insert
l.insert
l.sort
l.sort
l.sort
l.sort
l.sort
# d.has_key
# d.has_key
# d.has_key
# d.has_key
# d.has_key
d.items
d.items
d.items
d.items
d.items
d.get
d.get
d.get
d.get
d.get
l.append
l.append
l.append
l.append
l.append
l.insert
l.insert
l.insert
l.insert
l.insert
l.sort
l.sort
l.sort
l.sort
l.sort
# d.has_key
# d.has_key
# d.has_key
# d.has_key
# d.has_key
d.items
d.items
d.items
d.items
d.items
d.get
d.get
d.get
d.get
d.get
l.append
l.append
l.append
l.append
l.append
l.insert
l.insert
l.insert
l.insert
l.insert
l.sort
l.sort
l.sort
l.sort
l.sort
# d.has_key
# d.has_key
# d.has_key
# d.has_key
# d.has_key
d.items
d.items
d.items
d.items
d.items
d.get
d.get
d.get
d.get
d.get
l.append
l.append
l.append
l.append
l.append
l.insert
l.insert
l.insert
l.insert
l.insert
l.sort
l.sort
l.sort
l.sort
l.sort
# d.has_key
# d.has_key
# d.has_key
# d.has_key
# d.has_key
d.items
d.items
d.items
d.items
d.items
d.get
d.get
d.get
d.get
d.get
l.append
l.append
l.append
l.append
l.append
l.insert
l.insert
l.insert
l.insert
l.insert
l.sort
l.sort
l.sort
l.sort
l.sort
# d.has_key
# d.has_key
# d.has_key
# d.has_key
# d.has_key
d.items
d.items
d.items
d.items
d.items
d.get
d.get
d.get
d.get
d.get
def calibrate(self):
l = []
d = {}
for i in range(self.rounds):
pass
|
gpl-3.0
|
ReganBell/QReview
|
networkx/algorithms/approximation/clustering_coefficient.py
|
41
|
1992
|
# -*- coding: utf-8 -*-
# Copyright (C) 2013 by
# Fred Morstatter <[email protected]>
# Jordi Torrents <[email protected]>
# All rights reserved.
# BSD license.
import random
from networkx.utils import not_implemented_for
__all__ = ['average_clustering']
__author__ = """\n""".join(['Fred Morstatter <[email protected]>',
'Jordi Torrents <[email protected]>'])
@not_implemented_for('directed')
def average_clustering(G, trials=1000):
r"""Estimates the average clustering coefficient of G.
The local clustering of each node in `G` is the fraction of triangles
that actually exist over all possible triangles in its neighborhood.
The average clustering coefficient of a graph `G` is the mean of
local clusterings.
This function finds an approximate average clustering coefficient
for G by repeating `n` times (defined in `trials`) the following
experiment: choose a node at random, choose two of its neighbors
at random, and check if they are connected. The approximate
coefficient is the fraction of triangles found over the number
of trials [1]_.
Parameters
----------
G : NetworkX graph
trials : integer
Number of trials to perform (default 1000).
Returns
-------
c : float
Approximated average clustering coefficient.
References
----------
.. [1] Schank, Thomas, and Dorothea Wagner. Approximating clustering
coefficient and transitivity. Universität Karlsruhe, Fakultät für
Informatik, 2004.
http://www.emis.ams.org/journals/JGAA/accepted/2005/SchankWagner2005.9.2.pdf
"""
n = len(G)
triangles = 0
nodes = G.nodes()
for i in [int(random.random() * n) for i in range(trials)]:
nbrs = list(G[nodes[i]])
if len(nbrs) < 2:
continue
u, v = random.sample(nbrs, 2)
if u in G[v]:
triangles += 1
return triangles / float(trials)
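# Usage sketch added for illustration: on a complete graph every sampled pair of
# neighbours is connected, so the estimate is exactly 1.0. This assumes the NetworkX
# version bundled with this module (G.nodes() indexable like a list).
if __name__ == '__main__':
    import networkx as nx
    G = nx.complete_graph(5)
    print(average_clustering(G, trials=100))  # -> 1.0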
|
bsd-3-clause
|
willingc/oh-mainline
|
vendor/packages/twisted/twisted/persisted/journal/picklelog.py
|
19
|
1189
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
# -*- test-case-name: twisted.test.test_journal -*-
"""Logging that uses pickles.
TODO: add log that logs to a file.
"""
# twisted imports
from twisted.persisted import dirdbm
from twisted.internet import defer
from zope.interface import implements
# sibling imports
import base
class DirDBMLog:
"""Log pickles to DirDBM directory."""
implements(base.ICommandLog)
def __init__(self, logPath):
self.db = dirdbm.Shelf(logPath)
indexs = map(int, self.db.keys())
if indexs:
self.currentIndex = max(indexs)
else:
self.currentIndex = 0
def logCommand(self, command, time):
"""Log a command."""
self.currentIndex += 1
self.db[str(self.currentIndex)] = (time, command)
return defer.succeed(1)
def getCurrentIndex(self):
"""Return index of last command logged."""
return self.currentIndex
def getCommandsSince(self, index):
result = []
for i in range(index, self.currentIndex + 1):
result.append(self.db[str(i)])
return result
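# --- Usage sketch (added; not part of the original module) ---
# Illustrates the DirDBMLog API defined above; the log directory path and the
# command object are hypothetical placeholders.
#
#   log = DirDBMLog("/tmp/journal-log")
#   log.logCommand(("set", "answer", 42), time=1234567890)   # fired Deferred
#   log.getCurrentIndex()          # -> 1
#   log.getCommandsSince(1)        # -> [(1234567890, ("set", "answer", 42))]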
|
agpl-3.0
|
chrsrds/scikit-learn
|
sklearn/svm/tests/test_bounds.py
|
3
|
2279
|
import numpy as np
from scipy import sparse as sp
import pytest
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
@pytest.mark.parametrize('loss', ['squared_hinge', 'log'])
@pytest.mark.parametrize('X_label', ['sparse', 'dense'])
@pytest.mark.parametrize('Y_label', ['two-classes', 'multi-class'])
@pytest.mark.parametrize('intercept_label', ['no-intercept', 'fit-intercept'])
def test_l1_min_c(loss, X_label, Y_label, intercept_label):
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
X = Xs[X_label]
Y = Ys[Y_label]
intercept_params = intercepts[intercept_label]
check_l1_min_c(X, Y, loss, **intercept_params)
def test_l1_min_c_l2_loss():
# loss='l2' should raise ValueError
assert_raise_message(ValueError, "loss type not in",
l1_min_c, dense_X, Y1, "l2")
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1', solver='liblinear'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert (np.asarray(clf.coef_) == 0).all()
assert (np.asarray(clf.intercept_) == 0).all()
clf.C = min_c * 1.01
clf.fit(X, y)
assert ((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
assert_raises(ValueError, l1_min_c, X, y)
def test_unsupported_loss():
assert_raises(ValueError, l1_min_c, dense_X, Y1, 'l1')
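# --- Usage sketch (added; not part of the original tests) ---
# Shows the property these tests verify: with C equal to l1_min_c the
# L1-penalized model is entirely sparse. Mirrors the 'no-intercept' case above.
if __name__ == '__main__':
    c_min = l1_min_c(dense_X, Y1, loss='squared_hinge', fit_intercept=False)
    svc = LinearSVC(loss='squared_hinge', penalty='l1', dual=False,
                    fit_intercept=False, C=c_min).fit(dense_X, Y1)
    print('all coefficients zero at C = l1_min_c: %s'
          % (np.asarray(svc.coef_) == 0).all())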
|
bsd-3-clause
|
examachine/pisi
|
setup.py
|
1
|
4204
|
#!/usr/bin/env python
#
# Copyright (C) 2005, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
# Authors: {eray,gurer}@pardus.org.tr
import os
import shutil
import glob
import sys
import inspect
from distutils.core import setup
from distutils.command.install import install
sys.path.insert(0, '.')
import pisi
i18n_domain = "pisi"
i18n_languages = "tr"
class Install(install):
def run(self):
install.run(self)
self.installi18n()
self.installdoc()
self.generateConfigFile()
def installi18n(self):
for lang in i18n_languages.split(' '):
print "Installing '%s' translations..." % lang
os.popen("msgfmt po/%s.po -o po/%s.mo" % (lang, lang))
if not self.prefix:
self.prefix = "/"
destpath = os.path.join(self.prefix, "usr/share/locale/%s/LC_MESSAGES" % lang)
try:
os.makedirs(destpath)
except:
pass
shutil.copy("po/%s.mo" % lang, os.path.join(destpath, "%s.mo" % i18n_domain))
def installdoc(self):
destpath = os.path.join(self.prefix, "usr/share/doc/pisi")
try:
os.makedirs(destpath)
except:
pass
os.chdir('doc')
for pdf in glob.glob('*.pdf'):
print 'Installing', pdf
shutil.copy(pdf, os.path.join(destpath, pdf))
os.chdir('..')
def generateConfigFile(self):
import pisi.configfile
destpath = os.path.join(self.prefix, "etc/pisi/")
try:
os.makedirs(destpath)
except:
pass
pisiconf = open(os.path.join(destpath, "pisi.conf"), "w")
klasses = inspect.getmembers(pisi.configfile, inspect.isclass)
defaults = [klass for klass in klasses if klass[0].endswith('Defaults')]
for d in defaults:
section_name = d[0][:-len('Defaults')].lower()
pisiconf.write("[%s]\n" % section_name)
section_members = [m for m in inspect.getmembers(d[1]) \
if not m[0].startswith('__') \
and not m[0].endswith('__')]
for member in section_members:
pisiconf.write("# %s = %s\n" % (member[0], member[1]))
pisiconf.write('\n')
setup(name="pisi",
version= pisi.__version__,
description="PISI (Packages Installed Successfully as Intended)",
long_description="PISI is the package management system of Pardus Linux.",
license="GNU AGPL-3.0",
author="Eray Ozkural, Baris Metin, S. Caglar Onur, Murat Eren, Gurer Ozen and contributors",
author_email="[email protected]",
url="https://github.com/examachine/pisi",
package_dir = {'': ''},
packages = ['pisi', 'pisi.util', 'pisi.db', 'pisi.exml', 'pisi.data',
'pisi.op', 'pisi.cli', 'pisi.actionsapi', 'pisi.search'],
scripts = ['pisi-cli', 'scripts/repostats.py', 'scripts/find-lib-deps.py',
'scripts/lspisi', 'scripts/unpisi',
'scripts/calc-build-order.py', 'scripts/pisish', 'scripts/pisimedia'],
data_files = [ ('etc/pisi', ['etc/mirrors.conf']) ],
cmdclass = {
'install' : Install
}
)
# the below stuff is really nice but we already have a version
# we can use this stuff for svn snapshots in a separate
# script, or with a parameter I don't know -- exa
PISI_VERSION = pisi.__version__
def getRevision():
import os
try:
p = os.popen("svn info 2> /dev/null")
for line in p.readlines():
line = line.strip()
if line.startswith("Revision:"):
return line.split(":")[1].strip()
except:
pass
    # not inside a Subversion working copy
return None
def getVersion():
rev = getRevision()
if rev:
return "-r".join([PISI_VERSION, rev])
else:
return PISI_VERSION
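# --- Illustration (added; not part of the original script) ---
# getVersion() appends the Subversion revision when one can be detected, e.g.
# (with a hypothetical PISI_VERSION of "2.6" and revision "1234"):
#   inside a svn checkout:   "2.6" -> "2.6-r1234"
#   outside a svn checkout:  getRevision() is None, so plain "2.6" is returned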
|
gpl-3.0
|
NL66278/odoo
|
addons/l10n_ca/__openerp__.py
|
161
|
3167
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Canada - Accounting',
'version': '1.2',
'author': 'Savoir-faire Linux',
'website': 'http://www.savoirfairelinux.com',
'category': 'Localization/Account Charts',
'description': """
This is the module to manage the English and French - Canadian accounting chart in OpenERP.
===========================================================================================
Canadian accounting charts and localizations.
Fiscal positions
----------------
When considering taxes to be applied, it is the province where the delivery occurs that matters.
Therefore we decided to implement the most common case in the fiscal positions: delivery is the
responsibility of the supplier and done at the customer location.
Some examples:
1) You have a customer from another province and you deliver to his location.
On the customer, set the fiscal position to his province.
    2) You have a customer from another province. However, this customer comes to your location
with their truck to pick up products. On the customer, do not set any fiscal position.
3) An international supplier doesn't charge you any tax. Taxes are charged at customs
by the customs broker. On the supplier, set the fiscal position to International.
    4) An international supplier charges you your provincial tax. They are registered with your
provincial government and remit taxes themselves. On the supplier, do not set any fiscal
position.
""",
'depends': [
'base',
'account',
'base_iban',
'base_vat',
'account_chart',
'account_anglo_saxon'
],
'data': [
'account_chart_en.xml',
'account_tax_code_en.xml',
'account_chart_template_en.xml',
'account_tax_en.xml',
'fiscal_templates_en.xml',
'account_chart_fr.xml',
'account_tax_code_fr.xml',
'account_chart_template_fr.xml',
'account_tax_fr.xml',
'fiscal_templates_fr.xml',
'l10n_ca_wizard.xml'
],
'demo': [],
'installable': True,
'images': ['images/config_chart_l10n_ca.jpeg','images/l10n_ca_chart.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
shikhardb/scikit-learn
|
examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py
|
227
|
5170
|
"""
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates how the accuracy of nearest neighbor queries with
Locality Sensitive Hashing Forest varies as the number of candidates and the
number of estimators (trees) change.
In the first plot, accuracy is plotted against the number of candidates. Here,
the term "number of candidates" refers to the maximum number of distinct
points retrieved from each tree to calculate the distances. Nearest
neighbors are selected from this pool of candidates. The number of estimators
is held at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. The number of
trees is varied and the accuracy is plotted against those values. To measure
the accuracy, the true nearest neighbors are required; therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidate` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iterations to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
|
bsd-3-clause
|
astaff/ansible
|
lib/ansible/utils/module_docs_fragments/openstack.py
|
118
|
4021
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard openstack documentation fragment
DOCUMENTATION = '''
options:
cloud:
description:
- Named cloud to operate against. Provides default values for I(auth) and
I(auth_type). This parameter is not needed if I(auth) is provided or if
OpenStack OS_* environment variables are present.
required: false
auth:
description:
- Dictionary containing auth information as needed by the cloud's auth
plugin strategy. For the default I(password) plugin, this would contain
I(auth_url), I(username), I(password), I(project_name) and any
information about domains if the cloud supports them. For other plugins,
this param will need to contain whatever parameters that auth plugin
requires. This parameter is not needed if a named cloud is provided or
OpenStack OS_* environment variables are present.
required: false
auth_type:
description:
- Name of the auth plugin to use. If the cloud uses something other than
password authentication, the name of the plugin should be indicated here
and the contents of the I(auth) parameter should be updated accordingly.
required: false
default: password
region_name:
description:
- Name of the region.
required: false
availability_zone:
description:
- Name of the availability zone.
required: false
wait:
description:
- Should ansible wait until the requested resource is complete.
required: false
default: "yes"
choices: ["yes", "no"]
timeout:
description:
- How long should ansible wait for the requested resource.
required: false
default: 180
api_timeout:
description:
- How long should the socket layer wait before timing out for API calls.
If this is omitted, nothing will be passed to the requests library.
required: false
default: None
validate_certs:
description:
- Whether or not SSL API requests should be verified.
required: false
default: True
aliases: ['verify']
cacert:
description:
- A path to a CA Cert bundle that can be used as part of verifying
SSL API requests.
required: false
default: None
cert:
description:
- A path to a client certificate to use as part of the SSL transaction
required: false
default: None
key:
description:
- A path to a client key to use as part of the SSL transaction
required: false
default: None
endpoint_type:
description:
- Endpoint URL type to fetch from the service catalog.
choices: [public, internal, admin]
required: false
default: public
requirements:
- python >= 2.7
- shade
notes:
- The standard OpenStack environment variables, such as C(OS_USERNAME)
may be used instead of providing explicit values.
- Auth information is driven by os-client-config, which means that values
can come from a yaml config file in /etc/ansible/openstack.yaml,
/etc/openstack/clouds.yaml or ~/.config/openstack/clouds.yaml, then from
standard environment variables, then finally by explicit parameters in
plays. More information can be found at
U(http://docs.openstack.org/developer/os-client-config)
'''
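# --- Illustration (added; not part of the original fragment) ---
# A minimal clouds.yaml of the kind os-client-config reads, matching the
# I(cloud)/I(auth) options documented above; every value is a placeholder.
#
#   clouds:
#     mycloud:
#       auth:
#         auth_url: https://keystone.example.com:5000/
#         username: demo
#         password: secret
#         project_name: demo
#       region_name: RegionOne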
|
gpl-3.0
|
hurricup/intellij-community
|
python/lib/Lib/glob.py
|
92
|
2010
|
"""Filename globbing utility."""
import os
import fnmatch
import re
__all__ = ["glob", "iglob"]
def glob(pathname):
"""Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la fnmatch.
"""
return list(iglob(pathname))
def iglob(pathname):
"""Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la fnmatch.
"""
if not has_magic(pathname):
if os.path.lexists(pathname):
yield pathname
return
dirname, basename = os.path.split(pathname)
if not dirname:
for name in glob1(os.curdir, basename):
yield name
return
if has_magic(dirname):
dirs = iglob(dirname)
else:
dirs = [dirname]
if has_magic(basename):
glob_in_dir = glob1
else:
glob_in_dir = glob0
for dirname in dirs:
for name in glob_in_dir(dirname, basename):
yield os.path.join(dirname, name)
# These 2 helper functions non-recursively glob inside a literal directory.
# They return a list of basenames. `glob1` accepts a pattern while `glob0`
# takes a literal basename (so it only has to check for its existence).
def glob1(dirname, pattern):
if not dirname:
dirname = os.curdir
try:
names = os.listdir(dirname)
except os.error:
return []
    if pattern[0] != '.':
        names = filter(lambda x: x[0] != '.', names)
    return fnmatch.filter(names, pattern)
def glob0(dirname, basename):
if basename == '':
# `os.path.split()` returns an empty basename for paths ending with a
# directory separator. 'q*x/' should match only directories.
if os.path.isdir(dirname):
return [basename]
else:
if os.path.lexists(os.path.join(dirname, basename)):
return [basename]
return []
magic_check = re.compile('[*?[]')
def has_magic(s):
return magic_check.search(s) is not None
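# --- Usage sketch (added; not part of the original module) ---
# The two entry points defined above; the patterns are hypothetical.
#
#   glob('*.py')                 # -> list of matching names, built eagerly
#   for name in iglob('docs/*.txt'):
#       print name               # yields matches lazily, one at a time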
|
apache-2.0
|
stelligent/ansible-modules-core
|
cloud/rackspace/rax.py
|
133
|
32841
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax
short_description: create / delete an instance in Rackspace Public Cloud
description:
- creates / deletes a Rackspace Public Cloud instance and optionally
waits for it to be 'running'.
version_added: "1.2"
options:
auto_increment:
description:
- Whether or not to increment a single number with the name of the
created servers. Only applicable when used with the I(group) attribute
or meta key.
default: yes
choices:
- "yes"
- "no"
version_added: 1.5
boot_from_volume:
description:
- Whether or not to boot the instance from a Cloud Block Storage volume.
If C(yes) and I(image) is specified a new volume will be created at
boot time. I(boot_volume_size) is required with I(image) to create a
new volume at boot time.
default: "no"
choices:
- "yes"
- "no"
version_added: 1.9
boot_volume:
description:
- Cloud Block Storage ID or Name to use as the boot volume of the
instance
version_added: 1.9
boot_volume_size:
description:
- Size of the volume to create in Gigabytes. This is only required with
I(image) and I(boot_from_volume).
default: 100
version_added: 1.9
boot_volume_terminate:
description:
- Whether the I(boot_volume) or newly created volume from I(image) will
be terminated when the server is terminated
default: false
version_added: 1.9
config_drive:
description:
- Attach read-only configuration drive to server as label config-2
default: no
choices:
- "yes"
- "no"
version_added: 1.7
count:
description:
- number of instances to launch
default: 1
version_added: 1.4
count_offset:
description:
- number count to start at
default: 1
version_added: 1.4
disk_config:
description:
- Disk partitioning strategy
choices:
- auto
- manual
version_added: '1.4'
default: auto
exact_count:
description:
- Explicitly ensure an exact count of instances, used with
state=active/present. If specified as C(yes) and I(count) is less than
the servers matched, servers will be deleted to match the count. If
          the number of matched servers is fewer than specified in I(count),
additional servers will be added.
default: no
choices:
- "yes"
- "no"
version_added: 1.4
extra_client_args:
description:
- A hash of key/value pairs to be used when creating the cloudservers
client. This is considered an advanced option, use it wisely and
with caution.
version_added: 1.6
extra_create_args:
description:
- A hash of key/value pairs to be used when creating a new server.
This is considered an advanced option, use it wisely and with caution.
version_added: 1.6
files:
description:
- Files to insert into the instance. remotefilename:localcontent
default: null
flavor:
description:
- flavor to use for the instance
default: null
group:
description:
- host group to assign to server, is also used for idempotent operations
to ensure a specific number of instances
version_added: 1.4
image:
description:
- image to use for the instance. Can be an C(id), C(human_id) or C(name).
With I(boot_from_volume), a Cloud Block Storage volume will be created
with this image
default: null
instance_ids:
description:
- list of instance ids, currently only used when state='absent' to
remove instances
version_added: 1.4
key_name:
description:
- key pair to use on the instance
default: null
aliases:
- keypair
meta:
description:
- A hash of metadata to associate with the instance
default: null
name:
description:
- Name to give the instance
default: null
networks:
description:
- The network to attach to the instances. If specified, you must include
ALL networks including the public and private interfaces. Can be C(id)
or C(label).
default:
- public
- private
version_added: 1.4
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
user_data:
description:
- Data to be uploaded to the servers config drive. This option implies
I(config_drive). Can be a file path or a string
version_added: 1.7
wait:
description:
- wait for the instance to be in state 'running' before returning
default: "no"
choices:
- "yes"
- "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author:
- "Jesse Keating (@j2sol)"
- "Matt Martz (@sivel)"
notes:
- I(exact_count) can be "destructive" if the number of running servers in
the I(group) is larger than that specified in I(count). In such a case, the
I(state) is effectively set to C(absent) and the extra servers are deleted.
In the case of deletion, the returned data structure will have C(action)
set to C(delete), and the oldest servers in the group will be deleted.
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Build a Cloud Server
gather_facts: False
tasks:
- name: Server build request
local_action:
module: rax
credentials: ~/.raxpub
name: rax-test1
flavor: 5
image: b11d9567-e412-4255-96b9-bd63ab23bcfe
key_name: my_rackspace_key
files:
/root/test.txt: /home/localuser/test.txt
wait: yes
state: present
networks:
- private
- public
register: rax
- name: Build an exact count of cloud servers with incremented names
hosts: local
gather_facts: False
tasks:
- name: Server build requests
local_action:
module: rax
credentials: ~/.raxpub
name: test%03d.example.org
flavor: performance1-1
image: ubuntu-1204-lts-precise-pangolin
state: present
count: 10
count_offset: 10
exact_count: yes
group: test
wait: yes
register: rax
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_find_server_image(module, server, image, boot_volume):
if not image and boot_volume:
vol = rax_find_bootable_volume(module, pyrax, server,
exit=False)
if not vol:
return None
volume_image_metadata = vol.volume_image_metadata
vol_image_id = volume_image_metadata.get('image_id')
if vol_image_id:
server_image = rax_find_image(module, pyrax,
vol_image_id, exit=False)
if server_image:
server.image = dict(id=server_image)
# Match image IDs taking care of boot from volume
if image and not server.image:
vol = rax_find_bootable_volume(module, pyrax, server)
volume_image_metadata = vol.volume_image_metadata
vol_image_id = volume_image_metadata.get('image_id')
if not vol_image_id:
return None
server_image = rax_find_image(module, pyrax,
vol_image_id, exit=False)
if image != server_image:
return None
server.image = dict(id=server_image)
elif image and server.image['id'] != image:
return None
return server.image
def create(module, names=[], flavor=None, image=None, meta={}, key_name=None,
files={}, wait=True, wait_timeout=300, disk_config=None,
group=None, nics=[], extra_create_args={}, user_data=None,
config_drive=False, existing=[], block_device_mapping_v2=[]):
cs = pyrax.cloudservers
changed = False
if user_data:
config_drive = True
if user_data and os.path.isfile(user_data):
try:
f = open(user_data)
user_data = f.read()
f.close()
except Exception, e:
module.fail_json(msg='Failed to load %s' % user_data)
# Handle the file contents
for rpath in files.keys():
lpath = os.path.expanduser(files[rpath])
try:
fileobj = open(lpath, 'r')
files[rpath] = fileobj.read()
fileobj.close()
except Exception, e:
module.fail_json(msg='Failed to load %s' % lpath)
try:
servers = []
bdmv2 = block_device_mapping_v2
for name in names:
servers.append(cs.servers.create(name=name, image=image,
flavor=flavor, meta=meta,
key_name=key_name,
files=files, nics=nics,
disk_config=disk_config,
config_drive=config_drive,
userdata=user_data,
block_device_mapping_v2=bdmv2,
**extra_create_args))
except Exception, e:
if e.message:
msg = str(e.message)
else:
msg = repr(e)
module.fail_json(msg=msg)
else:
changed = True
if wait:
end_time = time.time() + wait_timeout
infinite = wait_timeout == 0
while infinite or time.time() < end_time:
for server in servers:
try:
server.get()
except:
                    # if polling fails, treat the server as errored
                    server.status = 'ERROR'
if not filter(lambda s: s.status not in FINAL_STATUSES,
servers):
break
time.sleep(5)
success = []
error = []
timeout = []
for server in servers:
try:
server.get()
except:
            # if polling fails, treat the server as errored
            server.status = 'ERROR'
instance = rax_to_dict(server, 'server')
if server.status == 'ACTIVE' or not wait:
success.append(instance)
elif server.status == 'ERROR':
error.append(instance)
elif wait:
timeout.append(instance)
untouched = [rax_to_dict(s, 'server') for s in existing]
instances = success + untouched
results = {
'changed': changed,
'action': 'create',
'instances': instances,
'success': success,
'error': error,
'timeout': timeout,
'instance_ids': {
'instances': [i['id'] for i in instances],
'success': [i['id'] for i in success],
'error': [i['id'] for i in error],
'timeout': [i['id'] for i in timeout]
}
}
if timeout:
results['msg'] = 'Timeout waiting for all servers to build'
elif error:
results['msg'] = 'Failed to build all servers'
if 'msg' in results:
module.fail_json(**results)
else:
module.exit_json(**results)
def delete(module, instance_ids=[], wait=True, wait_timeout=300, kept=[]):
cs = pyrax.cloudservers
changed = False
instances = {}
servers = []
for instance_id in instance_ids:
servers.append(cs.servers.get(instance_id))
for server in servers:
try:
server.delete()
except Exception, e:
module.fail_json(msg=e.message)
else:
changed = True
instance = rax_to_dict(server, 'server')
instances[instance['id']] = instance
# If requested, wait for server deletion
if wait:
end_time = time.time() + wait_timeout
infinite = wait_timeout == 0
while infinite or time.time() < end_time:
for server in servers:
instance_id = server.id
try:
server.get()
except:
instances[instance_id]['status'] = 'DELETED'
instances[instance_id]['rax_status'] = 'DELETED'
if not filter(lambda s: s['status'] not in ('', 'DELETED',
'ERROR'),
instances.values()):
break
time.sleep(5)
timeout = filter(lambda s: s['status'] not in ('', 'DELETED', 'ERROR'),
instances.values())
error = filter(lambda s: s['status'] in ('ERROR'),
instances.values())
success = filter(lambda s: s['status'] in ('', 'DELETED'),
instances.values())
instances = [rax_to_dict(s, 'server') for s in kept]
results = {
'changed': changed,
'action': 'delete',
'instances': instances,
'success': success,
'error': error,
'timeout': timeout,
'instance_ids': {
'instances': [i['id'] for i in instances],
'success': [i['id'] for i in success],
'error': [i['id'] for i in error],
'timeout': [i['id'] for i in timeout]
}
}
if timeout:
results['msg'] = 'Timeout waiting for all servers to delete'
elif error:
results['msg'] = 'Failed to delete all servers'
if 'msg' in results:
module.fail_json(**results)
else:
module.exit_json(**results)
def cloudservers(module, state=None, name=None, flavor=None, image=None,
meta={}, key_name=None, files={}, wait=True, wait_timeout=300,
disk_config=None, count=1, group=None, instance_ids=[],
exact_count=False, networks=[], count_offset=0,
auto_increment=False, extra_create_args={}, user_data=None,
config_drive=False, boot_from_volume=False,
boot_volume=None, boot_volume_size=None,
boot_volume_terminate=False):
cs = pyrax.cloudservers
cnw = pyrax.cloud_networks
if not cnw:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if state == 'present' or (state == 'absent' and instance_ids is None):
if not boot_from_volume and not boot_volume and not image:
module.fail_json(msg='image is required for the "rax" module')
for arg, value in dict(name=name, flavor=flavor).iteritems():
if not value:
module.fail_json(msg='%s is required for the "rax" module' %
arg)
if boot_from_volume and not image and not boot_volume:
        module.fail_json(msg='image or boot_volume is required for the '
                             '"rax" module with boot_from_volume')
if boot_from_volume and image and not boot_volume_size:
module.fail_json(msg='boot_volume_size is required for the "rax" '
'module with boot_from_volume and image')
if boot_from_volume and image and boot_volume:
image = None
servers = []
# Add the group meta key
if group and 'group' not in meta:
meta['group'] = group
elif 'group' in meta and group is None:
group = meta['group']
# Normalize and ensure all metadata values are strings
for k, v in meta.items():
if isinstance(v, list):
meta[k] = ','.join(['%s' % i for i in v])
elif isinstance(v, dict):
meta[k] = json.dumps(v)
elif not isinstance(v, basestring):
meta[k] = '%s' % v
# When using state=absent with group, the absent block won't match the
# names properly. Use the exact_count functionality to decrease the count
# to the desired level
was_absent = False
if group is not None and state == 'absent':
exact_count = True
state = 'present'
was_absent = True
if image:
image = rax_find_image(module, pyrax, image)
nics = []
if networks:
for network in networks:
nics.extend(rax_find_network(module, pyrax, network))
# act on the state
if state == 'present':
        # Idempotently ensure a specific count of servers
if exact_count is not False:
# See if we can find servers that match our options
if group is None:
module.fail_json(msg='"group" must be provided when using '
'"exact_count"')
if auto_increment:
numbers = set()
# See if the name is a printf like string, if not append
# %d to the end
try:
name % 0
except TypeError, e:
if e.message.startswith('not all'):
name = '%s%%d' % name
else:
module.fail_json(msg=e.message)
# regex pattern to match printf formatting
pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
for server in cs.servers.list():
# Ignore DELETED servers
if server.status == 'DELETED':
continue
if server.metadata.get('group') == group:
servers.append(server)
match = re.search(pattern, server.name)
if match:
number = int(match.group(1))
numbers.add(number)
number_range = xrange(count_offset, count_offset + count)
available_numbers = list(set(number_range)
.difference(numbers))
else: # Not auto incrementing
for server in cs.servers.list():
# Ignore DELETED servers
if server.status == 'DELETED':
continue
if server.metadata.get('group') == group:
servers.append(server)
# available_numbers not needed here, we inspect auto_increment
# again later
# If state was absent but the count was changed,
# assume we only wanted to remove that number of instances
if was_absent:
diff = len(servers) - count
if diff < 0:
count = 0
else:
count = diff
if len(servers) > count:
# We have more servers than we need, set state='absent'
# and delete the extras, this should delete the oldest
state = 'absent'
kept = servers[:count]
del servers[:count]
instance_ids = []
for server in servers:
instance_ids.append(server.id)
delete(module, instance_ids=instance_ids, wait=wait,
wait_timeout=wait_timeout, kept=kept)
elif len(servers) < count:
# we have fewer servers than we need
if auto_increment:
# auto incrementing server numbers
names = []
name_slice = count - len(servers)
numbers_to_use = available_numbers[:name_slice]
for number in numbers_to_use:
names.append(name % number)
else:
# We are not auto incrementing server numbers,
# create a list of 'name' that matches how many we need
names = [name] * (count - len(servers))
else:
# we have the right number of servers, just return info
# about all of the matched servers
instances = []
instance_ids = []
for server in servers:
instances.append(rax_to_dict(server, 'server'))
instance_ids.append(server.id)
module.exit_json(changed=False, action=None,
instances=instances,
success=[], error=[], timeout=[],
instance_ids={'instances': instance_ids,
'success': [], 'error': [],
'timeout': []})
else: # not called with exact_count=True
if group is not None:
if auto_increment:
# we are auto incrementing server numbers, but not with
# exact_count
numbers = set()
# See if the name is a printf like string, if not append
# %d to the end
try:
name % 0
except TypeError, e:
if e.message.startswith('not all'):
name = '%s%%d' % name
else:
module.fail_json(msg=e.message)
# regex pattern to match printf formatting
pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
for server in cs.servers.list():
# Ignore DELETED servers
if server.status == 'DELETED':
continue
if server.metadata.get('group') == group:
servers.append(server)
match = re.search(pattern, server.name)
if match:
number = int(match.group(1))
numbers.add(number)
number_range = xrange(count_offset,
count_offset + count + len(numbers))
available_numbers = list(set(number_range)
.difference(numbers))
names = []
numbers_to_use = available_numbers[:count]
for number in numbers_to_use:
names.append(name % number)
else:
# Not auto incrementing
names = [name] * count
else:
# No group was specified, and not using exact_count
# Perform more simplistic matching
search_opts = {
'name': '^%s$' % name,
'flavor': flavor
}
servers = []
for server in cs.servers.list(search_opts=search_opts):
# Ignore DELETED servers
if server.status == 'DELETED':
continue
if not rax_find_server_image(module, server, image,
boot_volume):
continue
# Ignore servers with non matching metadata
if server.metadata != meta:
continue
servers.append(server)
if len(servers) >= count:
# We have more servers than were requested, don't do
# anything. Not running with exact_count=True, so we assume
# more is OK
instances = []
for server in servers:
instances.append(rax_to_dict(server, 'server'))
instance_ids = [i['id'] for i in instances]
module.exit_json(changed=False, action=None,
instances=instances, success=[], error=[],
timeout=[],
instance_ids={'instances': instance_ids,
'success': [], 'error': [],
'timeout': []})
            # We need more servers to reach our target; create names for
# them, we aren't performing auto_increment here
names = [name] * (count - len(servers))
block_device_mapping_v2 = []
if boot_from_volume:
mapping = {
'boot_index': '0',
'delete_on_termination': boot_volume_terminate,
'destination_type': 'volume',
}
if image:
mapping.update({
'uuid': image,
'source_type': 'image',
'volume_size': boot_volume_size,
})
image = None
elif boot_volume:
volume = rax_find_volume(module, pyrax, boot_volume)
mapping.update({
'uuid': pyrax.utils.get_id(volume),
'source_type': 'volume',
})
block_device_mapping_v2.append(mapping)
create(module, names=names, flavor=flavor, image=image,
meta=meta, key_name=key_name, files=files, wait=wait,
wait_timeout=wait_timeout, disk_config=disk_config, group=group,
nics=nics, extra_create_args=extra_create_args,
user_data=user_data, config_drive=config_drive,
existing=servers,
block_device_mapping_v2=block_device_mapping_v2)
elif state == 'absent':
if instance_ids is None:
# We weren't given an explicit list of server IDs to delete
# Let's match instead
search_opts = {
'name': '^%s$' % name,
'flavor': flavor
}
for server in cs.servers.list(search_opts=search_opts):
# Ignore DELETED servers
if server.status == 'DELETED':
continue
if not rax_find_server_image(module, server, image,
boot_volume):
continue
# Ignore servers with non matching metadata
if meta != server.metadata:
continue
servers.append(server)
# Build a list of server IDs to delete
instance_ids = []
for server in servers:
if len(instance_ids) < count:
instance_ids.append(server.id)
else:
break
if not instance_ids:
# No server IDs were matched for deletion, or no IDs were
# explicitly provided, just exit and don't do anything
module.exit_json(changed=False, action=None, instances=[],
success=[], error=[], timeout=[],
instance_ids={'instances': [],
'success': [], 'error': [],
'timeout': []})
delete(module, instance_ids=instance_ids, wait=wait,
wait_timeout=wait_timeout)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
auto_increment=dict(default=True, type='bool'),
boot_from_volume=dict(default=False, type='bool'),
boot_volume=dict(type='str'),
boot_volume_size=dict(type='int', default=100),
boot_volume_terminate=dict(type='bool', default=False),
config_drive=dict(default=False, type='bool'),
count=dict(default=1, type='int'),
count_offset=dict(default=1, type='int'),
disk_config=dict(choices=['auto', 'manual']),
exact_count=dict(default=False, type='bool'),
extra_client_args=dict(type='dict', default={}),
extra_create_args=dict(type='dict', default={}),
files=dict(type='dict', default={}),
flavor=dict(),
group=dict(),
image=dict(),
instance_ids=dict(type='list'),
key_name=dict(aliases=['keypair']),
meta=dict(type='dict', default={}),
name=dict(),
networks=dict(type='list', default=['public', 'private']),
service=dict(),
state=dict(default='present', choices=['present', 'absent']),
user_data=dict(no_log=True),
wait=dict(default=False, type='bool'),
wait_timeout=dict(default=300),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
service = module.params.get('service')
if service is not None:
module.fail_json(msg='The "service" attribute has been deprecated, '
'please remove "service: cloudservers" from your '
'playbook pertaining to the "rax" module')
auto_increment = module.params.get('auto_increment')
boot_from_volume = module.params.get('boot_from_volume')
boot_volume = module.params.get('boot_volume')
boot_volume_size = module.params.get('boot_volume_size')
boot_volume_terminate = module.params.get('boot_volume_terminate')
config_drive = module.params.get('config_drive')
count = module.params.get('count')
count_offset = module.params.get('count_offset')
disk_config = module.params.get('disk_config')
if disk_config:
disk_config = disk_config.upper()
exact_count = module.params.get('exact_count', False)
extra_client_args = module.params.get('extra_client_args')
extra_create_args = module.params.get('extra_create_args')
files = module.params.get('files')
flavor = module.params.get('flavor')
group = module.params.get('group')
image = module.params.get('image')
instance_ids = module.params.get('instance_ids')
key_name = module.params.get('key_name')
meta = module.params.get('meta')
name = module.params.get('name')
networks = module.params.get('networks')
state = module.params.get('state')
user_data = module.params.get('user_data')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
setup_rax_module(module, pyrax)
if extra_client_args:
pyrax.cloudservers = pyrax.connect_to_cloudservers(
region=pyrax.cloudservers.client.region_name,
**extra_client_args)
client = pyrax.cloudservers.client
if 'bypass_url' in extra_client_args:
client.management_url = extra_client_args['bypass_url']
if pyrax.cloudservers is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
cloudservers(module, state=state, name=name, flavor=flavor,
image=image, meta=meta, key_name=key_name, files=files,
wait=wait, wait_timeout=wait_timeout, disk_config=disk_config,
count=count, group=group, instance_ids=instance_ids,
exact_count=exact_count, networks=networks,
count_offset=count_offset, auto_increment=auto_increment,
extra_create_args=extra_create_args, user_data=user_data,
config_drive=config_drive, boot_from_volume=boot_from_volume,
boot_volume=boot_volume, boot_volume_size=boot_volume_size,
boot_volume_terminate=boot_volume_terminate)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
main()
|
gpl-3.0
|
sajuptpm/manila
|
contrib/tempest/tempest/api/share/test_shares_actions_negative.py
|
2
|
5001
|
# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib import exceptions as lib_exc # noqa
import testtools # noqa
from tempest.api.share import base
from tempest import clients_share as clients
from tempest import config_share as config
from tempest import test
CONF = config.CONF
class SharesActionsNegativeTest(base.BaseSharesTest):
@classmethod
def resource_setup(cls):
super(SharesActionsNegativeTest, cls).resource_setup()
cls.share = cls.create_share(
size=1,
)
@test.attr(type=["negative", ])
@testtools.skipUnless(
CONF.share.run_extend_tests,
"Share extend tests are disabled.")
def test_share_extend_over_quota(self):
tenant_quotas = self.shares_client.show_quotas(
self.shares_client.tenant_id)
new_size = int(tenant_quotas["gigabytes"]) + 1
# extend share with over quota and check result
self.assertRaises(lib_exc.Forbidden,
self.shares_client.extend_share,
self.share['id'],
new_size)
@test.attr(type=["negative", ])
@testtools.skipUnless(
CONF.share.run_extend_tests,
"Share extend tests are disabled.")
def test_share_extend_with_less_size(self):
new_size = int(self.share['size']) - 1
# extend share with invalid size and check result
self.assertRaises(lib_exc.BadRequest,
self.shares_client.extend_share,
self.share['id'],
new_size)
@test.attr(type=["negative", ])
@testtools.skipUnless(
CONF.share.run_extend_tests,
"Share extend tests are disabled.")
def test_share_extend_with_same_size(self):
new_size = int(self.share['size'])
# extend share with invalid size and check result
self.assertRaises(lib_exc.BadRequest,
self.shares_client.extend_share,
self.share['id'],
new_size)
@test.attr(type=["negative", ])
@testtools.skipUnless(
CONF.share.run_extend_tests,
"Share extend tests are disabled.")
def test_share_extend_with_invalid_share_state(self):
share = self.create_share(size=1, cleanup_in_class=False)
new_size = int(share['size']) + 1
# set "error" state
admin_client = clients.AdminManager().shares_client
admin_client.reset_state(share['id'])
# run extend operation on same share and check result
self.assertRaises(lib_exc.BadRequest,
self.shares_client.extend_share,
share['id'],
new_size)
@test.attr(type=["negative", ])
@testtools.skipUnless(
CONF.share.run_shrink_tests,
"Share shrink tests are disabled.")
def test_share_shrink_with_greater_size(self):
new_size = int(self.share['size']) + 1
# shrink share with invalid size and check result
self.assertRaises(lib_exc.BadRequest,
self.shares_client.shrink_share,
self.share['id'],
new_size)
@test.attr(type=["negative", ])
@testtools.skipUnless(
CONF.share.run_shrink_tests,
"Share shrink tests are disabled.")
def test_share_shrink_with_same_size(self):
new_size = int(self.share['size'])
# shrink share with invalid size and check result
self.assertRaises(lib_exc.BadRequest,
self.shares_client.shrink_share,
self.share['id'],
new_size)
@test.attr(type=["negative", ])
@testtools.skipUnless(
CONF.share.run_shrink_tests,
"Share shrink tests are disabled.")
def test_share_shrink_with_invalid_share_state(self):
share = self.create_share(size=2, cleanup_in_class=False)
new_size = int(share['size']) - 1
# set "error" state
admin_client = clients.AdminManager().shares_client
admin_client.reset_state(share['id'])
# run shrink operation on same share and check result
self.assertRaises(lib_exc.BadRequest,
self.shares_client.shrink_share,
share['id'],
new_size)
|
apache-2.0
|
cjlee112/socraticqs2
|
mysite/fsm/views.py
|
1
|
3313
|
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from fsm.models import (
FSMState,
FSMBadUserError,
FSMStackResumeError,
)
from ct.views import PageData
from ct.forms import (
set_crispy_action,
CancelForm,
LogoutForm
)
@login_required
def fsm_node(request, node_id):
"""
Display standard FSM node explanation & next steps page.
"""
pageData = PageData(request)
if not pageData.fsmStack.state or pageData.fsmStack.state.fsmNode.pk != int(node_id):
return HttpResponseRedirect('/ct/')
if request.method == 'POST' and 'fsmedge' in request.POST:
return pageData.fsm_redirect(request, request.POST['fsmedge'])
addNextButton = (pageData.fsmStack.state.fsmNode.outgoing.count() == 1)
return pageData.render(
request, 'fsm/fsm_node.html', addNextButton=addNextButton
)
@login_required
def fsm_status(request):
"""
Display Activity Center UI.
"""
pageData = PageData(request)
cancelForm = logoutForm = None
nextSteps = ()
if request.method == 'POST':
task = request.POST.get('task', '')
if 'fsmstate_id' in request.POST:
try:
url = pageData.fsmStack.resume(
request, request.POST['fsmstate_id']
)
except FSMBadUserError:
pageData.errorMessage = 'Cannot access activity belonging to another user'
except FSMStackResumeError:
pageData.errorMessage = """This activity is waiting for a sub-activity to complete,
and hence cannot be resumed (you should complete or cancel
the sub-activity first)."""
except FSMState.DoesNotExist:
pageData.errorMessage = 'Activity not found!'
else: # redirect to this activity
return HttpResponseRedirect(url)
elif not pageData.fsmStack.state:
pageData.errorMessage = 'No activity ongoing currently!'
elif 'abort' == task:
pageData.fsmStack.pop(request, eventName='exceptCancel')
pageData.statusMessage = 'Activity canceled.'
# follow this optional edge
elif pageData.fsmStack.state.fsmNode.outgoing.filter(name=task).count() > 0:
return pageData.fsm_redirect(request, task, vagueEvents=())
if not pageData.fsmStack.state: # search for unfinished activities
unfinished = FSMState.objects.filter(user=request.user, children__isnull=True)
else: # provide options to cancel or quit this activity
unfinished = None
cancelForm = CancelForm()
set_crispy_action(request.path, cancelForm)
edges = pageData.fsmStack.state.fsmNode.outgoing
nextSteps = edges.filter(showOption=True)
logoutForm = LogoutForm()
set_crispy_action(
reverse('ct:person_profile', args=(request.user.id,)),
logoutForm
)
return pageData.render(
request,
'fsm/fsm_status.html',
dict(
cancelForm=cancelForm,
unfinished=unfinished,
logoutForm=logoutForm,
nextSteps=nextSteps
)
)
|
apache-2.0
|
fnouama/intellij-community
|
plugins/hg4idea/testData/bin/mercurial/pvec.py
|
94
|
5989
|
# pvec.py - probabilistic vector clocks for Mercurial
#
# Copyright 2012 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''
A "pvec" is a changeset property based on the theory of vector clocks
that can be compared to discover relatedness without consulting a
graph. This can be useful for tasks like determining how a
disconnected patch relates to a repository.
Currently a pvec consists of 448 bits, of which 24 are 'depth' and the
remainder are a bit vector. It is represented as a 70-character base85
string.
Construction:
- a root changeset has a depth of 0 and a bit vector based on its hash
- a normal commit has a changeset where depth is increased by one and
one bit vector bit is flipped based on its hash
- a merge changeset pvec is constructed by copying changes from one pvec into
the other to balance its depth
Properties:
- for linear changes, difference in depth is always <= hamming distance
- otherwise, changes are probably divergent
- when hamming distance is < 200, we can reliably detect when pvecs are near
Issues:
- hamming distance ceases to work over distances of ~ 200
- detecting divergence is less accurate when the common ancestor is very close
to either revision or total distance is high
- this could probably be improved by modeling the relation between
delta and hdist
Uses:
- a patch pvec can be used to locate the nearest available common ancestor for
resolving conflicts
- ordering of patches can be established without a DAG
- two head pvecs can be compared to determine whether push/pull/merge is needed
and approximately how many changesets are involved
- can be used to find a heuristic divergence measure between changesets on
different branches
'''
import base85, util
from node import nullrev
_size = 448 # 70 chars b85-encoded
_bytes = _size / 8
_depthbits = 24
_depthbytes = _depthbits / 8
_vecbytes = _bytes - _depthbytes
_vecbits = _vecbytes * 8
_radius = (_vecbits - 30) / 2 # high probability vectors are related
def _bin(bs):
'''convert a bytestring to a long'''
v = 0
for b in bs:
v = v * 256 + ord(b)
return v
def _str(v, l):
bs = ""
for p in xrange(l):
bs = chr(v & 255) + bs
v >>= 8
return bs
def _split(b):
'''depth and bitvec'''
return _bin(b[:_depthbytes]), _bin(b[_depthbytes:])
def _join(depth, bitvec):
return _str(depth, _depthbytes) + _str(bitvec, _vecbytes)
def _hweight(x):
c = 0
while x:
if x & 1:
c += 1
x >>= 1
return c
_htab = [_hweight(x) for x in xrange(256)]
def _hamming(a, b):
'''find the hamming distance between two longs'''
d = a ^ b
c = 0
while d:
c += _htab[d & 0xff]
d >>= 8
return c
def _mergevec(x, y, c):
# Ideally, this function would be x ^ y ^ ancestor, but finding
# ancestors is a nuisance. So instead we find the minimal number
# of changes to balance the depth and hamming distance
d1, v1 = x
d2, v2 = y
if d1 < d2:
d1, d2, v1, v2 = d2, d1, v2, v1
hdist = _hamming(v1, v2)
ddist = d1 - d2
v = v1
m = v1 ^ v2 # mask of different bits
i = 1
if hdist > ddist:
# if delta = 10 and hdist = 100, then we need to go up 55 steps
# to the ancestor and down 45
changes = (hdist - ddist + 1) / 2
else:
# must make at least one change
changes = 1
depth = d1 + changes
# copy changes from v2
if m:
while changes:
if m & i:
v ^= i
changes -= 1
i <<= 1
else:
v = _flipbit(v, c)
return depth, v
def _flipbit(v, node):
# converting bit strings to longs is slow
bit = (hash(node) & 0xffffffff) % _vecbits
return v ^ (1<<bit)
def ctxpvec(ctx):
'''construct a pvec for ctx while filling in the cache'''
r = ctx._repo
if not util.safehasattr(r, "_pveccache"):
r._pveccache = {}
pvc = r._pveccache
if ctx.rev() not in pvc:
cl = r.changelog
for n in xrange(ctx.rev() + 1):
if n not in pvc:
node = cl.node(n)
p1, p2 = cl.parentrevs(n)
if p1 == nullrev:
# start with a 'random' vector at root
pvc[n] = (0, _bin((node * 3)[:_vecbytes]))
elif p2 == nullrev:
d, v = pvc[p1]
pvc[n] = (d + 1, _flipbit(v, node))
else:
pvc[n] = _mergevec(pvc[p1], pvc[p2], node)
bs = _join(*pvc[ctx.rev()])
return pvec(base85.b85encode(bs))
class pvec(object):
def __init__(self, hashorctx):
if isinstance(hashorctx, str):
self._bs = hashorctx
self._depth, self._vec = _split(base85.b85decode(hashorctx))
else:
self._vec = ctxpvec(hashorctx)
def __str__(self):
return self._bs
def __eq__(self, b):
return self._vec == b._vec and self._depth == b._depth
def __lt__(self, b):
delta = b._depth - self._depth
if delta < 0:
return False # always correct
if _hamming(self._vec, b._vec) > delta:
return False
return True
def __gt__(self, b):
return b < self
def __or__(self, b):
delta = abs(b._depth - self._depth)
if _hamming(self._vec, b._vec) <= delta:
return False
return True
def __sub__(self, b):
if self | b:
raise ValueError("concurrent pvecs")
return self._depth - b._depth
def distance(self, b):
d = abs(b._depth - self._depth)
h = _hamming(self._vec, b._vec)
return max(d, h)
def near(self, b):
        dist = abs(b._depth - self._depth)
if dist > _radius or _hamming(self._vec, b._vec) > _radius:
return False
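# --- Illustration (added; not part of the original module) ---
# The comparison rules above reduce to depth deltas plus hamming distance;
# a quick sanity check of the _hamming helper on small integers:
#   _hamming(0b1010, 0b0011) == 2    # two differing bits
#   _hamming(255, 0) == 8            # all eight low bits differ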
|
apache-2.0
|
sysalexis/kbengine
|
kbe/res/scripts/common/Lib/test/test_json/test_scanstring.py
|
88
|
4645
|
import sys
from test.test_json import PyTest, CTest
class TestScanstring:
def test_scanstring(self):
scanstring = self.json.decoder.scanstring
self.assertEqual(
scanstring('"z\U0001d120x"', 1, True),
('z\U0001d120x', 5))
self.assertEqual(
scanstring('"\\u007b"', 1, True),
('{', 8))
self.assertEqual(
scanstring('"A JSON payload should be an object or array, not a string."', 1, True),
('A JSON payload should be an object or array, not a string.', 60))
self.assertEqual(
scanstring('["Unclosed array"', 2, True),
('Unclosed array', 17))
self.assertEqual(
scanstring('["extra comma",]', 2, True),
('extra comma', 14))
self.assertEqual(
scanstring('["double extra comma",,]', 2, True),
('double extra comma', 21))
self.assertEqual(
scanstring('["Comma after the close"],', 2, True),
('Comma after the close', 24))
self.assertEqual(
scanstring('["Extra close"]]', 2, True),
('Extra close', 14))
self.assertEqual(
scanstring('{"Extra comma": true,}', 2, True),
('Extra comma', 14))
self.assertEqual(
scanstring('{"Extra value after close": true} "misplaced quoted value"', 2, True),
('Extra value after close', 26))
self.assertEqual(
scanstring('{"Illegal expression": 1 + 2}', 2, True),
('Illegal expression', 21))
self.assertEqual(
scanstring('{"Illegal invocation": alert()}', 2, True),
('Illegal invocation', 21))
self.assertEqual(
scanstring('{"Numbers cannot have leading zeroes": 013}', 2, True),
('Numbers cannot have leading zeroes', 37))
self.assertEqual(
scanstring('{"Numbers cannot be hex": 0x14}', 2, True),
('Numbers cannot be hex', 24))
self.assertEqual(
scanstring('[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', 21, True),
('Too deep', 30))
self.assertEqual(
scanstring('{"Missing colon" null}', 2, True),
('Missing colon', 16))
self.assertEqual(
scanstring('{"Double colon":: null}', 2, True),
('Double colon', 15))
self.assertEqual(
scanstring('{"Comma instead of colon", null}', 2, True),
('Comma instead of colon', 25))
self.assertEqual(
scanstring('["Colon instead of comma": false]', 2, True),
('Colon instead of comma', 25))
self.assertEqual(
scanstring('["Bad value", truth]', 2, True),
('Bad value', 12))
def test_surrogates(self):
scanstring = self.json.decoder.scanstring
def assertScan(given, expect):
self.assertEqual(scanstring(given, 1, True),
(expect, len(given)))
assertScan('"z\\ud834\\u0079x"', 'z\ud834yx')
assertScan('"z\\ud834\\udd20x"', 'z\U0001d120x')
assertScan('"z\\ud834\\ud834\\udd20x"', 'z\ud834\U0001d120x')
assertScan('"z\\ud834x"', 'z\ud834x')
assertScan('"z\\ud834\udd20x12345"', 'z\ud834\udd20x12345')
assertScan('"z\\udd20x"', 'z\udd20x')
assertScan('"z\ud834\udd20x"', 'z\ud834\udd20x')
assertScan('"z\ud834\\udd20x"', 'z\ud834\udd20x')
assertScan('"z\ud834x"', 'z\ud834x')
def test_bad_escapes(self):
scanstring = self.json.decoder.scanstring
bad_escapes = [
'"\\"',
'"\\x"',
'"\\u"',
'"\\u0"',
'"\\u01"',
'"\\u012"',
'"\\uz012"',
'"\\u0z12"',
'"\\u01z2"',
'"\\u012z"',
'"\\u0x12"',
'"\\u0X12"',
'"\\ud834\\"',
'"\\ud834\\u"',
'"\\ud834\\ud"',
'"\\ud834\\udd"',
'"\\ud834\\udd2"',
'"\\ud834\\uzdd2"',
'"\\ud834\\udzd2"',
'"\\ud834\\uddz2"',
'"\\ud834\\udd2z"',
'"\\ud834\\u0x20"',
'"\\ud834\\u0X20"',
]
for s in bad_escapes:
with self.assertRaises(ValueError, msg=s):
scanstring(s, 1, True)
def test_overflow(self):
with self.assertRaises(OverflowError):
self.json.decoder.scanstring(b"xxx", sys.maxsize+1)
class TestPyScanstring(TestScanstring, PyTest): pass
class TestCScanstring(TestScanstring, CTest): pass
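# --- Illustrative sketch (not part of the original tests) ---
# scanstring(s, end, strict) starts scanning just *after* an opening quote and
# returns (decoded_string, end_index), where end_index points one past the
# closing quote -- which is why the expected indexes above are counted from
# the start of the whole document rather than from the quoted string. A
# standalone example using the pure-Python decoder (the C scanner behaves the
# same way); the helper below is illustrative only and not part of the suite:
def _example_scanstring():
    from json import decoder
    # The literal '"hi\\nthere"' is 11 characters long; scanning starts at 1.
    return decoder.py_scanstring('"hi\\nthere"', 1, True)  # ('hi\nthere', 11)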
|
lgpl-3.0
|
aleju/imgaug
|
test/augmenters/test_edges.py
|
2
|
28288
|
from __future__ import print_function, division, absolute_import
import itertools
import sys
# TestCase.subTest() was only added to unittest in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
try:
import cPickle as pickle
except ImportError:
import pickle
import numpy as np
import cv2
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import random as iarandom
from imgaug.testutils import (reseed, runtest_pickleable_uint8_img,
is_parameter_instance, remove_prefetching)
class TestRandomColorsBinaryImageColorizer(unittest.TestCase):
def setUp(self):
reseed()
def test___init___default_settings(self):
colorizer = iaa.RandomColorsBinaryImageColorizer()
assert is_parameter_instance(colorizer.color_true, iap.DiscreteUniform)
assert is_parameter_instance(colorizer.color_false, iap.DiscreteUniform)
assert colorizer.color_true.a.value == 0
assert colorizer.color_true.b.value == 255
assert colorizer.color_false.a.value == 0
assert colorizer.color_false.b.value == 255
    def test___init___deterministic_settings(self):
colorizer = iaa.RandomColorsBinaryImageColorizer(color_true=1,
color_false=2)
assert is_parameter_instance(colorizer.color_true, iap.Deterministic)
assert is_parameter_instance(colorizer.color_false, iap.Deterministic)
assert colorizer.color_true.value == 1
assert colorizer.color_false.value == 2
def test___init___tuple_and_list(self):
colorizer = iaa.RandomColorsBinaryImageColorizer(
color_true=(0, 100), color_false=[200, 201, 202])
assert is_parameter_instance(colorizer.color_true, iap.DiscreteUniform)
assert is_parameter_instance(colorizer.color_false, iap.Choice)
assert colorizer.color_true.a.value == 0
assert colorizer.color_true.b.value == 100
assert colorizer.color_false.a[0] == 200
assert colorizer.color_false.a[1] == 201
assert colorizer.color_false.a[2] == 202
def test___init___stochastic_parameters(self):
colorizer = iaa.RandomColorsBinaryImageColorizer(
color_true=iap.DiscreteUniform(0, 100),
color_false=iap.Choice([200, 201, 202]))
assert is_parameter_instance(colorizer.color_true, iap.DiscreteUniform)
assert is_parameter_instance(colorizer.color_false, iap.Choice)
assert colorizer.color_true.a.value == 0
assert colorizer.color_true.b.value == 100
assert colorizer.color_false.a[0] == 200
assert colorizer.color_false.a[1] == 201
assert colorizer.color_false.a[2] == 202
def test__draw_samples(self):
class _ListSampler(iap.StochasticParameter):
def __init__(self, offset):
super(_ListSampler, self).__init__()
self.offset = offset
self.last_random_state = None
def _draw_samples(self, size, random_state=None):
assert size == (3,)
self.last_random_state = random_state
return np.uint8([0, 1, 2]) + self.offset
colorizer = iaa.RandomColorsBinaryImageColorizer(
color_true=_ListSampler(0),
color_false=_ListSampler(1))
random_state = iarandom.RNG(42)
color_true, color_false = colorizer._draw_samples(random_state)
assert np.array_equal(color_true, [0, 1, 2])
assert np.array_equal(color_false, [1, 2, 3])
assert colorizer.color_true.last_random_state.equals(random_state)
assert colorizer.color_false.last_random_state.equals(random_state)
def test_colorize__one_channel(self):
colorizer = iaa.RandomColorsBinaryImageColorizer(
color_true=100,
color_false=10)
random_state = iarandom.RNG(42)
# input image has shape (H,W,1)
image = np.zeros((5, 5, 1), dtype=np.uint8)
image[:, 0:3, :] = 255
image_binary = np.zeros((5, 5), dtype=bool)
image_binary[:, 0:3] = True
image_color = colorizer.colorize(
image_binary, image, nth_image=0, random_state=random_state)
assert image_color.ndim == 3
assert image_color.shape[-1] == 1
assert np.all(image_color[image_binary] == 100)
assert np.all(image_color[~image_binary] == 10)
def test_colorize__three_channels(self):
colorizer = iaa.RandomColorsBinaryImageColorizer(
color_true=100,
color_false=10)
random_state = iarandom.RNG(42)
# input image has shape (H,W,3)
image = np.zeros((5, 5, 3), dtype=np.uint8)
image[:, 0:3, :] = 255
image_binary = np.zeros((5, 5), dtype=bool)
image_binary[:, 0:3] = True
image_color = colorizer.colorize(
image_binary, image, nth_image=0, random_state=random_state)
assert image_color.ndim == 3
assert image_color.shape[-1] == 3
assert np.all(image_color[image_binary] == 100)
assert np.all(image_color[~image_binary] == 10)
def test_colorize__four_channels(self):
colorizer = iaa.RandomColorsBinaryImageColorizer(
color_true=100,
color_false=10)
random_state = iarandom.RNG(42)
# input image has shape (H,W,4)
image = np.zeros((5, 5, 4), dtype=np.uint8)
image[:, 0:3, 0:3] = 255
image[:, 1:4, 3] = 123 # set some content for alpha channel
image_binary = np.zeros((5, 5), dtype=bool)
image_binary[:, 0:3] = True
image_color = colorizer.colorize(
image_binary, image, nth_image=0, random_state=random_state)
assert image_color.ndim == 3
assert image_color.shape[-1] == 4
assert np.all(image_color[image_binary, 0:3] == 100)
assert np.all(image_color[~image_binary, 0:3] == 10)
# alpha channel must have been kept untouched
assert np.all(image_color[:, :, 3:4] == image[:, :, 3:4])
def test_pickleable(self):
colorizer = iaa.RandomColorsBinaryImageColorizer(
color_true=(50, 100),
color_false=(10, 50))
colorizer_pkl = pickle.loads(pickle.dumps(colorizer))
random_state = iarandom.RNG(1)
color_true, color_false = colorizer._draw_samples(
random_state.copy())
color_true_pkl, color_false_pkl = colorizer_pkl._draw_samples(
random_state.copy())
assert np.array_equal(color_true, color_true_pkl)
assert np.array_equal(color_false, color_false_pkl)
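# --- Illustrative sketch (not part of the original test suite) ---
# Direct use of the colorizer exercised above, mirroring the calls made in the
# test_colorize__* methods; shapes and parameter values here are arbitrary and
# the helper is never called by the tests:
def _example_colorize():
    colorizer = iaa.RandomColorsBinaryImageColorizer(color_true=255,
                                                     color_false=0)
    mask = np.zeros((4, 4), dtype=bool)
    mask[:, :2] = True
    image = np.zeros((4, 4, 3), dtype=np.uint8)
    return colorizer.colorize(mask, image, nth_image=0,
                              random_state=iarandom.RNG(0))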
class TestCanny(unittest.TestCase):
def test___init___default_settings(self):
aug = iaa.Canny()
assert is_parameter_instance(aug.alpha, iap.Uniform)
assert isinstance(aug.hysteresis_thresholds, tuple)
assert is_parameter_instance(aug.sobel_kernel_size, iap.DiscreteUniform)
assert isinstance(aug.colorizer, iaa.RandomColorsBinaryImageColorizer)
assert np.isclose(aug.alpha.a.value, 0.0)
assert np.isclose(aug.alpha.b.value, 1.0)
assert len(aug.hysteresis_thresholds) == 2
assert is_parameter_instance(aug.hysteresis_thresholds[0],
iap.DiscreteUniform)
assert np.isclose(aug.hysteresis_thresholds[0].a.value, 100-40)
assert np.isclose(aug.hysteresis_thresholds[0].b.value, 100+40)
assert is_parameter_instance(aug.hysteresis_thresholds[1],
iap.DiscreteUniform)
assert np.isclose(aug.hysteresis_thresholds[1].a.value, 200-40)
assert np.isclose(aug.hysteresis_thresholds[1].b.value, 200+40)
assert aug.sobel_kernel_size.a.value == 3
assert aug.sobel_kernel_size.b.value == 7
assert is_parameter_instance(aug.colorizer.color_true,
iap.DiscreteUniform)
assert is_parameter_instance(aug.colorizer.color_false,
iap.DiscreteUniform)
assert aug.colorizer.color_true.a.value == 0
assert aug.colorizer.color_true.b.value == 255
assert aug.colorizer.color_false.a.value == 0
assert aug.colorizer.color_false.b.value == 255
def test___init___custom_settings(self):
aug = iaa.Canny(
alpha=0.2,
hysteresis_thresholds=([0, 1, 2], iap.DiscreteUniform(1, 10)),
sobel_kernel_size=[3, 5],
colorizer=iaa.RandomColorsBinaryImageColorizer(
color_true=10, color_false=20)
)
assert is_parameter_instance(aug.alpha, iap.Deterministic)
assert isinstance(aug.hysteresis_thresholds, tuple)
assert is_parameter_instance(aug.sobel_kernel_size, iap.Choice)
assert isinstance(aug.colorizer, iaa.RandomColorsBinaryImageColorizer)
assert np.isclose(aug.alpha.value, 0.2)
assert len(aug.hysteresis_thresholds) == 2
assert is_parameter_instance(aug.hysteresis_thresholds[0], iap.Choice)
assert aug.hysteresis_thresholds[0].a == [0, 1, 2]
assert is_parameter_instance(aug.hysteresis_thresholds[1],
iap.DiscreteUniform)
assert np.isclose(aug.hysteresis_thresholds[1].a.value, 1)
assert np.isclose(aug.hysteresis_thresholds[1].b.value, 10)
assert is_parameter_instance(aug.sobel_kernel_size, iap.Choice)
assert aug.sobel_kernel_size.a == [3, 5]
assert is_parameter_instance(aug.colorizer.color_true,
iap.Deterministic)
assert is_parameter_instance(aug.colorizer.color_false,
iap.Deterministic)
assert aug.colorizer.color_true.value == 10
assert aug.colorizer.color_false.value == 20
def test___init___single_value_hysteresis(self):
aug = iaa.Canny(
alpha=0.2,
hysteresis_thresholds=[0, 1, 2],
sobel_kernel_size=[3, 5],
colorizer=iaa.RandomColorsBinaryImageColorizer(
color_true=10, color_false=20)
)
assert is_parameter_instance(aug.alpha, iap.Deterministic)
assert is_parameter_instance(aug.hysteresis_thresholds, iap.Choice)
assert is_parameter_instance(aug.sobel_kernel_size, iap.Choice)
assert isinstance(aug.colorizer, iaa.RandomColorsBinaryImageColorizer)
assert np.isclose(aug.alpha.value, 0.2)
assert aug.hysteresis_thresholds.a == [0, 1, 2]
assert is_parameter_instance(aug.sobel_kernel_size, iap.Choice)
assert aug.sobel_kernel_size.a == [3, 5]
assert is_parameter_instance(aug.colorizer.color_true,
iap.Deterministic)
assert is_parameter_instance(aug.colorizer.color_false,
iap.Deterministic)
assert aug.colorizer.color_true.value == 10
assert aug.colorizer.color_false.value == 20
def test__draw_samples__single_value_hysteresis(self):
seed = 1
nb_images = 1000
aug = iaa.Canny(
alpha=0.2,
hysteresis_thresholds=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
sobel_kernel_size=[3, 5, 7],
random_state=iarandom.RNG(seed))
aug.alpha = remove_prefetching(aug.alpha)
aug.hysteresis_thresholds = remove_prefetching(
aug.hysteresis_thresholds)
aug.sobel_kernel_size = remove_prefetching(aug.sobel_kernel_size)
example_image = np.zeros((5, 5, 3), dtype=np.uint8)
samples = aug._draw_samples([example_image] * nb_images,
random_state=iarandom.RNG(seed))
alpha_samples = samples[0]
hthresh_samples = samples[1]
sobel_samples = samples[2]
rss = iarandom.RNG(seed).duplicate(4)
alpha_expected = iap.Deterministic(0.2).draw_samples((nb_images,),
rss[0])
hthresh_expected = iap.Choice(
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).draw_samples((nb_images, 2),
rss[1])
sobel_expected = iap.Choice([3, 5, 7]).draw_samples((nb_images,),
rss[2])
invalid = hthresh_expected[:, 0] > hthresh_expected[:, 1]
assert np.any(invalid)
hthresh_expected[invalid, :] = hthresh_expected[invalid, :][:, [1, 0]]
assert hthresh_expected.shape == (nb_images, 2)
assert not np.any(hthresh_expected[:, 0] > hthresh_expected[:, 1])
assert np.allclose(alpha_samples, alpha_expected)
assert np.allclose(hthresh_samples, hthresh_expected)
assert np.allclose(sobel_samples, sobel_expected)
def test__draw_samples__tuple_as_hysteresis(self):
seed = 1
nb_images = 10
aug = iaa.Canny(
alpha=0.2,
hysteresis_thresholds=([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
iap.DiscreteUniform(5, 100)),
sobel_kernel_size=[3, 5, 7],
random_state=iarandom.RNG(seed))
aug.alpha = remove_prefetching(aug.alpha)
aug.hysteresis_thresholds = (
remove_prefetching(aug.hysteresis_thresholds[0]),
remove_prefetching(aug.hysteresis_thresholds[1])
)
aug.sobel_kernel_size = remove_prefetching(aug.sobel_kernel_size)
example_image = np.zeros((5, 5, 3), dtype=np.uint8)
samples = aug._draw_samples([example_image] * nb_images,
random_state=iarandom.RNG(seed))
alpha_samples = samples[0]
hthresh_samples = samples[1]
sobel_samples = samples[2]
rss = iarandom.RNG(seed).duplicate(4)
alpha_expected = iap.Deterministic(0.2).draw_samples((nb_images,),
rss[0])
hthresh_expected = [None, None]
hthresh_expected[0] = iap.Choice(
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).draw_samples((nb_images,),
rss[1])
        # TODO simplify this to rss[2].randint(5, 100+1)
        # would currently be a bit uglier, because DiscreteUniform
        # samples two values for a and b first from rss[2]
hthresh_expected[1] = iap.DiscreteUniform(5, 100).draw_samples(
(nb_images,), rss[2])
hthresh_expected = np.stack(hthresh_expected, axis=-1)
sobel_expected = iap.Choice([3, 5, 7]).draw_samples((nb_images,),
rss[3])
invalid = hthresh_expected[:, 0] > hthresh_expected[:, 1]
hthresh_expected[invalid, :] = hthresh_expected[invalid, :][:, [1, 0]]
assert hthresh_expected.shape == (nb_images, 2)
assert not np.any(hthresh_expected[:, 0] > hthresh_expected[:, 1])
assert np.allclose(alpha_samples, alpha_expected)
assert np.allclose(hthresh_samples, hthresh_expected)
assert np.allclose(sobel_samples, sobel_expected)
def test_augment_images__alpha_is_zero(self):
aug = iaa.Canny(
alpha=0.0,
hysteresis_thresholds=(0, 10),
sobel_kernel_size=[3, 5, 7],
random_state=1)
image = np.arange(5*5*3).astype(np.uint8).reshape((5, 5, 3))
image_aug = aug.augment_image(image)
assert np.array_equal(image_aug, image)
def test_augment_images__alpha_is_one(self):
colorizer = iaa.RandomColorsBinaryImageColorizer(
color_true=254,
color_false=1
)
aug = iaa.Canny(
alpha=1.0,
hysteresis_thresholds=100,
sobel_kernel_size=3,
colorizer=colorizer,
random_state=1)
image_single_chan = np.uint8([
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0]
])
image = np.tile(image_single_chan[:, :, np.newaxis] * 128, (1, 1, 3))
# canny image, looks a bit unintuitive, but is what OpenCV returns
# can be checked via something like
# print("canny\n", cv2.Canny(image_single_chan*255, threshold1=100,
# threshold2=200,
# apertureSize=3,
# L2gradient=True))
image_canny = np.array([
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0, 0]
], dtype=bool)
image_aug_expected = np.copy(image)
image_aug_expected[image_canny] = 254
image_aug_expected[~image_canny] = 1
image_aug = aug.augment_image(image)
assert np.array_equal(image_aug, image_aug_expected)
def test_augment_images__single_channel(self):
colorizer = iaa.RandomColorsBinaryImageColorizer(
color_true=254,
color_false=1
)
aug = iaa.Canny(
alpha=1.0,
hysteresis_thresholds=100,
sobel_kernel_size=3,
colorizer=colorizer,
random_state=1)
image_single_chan = np.uint8([
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0]
])
image = image_single_chan[:, :, np.newaxis] * 128
# canny image, looks a bit unintuitive, but is what OpenCV returns
# can be checked via something like
# print("canny\n", cv2.Canny(image_single_chan*255, threshold1=100,
# threshold2=200,
# apertureSize=3,
# L2gradient=True))
image_canny = np.array([
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0, 0]
], dtype=bool)
image_aug_expected = np.copy(image)
image_aug_expected[image_canny] = int(0.299*254
+ 0.587*254
+ 0.114*254)
image_aug_expected[~image_canny] = int(0.299*1 + 0.587*1 + 0.114*1)
image_aug = aug.augment_image(image)
assert np.array_equal(image_aug, image_aug_expected)
def test_augment_images__four_channels(self):
colorizer = iaa.RandomColorsBinaryImageColorizer(
color_true=254,
color_false=1
)
aug = iaa.Canny(
alpha=1.0,
hysteresis_thresholds=100,
sobel_kernel_size=3,
colorizer=colorizer,
random_state=1)
image_single_chan = np.uint8([
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0]
])
image_alpha_channel = np.uint8([
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0]
]) * 255
image = np.tile(image_single_chan[:, :, np.newaxis] * 128, (1, 1, 3))
image = np.dstack([image, image_alpha_channel[:, :, np.newaxis]])
assert image.ndim == 3
assert image.shape[-1] == 4
# canny image, looks a bit unintuitive, but is what OpenCV returns
# can be checked via something like
# print("canny\n", cv2.Canny(image_single_chan*255, threshold1=100,
# threshold2=200,
# apertureSize=3,
# L2gradient=True))
image_canny = np.array([
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0, 0]
], dtype=bool)
image_aug_expected = np.copy(image)
image_aug_expected[image_canny, 0:3] = 254
image_aug_expected[~image_canny, 0:3] = 1
image_aug = aug.augment_image(image)
assert np.array_equal(image_aug, image_aug_expected)
def test_augment_images__random_color(self):
class _Color(iap.StochasticParameter):
def __init__(self, values):
super(_Color, self).__init__()
self.values = values
def _draw_samples(self, size, random_state):
v = random_state.choice(self.values)
return np.full(size, v, dtype=np.uint8)
colorizer = iaa.RandomColorsBinaryImageColorizer(
color_true=_Color([253, 254]),
color_false=_Color([1, 2])
)
image_single_chan = np.uint8([
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0]
])
image = np.tile(image_single_chan[:, :, np.newaxis] * 128, (1, 1, 3))
# canny image, looks a bit unintuitive, but is what OpenCV returns
# can be checked via something like
# print("canny\n", cv2.Canny(image_single_chan*255, threshold1=100,
# threshold2=200,
# apertureSize=3,
# L2gradient=True))
image_canny = np.array([
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0, 0]
], dtype=bool)
seen = {
(253, 1): False,
(253, 2): False,
(254, 1): False,
(254, 2): False
}
for i in range(100):
aug = iaa.Canny(
alpha=1.0,
hysteresis_thresholds=100,
sobel_kernel_size=3,
colorizer=colorizer,
seed=i)
image_aug = aug.augment_image(image)
color_true = np.unique(image_aug[image_canny])
color_false = np.unique(image_aug[~image_canny])
assert len(color_true) == 1
assert len(color_false) == 1
color_true = int(color_true[0])
color_false = int(color_false[0])
seen[(int(color_true), int(color_false))] = True
assert len(seen.keys()) == 4
if all(seen.values()):
break
        assert all(seen.values())
def test_augment_images__random_values(self):
colorizer = iaa.RandomColorsBinaryImageColorizer(
color_true=255,
color_false=0
)
image_single_chan = iarandom.RNG(1).integers(
0, 255, size=(100, 100), dtype="uint8")
image = np.tile(image_single_chan[:, :, np.newaxis], (1, 1, 3))
images_canny_uint8 = {}
for thresh1, thresh2, ksize in itertools.product([100],
[200],
[3, 5]):
if thresh1 > thresh2:
continue
image_canny = cv2.Canny(
image,
threshold1=thresh1,
threshold2=thresh2,
apertureSize=ksize,
L2gradient=True)
image_canny_uint8 = np.tile(
image_canny[:, :, np.newaxis], (1, 1, 3))
similar = 0
for key, image_expected in images_canny_uint8.items():
if np.array_equal(image_canny_uint8, image_expected):
similar += 1
assert similar == 0
images_canny_uint8[(thresh1, thresh2, ksize)] = image_canny_uint8
seen = {key: False for key in images_canny_uint8.keys()}
for i in range(500):
aug = iaa.Canny(
alpha=1.0,
hysteresis_thresholds=(iap.Deterministic(100),
iap.Deterministic(200)),
sobel_kernel_size=[3, 5],
colorizer=colorizer,
seed=i)
image_aug = aug.augment_image(image)
match_index = None
for key, image_expected in images_canny_uint8.items():
if np.array_equal(image_aug, image_expected):
match_index = key
break
assert match_index is not None
seen[match_index] = True
assert len(seen.keys()) == len(images_canny_uint8.keys())
if all(seen.values()):
break
        assert all(seen.values())
def test_zero_sized_axes(self):
shapes = [
(0, 0, 3),
(0, 1, 3),
(1, 0, 3)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Canny(alpha=1)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
def test_get_parameters(self):
alpha = iap.Deterministic(0.2)
hysteresis_thresholds = iap.Deterministic(10)
sobel_kernel_size = iap.Deterministic(3)
colorizer = iaa.RandomColorsBinaryImageColorizer(
color_true=10, color_false=20)
aug = iaa.Canny(
alpha=alpha,
hysteresis_thresholds=hysteresis_thresholds,
sobel_kernel_size=sobel_kernel_size,
colorizer=colorizer
)
params = aug.get_parameters()
assert params[0] is aug.alpha
assert params[1] is aug.hysteresis_thresholds
assert params[2] is aug.sobel_kernel_size
assert params[3] is colorizer
def test___str___single_value_hysteresis(self):
alpha = iap.Deterministic(0.2)
hysteresis_thresholds = iap.Deterministic(10)
sobel_kernel_size = iap.Deterministic(3)
colorizer = iaa.RandomColorsBinaryImageColorizer(
color_true=10, color_false=20)
aug = iaa.Canny(
alpha=alpha,
hysteresis_thresholds=hysteresis_thresholds,
sobel_kernel_size=sobel_kernel_size,
colorizer=colorizer
)
observed = aug.__str__()
expected = ("Canny(alpha=%s, hysteresis_thresholds=%s, "
"sobel_kernel_size=%s, colorizer=%s, name=UnnamedCanny, "
"deterministic=False)") % (
str(aug.alpha),
str(aug.hysteresis_thresholds),
str(aug.sobel_kernel_size),
colorizer)
assert observed == expected
def test___str___tuple_as_hysteresis(self):
alpha = iap.Deterministic(0.2)
hysteresis_thresholds = (
iap.Deterministic(10),
iap.Deterministic(11)
)
sobel_kernel_size = iap.Deterministic(3)
colorizer = iaa.RandomColorsBinaryImageColorizer(
color_true=10, color_false=20)
aug = iaa.Canny(
alpha=alpha,
hysteresis_thresholds=hysteresis_thresholds,
sobel_kernel_size=sobel_kernel_size,
colorizer=colorizer
)
observed = aug.__str__()
expected = ("Canny(alpha=%s, hysteresis_thresholds=(%s, %s), "
"sobel_kernel_size=%s, colorizer=%s, name=UnnamedCanny, "
"deterministic=False)") % (
str(aug.alpha),
str(aug.hysteresis_thresholds[0]),
str(aug.hysteresis_thresholds[1]),
str(aug.sobel_kernel_size),
colorizer)
assert observed == expected
def test_pickleable(self):
aug = iaa.Canny(seed=1)
runtest_pickleable_uint8_img(aug, iterations=20)
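# --- Illustrative sketch (not part of the original test suite) ---
# Typical library-level use of the augmenter exercised above; the parameter
# ranges are arbitrary, `images` is assumed to be a uint8 array of shape
# (N, H, W, 3), and the helper is never called by the tests:
def _example_canny(images):
    aug = iaa.Canny(alpha=(0.0, 1.0), sobel_kernel_size=[3, 7])
    return aug(images=images)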
|
mit
|
JimCircadian/ansible
|
test/utils/shippable/tools/download.py
|
124
|
10149
|
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
# (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""CLI tool for downloading results from Shippable CI runs."""
from __future__ import print_function
# noinspection PyCompatibility
import argparse
import json
import os
import re
import requests
try:
import argcomplete
except ImportError:
argcomplete = None
def main():
"""Main program body."""
api_key = get_api_key()
parser = argparse.ArgumentParser(description='Download results from a Shippable run.')
parser.add_argument('run_id',
metavar='RUN',
help='shippable run id, run url or run name formatted as: account/project/run_number')
parser.add_argument('-v', '--verbose',
dest='verbose',
action='store_true',
help='show what is being downloaded')
parser.add_argument('-t', '--test',
dest='test',
action='store_true',
help='show what would be downloaded without downloading')
parser.add_argument('--key',
dest='api_key',
default=api_key,
required=api_key is None,
help='api key for accessing Shippable')
parser.add_argument('--console-logs',
action='store_true',
help='download console logs')
parser.add_argument('--test-results',
action='store_true',
help='download test results')
parser.add_argument('--coverage-results',
action='store_true',
help='download code coverage results')
parser.add_argument('--job-metadata',
action='store_true',
help='download job metadata')
parser.add_argument('--run-metadata',
action='store_true',
help='download run metadata')
parser.add_argument('--all',
action='store_true',
help='download everything')
parser.add_argument('--job-number',
metavar='N',
action='append',
type=int,
help='limit downloads to the given job number')
if argcomplete:
argcomplete.autocomplete(parser)
args = parser.parse_args()
old_runs_prefix = 'https://app.shippable.com/runs/'
if args.run_id.startswith(old_runs_prefix):
args.run_id = args.run_id[len(old_runs_prefix):]
if args.all:
args.console_logs = True
args.test_results = True
args.coverage_results = True
args.job_metadata = True
args.run_metadata = True
selections = (
args.console_logs,
args.test_results,
args.coverage_results,
args.job_metadata,
args.run_metadata,
)
if not any(selections):
parser.error('At least one download option is required.')
headers = dict(
Authorization='apiToken %s' % args.api_key,
)
match = re.search(
r'^https://app.shippable.com/github/(?P<account>[^/]+)/(?P<project>[^/]+)/runs/(?P<run_number>[0-9]+)(?:/summary|(/(?P<job_number>[0-9]+)))?$',
args.run_id)
if not match:
match = re.search(r'^(?P<account>[^/]+)/(?P<project>[^/]+)/(?P<run_number>[0-9]+)$', args.run_id)
if match:
account = match.group('account')
project = match.group('project')
run_number = int(match.group('run_number'))
job_number = int(match.group('job_number')) if match.group('job_number') else None
if job_number:
if args.job_number:
exit('ERROR: job number found in url and specified with --job-number')
args.job_number = [job_number]
url = 'https://api.shippable.com/projects'
response = requests.get(url, dict(projectFullNames='%s/%s' % (account, project)), headers=headers)
if response.status_code != 200:
raise Exception(response.content)
project_id = response.json()[0]['id']
url = 'https://api.shippable.com/runs?projectIds=%s&runNumbers=%s' % (project_id, run_number)
response = requests.get(url, headers=headers)
if response.status_code != 200:
raise Exception(response.content)
run = [run for run in response.json() if run['runNumber'] == run_number][0]
args.run_id = run['id']
elif re.search('^[a-f0-9]+$', args.run_id):
url = 'https://api.shippable.com/runs/%s' % args.run_id
response = requests.get(url, headers=headers)
if response.status_code != 200:
raise Exception(response.content)
run = response.json()
account = run['subscriptionOrgName']
project = run['projectName']
run_number = run['runNumber']
else:
exit('ERROR: invalid run: %s' % args.run_id)
output_dir = '%s/%s/%s' % (account, project, run_number)
response = requests.get('https://api.shippable.com/jobs?runIds=%s' % args.run_id, headers=headers)
if response.status_code != 200:
raise Exception(response.content)
jobs = sorted(response.json(), key=lambda job: int(job['jobNumber']))
if not args.test:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if args.run_metadata:
path = os.path.join(output_dir, 'run.json')
contents = json.dumps(run, sort_keys=True, indent=4)
if args.verbose or args.test:
print(path)
if not args.test:
with open(path, 'w') as metadata_fd:
metadata_fd.write(contents)
for j in jobs:
job_id = j['id']
job_number = j['jobNumber']
if args.job_number and job_number not in args.job_number:
continue
if args.job_metadata:
path = os.path.join(output_dir, '%s/job.json' % job_number)
contents = json.dumps(j, sort_keys=True, indent=4)
if args.verbose or args.test:
print(path)
if not args.test:
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
with open(path, 'w') as metadata_fd:
metadata_fd.write(contents)
if args.console_logs:
path = os.path.join(output_dir, '%s/console.log' % job_number)
url = 'https://api.shippable.com/jobs/%s/consoles?download=true' % job_id
download(args, headers, path, url, is_json=False)
if args.test_results:
path = os.path.join(output_dir, '%s/test.json' % job_number)
url = 'https://api.shippable.com/jobs/%s/jobTestReports' % job_id
download(args, headers, path, url)
extract_contents(args, path, os.path.join(output_dir, '%s/test' % job_number))
if args.coverage_results:
path = os.path.join(output_dir, '%s/coverage.json' % job_number)
url = 'https://api.shippable.com/jobs/%s/jobCoverageReports' % job_id
download(args, headers, path, url)
extract_contents(args, path, os.path.join(output_dir, '%s/coverage' % job_number))
def extract_contents(args, path, output_dir):
"""
:type args: any
:type path: str
:type output_dir: str
"""
if not args.test:
if not os.path.exists(path):
return
with open(path, 'r') as json_fd:
items = json.load(json_fd)
for item in items:
contents = item['contents'].encode('utf-8')
path = output_dir + '/' + re.sub('^/*', '', item['path'])
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
if args.verbose:
print(path)
if path.endswith('.json'):
contents = json.dumps(json.loads(contents), sort_keys=True, indent=4)
if not os.path.exists(path):
with open(path, 'w') as output_fd:
output_fd.write(contents)
def download(args, headers, path, url, is_json=True):
"""
:type args: any
:type headers: dict[str, str]
:type path: str
:type url: str
:type is_json: bool
"""
if args.verbose or args.test:
print(path)
if os.path.exists(path):
return
if not args.test:
response = requests.get(url, headers=headers)
if response.status_code != 200:
path += '.error'
if is_json:
content = json.dumps(response.json(), sort_keys=True, indent=4)
else:
content = response.content
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
with open(path, 'w') as content_fd:
content_fd.write(content)
def get_api_key():
"""
    :rtype: str
"""
key = os.environ.get('SHIPPABLE_KEY', None)
if key:
return key
path = os.path.join(os.environ['HOME'], '.shippable.key')
try:
with open(path, 'r') as key_fd:
return key_fd.read().strip()
except IOError:
return None
if __name__ == '__main__':
main()
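# --- Illustrative usage (not part of the original script) ---
# Example invocations, assuming SHIPPABLE_KEY is exported (or ~/.shippable.key
# exists); the run may be given as account/project/run_number, a run id, or a
# run URL. Account and project names below are placeholders:
#
#   ./download.py myaccount/myproject/123 --all
#   ./download.py https://app.shippable.com/github/myaccount/myproject/runs/123/summary \
#       --test-results --console-logs --job-number 2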
|
gpl-3.0
|
Nowheresly/odoo
|
addons/account/wizard/account_report_aged_partner_balance.py
|
378
|
4012
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_aged_trial_balance(osv.osv_memory):
_inherit = 'account.common.partner.report'
_name = 'account.aged.trial.balance'
    _description = 'Account Aged Trial Balance Report'
_columns = {
'period_length':fields.integer('Period Length (days)', required=True),
'direction_selection': fields.selection([('past','Past'),
('future','Future')],
'Analysis Direction', required=True),
'journal_ids': fields.many2many('account.journal', 'account_aged_trial_balance_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
}
_defaults = {
'period_length': 30,
'date_from': lambda *a: time.strftime('%Y-%m-%d'),
'direction_selection': 'past',
}
def _print_report(self, cr, uid, ids, data, context=None):
res = {}
if context is None:
context = {}
data = self.pre_print_report(cr, uid, ids, data, context=context)
data['form'].update(self.read(cr, uid, ids, ['period_length', 'direction_selection'])[0])
period_length = data['form']['period_length']
if period_length<=0:
raise osv.except_osv(_('User Error!'), _('You must set a period length greater than 0.'))
if not data['form']['date_from']:
raise osv.except_osv(_('User Error!'), _('You must set a start date.'))
start = datetime.strptime(data['form']['date_from'], "%Y-%m-%d")
if data['form']['direction_selection'] == 'past':
for i in range(5)[::-1]:
stop = start - relativedelta(days=period_length)
res[str(i)] = {
'name': (i!=0 and (str((5-(i+1)) * period_length) + '-' + str((5-i) * period_length)) or ('+'+str(4 * period_length))),
'stop': start.strftime('%Y-%m-%d'),
'start': (i!=0 and stop.strftime('%Y-%m-%d') or False),
}
start = stop - relativedelta(days=1)
else:
for i in range(5):
stop = start + relativedelta(days=period_length)
res[str(5-(i+1))] = {
'name': (i!=4 and str((i) * period_length)+'-' + str((i+1) * period_length) or ('+'+str(4 * period_length))),
'start': start.strftime('%Y-%m-%d'),
'stop': (i!=4 and stop.strftime('%Y-%m-%d') or False),
}
start = stop + relativedelta(days=1)
data['form'].update(res)
if data.get('form',False):
data['ids']=[data['form'].get('chart_account_id',False)]
return self.pool['report'].get_action(cr, uid, [], 'account.report_agedpartnerbalance', data=data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
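# --- Illustrative note (not part of the original wizard) ---
# With the defaults above (period_length=30, direction_selection='past') the
# loop in _print_report fills res with five aging buckets, newest first:
#
#   res['4'] = {'name': '0-30',   'stop': date_from, 'start': date_from - 30 days}
#   res['3'] = {'name': '30-60',  ...}
#   res['2'] = {'name': '60-90',  ...}
#   res['1'] = {'name': '90-120', ...}
#   res['0'] = {'name': '+120',   'start': False}   # open-ended oldest bucket
#
# Consecutive buckets are separated by one day so their date ranges never
# overlap.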
|
agpl-3.0
|
TianpeiLuke/GPy
|
GPy/inference/latent_function_inference/fitc.py
|
4
|
3029
|
# Copyright (c) 2012, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from posterior import Posterior
from ...util.linalg import jitchol, tdot, dtrtrs, dpotri, pdinv
from ...util import diag
import numpy as np
from . import LatentFunctionInference
log_2_pi = np.log(2*np.pi)
class FITC(LatentFunctionInference):
"""
An object for inference when the likelihood is Gaussian, but we want to do sparse inference.
The function self.inference returns a Posterior object, which summarizes
the posterior.
"""
const_jitter = 1e-6
def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
num_inducing, _ = Z.shape
num_data, output_dim = Y.shape
#make sure the noise is not hetero
sigma_n = likelihood.gaussian_variance(Y_metadata)
if sigma_n.size >1:
raise NotImplementedError, "no hetero noise with this implementation of FITC"
Kmm = kern.K(Z)
Knn = kern.Kdiag(X)
Knm = kern.K(X, Z)
U = Knm
#factor Kmm
diag.add(Kmm, self.const_jitter)
Kmmi, L, Li, _ = pdinv(Kmm)
#compute beta_star, the effective noise precision
LiUT = np.dot(Li, U.T)
sigma_star = Knn + sigma_n - np.sum(np.square(LiUT),0)
beta_star = 1./sigma_star
# Compute and factor A
A = tdot(LiUT*np.sqrt(beta_star)) + np.eye(num_inducing)
LA = jitchol(A)
        # back substitute to get b, P, v
URiy = np.dot(U.T*beta_star,Y)
tmp, _ = dtrtrs(L, URiy, lower=1)
b, _ = dtrtrs(LA, tmp, lower=1)
tmp, _ = dtrtrs(LA, b, lower=1, trans=1)
v, _ = dtrtrs(L, tmp, lower=1, trans=1)
tmp, _ = dtrtrs(LA, Li, lower=1, trans=0)
P = tdot(tmp.T)
#compute log marginal
log_marginal = -0.5*num_data*output_dim*np.log(2*np.pi) + \
-np.sum(np.log(np.diag(LA)))*output_dim + \
0.5*output_dim*np.sum(np.log(beta_star)) + \
-0.5*np.sum(np.square(Y.T*np.sqrt(beta_star))) + \
0.5*np.sum(np.square(b))
#compute dL_dR
Uv = np.dot(U, v)
dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - 1./beta_star + np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1))*beta_star**2
# Compute dL_dKmm
vvT_P = tdot(v.reshape(-1,1)) + P
dL_dK = 0.5*(Kmmi - vvT_P)
KiU = np.dot(Kmmi, U.T)
dL_dK += np.dot(KiU*dL_dR, KiU.T)
# Compute dL_dU
vY = np.dot(v.reshape(-1,1),Y.T)
dL_dU = vY - np.dot(vvT_P, U.T)
dL_dU *= beta_star
dL_dU -= 2.*KiU*dL_dR
dL_dthetaL = likelihood.exact_inference_gradients(dL_dR)
grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':dL_dR, 'dL_dKnm':dL_dU.T, 'dL_dthetaL':dL_dthetaL}
#construct a posterior object
post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=L)
return post, log_marginal, grad_dict
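# --- Illustrative note (not part of the original implementation) ---
# For reference, the FITC approximation used above replaces the full
# covariance K_nn with
#     Q_nn + diag(K_nn - Q_nn) + sigma_n * I,    Q_nn = K_nm K_mm^{-1} K_mn,
# i.e. only the diagonal of the correction term is kept. In the code,
# `sigma_star` is exactly diag(K_nn - Q_nn) + sigma_n and `beta_star` is its
# element-wise inverse, the effective per-point noise precision.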
|
bsd-3-clause
|
rdnelson/Libra
|
lib/gtest-1.6.0/scripts/pump.py
|
603
|
23316
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sys
TOKEN_TABLE = [
(re.compile(r'\$var\s+'), '$var'),
(re.compile(r'\$elif\s+'), '$elif'),
(re.compile(r'\$else\s+'), '$else'),
(re.compile(r'\$for\s+'), '$for'),
(re.compile(r'\$if\s+'), '$if'),
(re.compile(r'\$range\s+'), '$range'),
(re.compile(r'\$[_A-Za-z]\w*'), '$id'),
(re.compile(r'\$\(\$\)'), '$($)'),
(re.compile(r'\$'), '$'),
(re.compile(r'\[\[\n?'), '[['),
(re.compile(r'\]\]\n?'), ']]'),
]
class Cursor:
"""Represents a position (line and column) in a text file."""
def __init__(self, line=-1, column=-1):
self.line = line
self.column = column
def __eq__(self, rhs):
return self.line == rhs.line and self.column == rhs.column
def __ne__(self, rhs):
return not self == rhs
def __lt__(self, rhs):
return self.line < rhs.line or (
self.line == rhs.line and self.column < rhs.column)
def __le__(self, rhs):
return self < rhs or self == rhs
def __gt__(self, rhs):
return rhs < self
def __ge__(self, rhs):
return rhs <= self
def __str__(self):
if self == Eof():
return 'EOF'
else:
return '%s(%s)' % (self.line + 1, self.column)
def __add__(self, offset):
return Cursor(self.line, self.column + offset)
def __sub__(self, offset):
return Cursor(self.line, self.column - offset)
def Clone(self):
"""Returns a copy of self."""
return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
"""Returns the special cursor to denote the end-of-file."""
return Cursor(-1, -1)
class Token:
"""Represents a token in a Pump source file."""
def __init__(self, start=None, end=None, value=None, token_type=None):
if start is None:
self.start = Eof()
else:
self.start = start
if end is None:
self.end = Eof()
else:
self.end = end
self.value = value
self.token_type = token_type
def __str__(self):
return 'Token @%s: \'%s\' type=%s' % (
self.start, self.value, self.token_type)
def Clone(self):
"""Returns a copy of self."""
return Token(self.start.Clone(), self.end.Clone(), self.value,
self.token_type)
def StartsWith(lines, pos, string):
"""Returns True iff the given position in lines starts with 'string'."""
return lines[pos.line][pos.column:].startswith(string)
def FindFirstInLine(line, token_table):
best_match_start = -1
for (regex, token_type) in token_table:
m = regex.search(line)
if m:
# We found regex in lines
if best_match_start < 0 or m.start() < best_match_start:
best_match_start = m.start()
best_match_length = m.end() - m.start()
best_match_token_type = token_type
if best_match_start < 0:
return None
return (best_match_start, best_match_length, best_match_token_type)
def FindFirst(lines, token_table, cursor):
"""Finds the first occurrence of any string in strings in lines."""
start = cursor.Clone()
cur_line_number = cursor.line
for line in lines[start.line:]:
if cur_line_number == start.line:
line = line[start.column:]
m = FindFirstInLine(line, token_table)
if m:
# We found a regex in line.
(start_column, length, token_type) = m
if cur_line_number == start.line:
start_column += start.column
found_start = Cursor(cur_line_number, start_column)
found_end = found_start + length
return MakeToken(lines, found_start, found_end, token_type)
cur_line_number += 1
# We failed to find str in lines
return None
def SubString(lines, start, end):
"""Returns a substring in lines."""
if end == Eof():
end = Cursor(len(lines) - 1, len(lines[-1]))
if start >= end:
return ''
if start.line == end.line:
return lines[start.line][start.column:end.column]
result_lines = ([lines[start.line][start.column:]] +
lines[start.line + 1:end.line] +
[lines[end.line][:end.column]])
return ''.join(result_lines)
def StripMetaComments(str):
"""Strip meta comments from each line in the given string."""
# First, completely remove lines containing nothing but a meta
# comment, including the trailing \n.
str = re.sub(r'^\s*\$\$.*\n', '', str)
# Then, remove meta comments from contentful lines.
return re.sub(r'\s*\$\$.*', '', str)
def MakeToken(lines, start, end, token_type):
"""Creates a new instance of Token."""
return Token(start, end, SubString(lines, start, end), token_type)
def ParseToken(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = regex.search(line)
if m and not m.start():
return MakeToken(lines, pos, pos + m.end(), token_type)
else:
print 'ERROR: %s expected at %s.' % (token_type, pos)
sys.exit(1)
ID_REGEX = re.compile(r'[_A-Za-z]\w*')
EQ_REGEX = re.compile(r'=')
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')
def Skip(lines, pos, regex):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m and not m.start():
return pos + m.end()
else:
return pos
def SkipUntil(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m:
return pos + m.start()
else:
print ('ERROR: %s expected on line %s after column %s.' %
(token_type, pos.line + 1, pos.column))
sys.exit(1)
def ParseExpTokenInParens(lines, pos):
def ParseInParens(pos):
pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
pos = Skip(lines, pos, r'\(')
pos = Parse(pos)
pos = Skip(lines, pos, r'\)')
return pos
def Parse(pos):
pos = SkipUntil(lines, pos, r'\(|\)', ')')
if SubString(lines, pos, pos + 1) == '(':
pos = Parse(pos + 1)
pos = Skip(lines, pos, r'\)')
return Parse(pos)
else:
return pos
start = pos.Clone()
pos = ParseInParens(pos)
return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
if token.value.endswith('\n'):
return Token(token.start, token.end, token.value[:-1], token.token_type)
else:
return token
def TokenizeLines(lines, pos):
while True:
found = FindFirst(lines, TOKEN_TABLE, pos)
if not found:
yield MakeToken(lines, pos, Eof(), 'code')
return
if found.start == pos:
prev_token = None
prev_token_rstripped = None
else:
prev_token = MakeToken(lines, pos, found.start, 'code')
prev_token_rstripped = RStripNewLineFromToken(prev_token)
if found.token_type == '$var':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
yield eq_token
pos = Skip(lines, eq_token.end, r'\s*')
if SubString(lines, pos, pos + 2) != '[[':
exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
yield exp_token
pos = Cursor(exp_token.end.line + 1, 0)
elif found.token_type == '$for':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
elif found.token_type == '$range':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
yield MakeToken(lines, pos, dots_pos, 'exp')
yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
pos = dots_pos + 2
new_pos = Cursor(pos.line + 1, 0)
yield MakeToken(lines, pos, new_pos, 'exp')
pos = new_pos
elif found.token_type == '$':
if prev_token:
yield prev_token
yield found
exp_token = ParseExpTokenInParens(lines, found.end)
yield exp_token
pos = exp_token.end
elif (found.token_type == ']]' or found.token_type == '$if' or
found.token_type == '$elif' or found.token_type == '$else'):
if prev_token_rstripped:
yield prev_token_rstripped
yield found
pos = found.end
else:
if prev_token:
yield prev_token
yield found
pos = found.end
def Tokenize(s):
"""A generator that yields the tokens in the given string."""
if s != '':
lines = s.splitlines(True)
for token in TokenizeLines(lines, Cursor(0, 0)):
yield token
class CodeNode:
def __init__(self, atomic_code_list=None):
self.atomic_code = atomic_code_list
class VarNode:
def __init__(self, identifier=None, atomic_code=None):
self.identifier = identifier
self.atomic_code = atomic_code
class RangeNode:
def __init__(self, identifier=None, exp1=None, exp2=None):
self.identifier = identifier
self.exp1 = exp1
self.exp2 = exp2
class ForNode:
def __init__(self, identifier=None, sep=None, code=None):
self.identifier = identifier
self.sep = sep
self.code = code
class ElseNode:
def __init__(self, else_branch=None):
self.else_branch = else_branch
class IfNode:
def __init__(self, exp=None, then_branch=None, else_branch=None):
self.exp = exp
self.then_branch = then_branch
self.else_branch = else_branch
class RawCodeNode:
def __init__(self, token=None):
self.raw_code = token
class LiteralDollarNode:
def __init__(self, token):
self.token = token
class ExpNode:
def __init__(self, token, python_exp):
self.token = token
self.python_exp = python_exp
def PopFront(a_list):
head = a_list[0]
a_list[:1] = []
return head
def PushFront(a_list, elem):
a_list[:0] = [elem]
def PopToken(a_list, token_type=None):
token = PopFront(a_list)
if token_type is not None and token.token_type != token_type:
print 'ERROR: %s expected at %s' % (token_type, token.start)
print 'ERROR: %s found instead' % (token,)
sys.exit(1)
return token
def PeekToken(a_list):
if not a_list:
return None
return a_list[0]
def ParseExpNode(token):
python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
return ExpNode(token, python_exp)
def ParseElseNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
next = PeekToken(tokens)
if not next:
return None
if next.token_type == '$else':
Pop('$else')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
elif next.token_type == '$elif':
Pop('$elif')
exp = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
inner_else_node = ParseElseNode(tokens)
return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
elif not next.value.strip():
Pop('code')
return ParseElseNode(tokens)
else:
return None
def ParseAtomicCodeNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
head = PopFront(tokens)
t = head.token_type
if t == 'code':
return RawCodeNode(head)
elif t == '$var':
id_token = Pop('id')
Pop('=')
next = PeekToken(tokens)
if next.token_type == 'exp':
exp_token = Pop()
return VarNode(id_token, ParseExpNode(exp_token))
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return VarNode(id_token, code_node)
elif t == '$for':
id_token = Pop('id')
next_token = PeekToken(tokens)
if next_token.token_type == 'code':
sep_token = next_token
Pop('code')
else:
sep_token = None
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return ForNode(id_token, sep_token, code_node)
elif t == '$if':
exp_token = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
else_node = ParseElseNode(tokens)
return IfNode(ParseExpNode(exp_token), code_node, else_node)
elif t == '$range':
id_token = Pop('id')
exp1_token = Pop('exp')
Pop('..')
exp2_token = Pop('exp')
return RangeNode(id_token, ParseExpNode(exp1_token),
ParseExpNode(exp2_token))
elif t == '$id':
return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
elif t == '$($)':
return LiteralDollarNode(head)
elif t == '$':
exp_token = Pop('exp')
return ParseExpNode(exp_token)
elif t == '[[':
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
else:
PushFront(tokens, head)
return None
def ParseCodeNode(tokens):
atomic_code_list = []
while True:
if not tokens:
break
atomic_code_node = ParseAtomicCodeNode(tokens)
if atomic_code_node:
atomic_code_list.append(atomic_code_node)
else:
break
return CodeNode(atomic_code_list)
def ParseToAST(pump_src_text):
"""Convert the given Pump source text into an AST."""
tokens = list(Tokenize(pump_src_text))
code_node = ParseCodeNode(tokens)
return code_node
class Env:
def __init__(self):
self.variables = []
self.ranges = []
def Clone(self):
clone = Env()
clone.variables = self.variables[:]
clone.ranges = self.ranges[:]
return clone
def PushVariable(self, var, value):
# If value looks like an int, store it as an int.
try:
int_value = int(value)
if ('%s' % int_value) == value:
value = int_value
except Exception:
pass
self.variables[:0] = [(var, value)]
def PopVariable(self):
self.variables[:1] = []
def PushRange(self, var, lower, upper):
self.ranges[:0] = [(var, lower, upper)]
def PopRange(self):
self.ranges[:1] = []
def GetValue(self, identifier):
for (var, value) in self.variables:
if identifier == var:
return value
print 'ERROR: meta variable %s is undefined.' % (identifier,)
sys.exit(1)
def EvalExp(self, exp):
try:
result = eval(exp.python_exp)
except Exception, e:
print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
print ('ERROR: failed to evaluate meta expression %s at %s' %
(exp.python_exp, exp.token.start))
sys.exit(1)
return result
def GetRange(self, identifier):
for (var, lower, upper) in self.ranges:
if identifier == var:
return (lower, upper)
print 'ERROR: range %s is undefined.' % (identifier,)
sys.exit(1)
class Output:
def __init__(self):
self.string = ''
def GetLastLine(self):
index = self.string.rfind('\n')
if index < 0:
return ''
return self.string[index + 1:]
def Append(self, s):
self.string += s
def RunAtomicCode(env, node, output):
if isinstance(node, VarNode):
identifier = node.identifier.value.strip()
result = Output()
RunAtomicCode(env.Clone(), node.atomic_code, result)
value = result.string
env.PushVariable(identifier, value)
elif isinstance(node, RangeNode):
identifier = node.identifier.value.strip()
lower = int(env.EvalExp(node.exp1))
upper = int(env.EvalExp(node.exp2))
env.PushRange(identifier, lower, upper)
elif isinstance(node, ForNode):
identifier = node.identifier.value.strip()
if node.sep is None:
sep = ''
else:
sep = node.sep.value
(lower, upper) = env.GetRange(identifier)
for i in range(lower, upper + 1):
new_env = env.Clone()
new_env.PushVariable(identifier, i)
RunCode(new_env, node.code, output)
if i != upper:
output.Append(sep)
elif isinstance(node, RawCodeNode):
output.Append(node.raw_code.value)
elif isinstance(node, IfNode):
cond = env.EvalExp(node.exp)
if cond:
RunCode(env.Clone(), node.then_branch, output)
elif node.else_branch is not None:
RunCode(env.Clone(), node.else_branch, output)
elif isinstance(node, ExpNode):
value = env.EvalExp(node)
output.Append('%s' % (value,))
elif isinstance(node, LiteralDollarNode):
output.Append('$')
elif isinstance(node, CodeNode):
RunCode(env.Clone(), node, output)
else:
print 'BAD'
print node
sys.exit(1)
def RunCode(env, code_node, output):
for atomic_code in code_node.atomic_code:
RunAtomicCode(env, atomic_code, output)
def IsComment(cur_line):
return '//' in cur_line
def IsInPreprocessorDirective(prev_lines, cur_line):
if cur_line.lstrip().startswith('#'):
return True
return prev_lines != [] and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
loc = line.find('//')
before_comment = line[:loc].rstrip()
if before_comment == '':
indent = loc
else:
output.append(before_comment)
indent = len(before_comment) - len(before_comment.lstrip())
prefix = indent*' ' + '// '
max_len = 80 - len(prefix)
comment = line[loc + 2:].strip()
segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
cur_line = ''
for seg in segs:
if len((cur_line + seg).rstrip()) < max_len:
cur_line += seg
else:
if cur_line.strip() != '':
output.append(prefix + cur_line.rstrip())
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
indent = len(line) - len(line.lstrip())
prefix = indent*' ' # Prefix of the current line
max_len = 80 - indent - len(line_concat) # Maximum length of the current line
new_prefix = prefix + 4*' ' # Prefix of a continuation line
new_max_len = max_len - 4 # Maximum length of a continuation line
# Prefers to wrap a line after a ',' or ';'.
segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
cur_line = '' # The current line without leading spaces.
for seg in segs:
# If the line is still too long, wrap at a space.
while cur_line == '' and len(seg.strip()) > max_len:
seg = seg.lstrip()
split_at = seg.rfind(' ', 0, max_len)
output.append(prefix + seg[:split_at].strip() + line_concat)
seg = seg[split_at + 1:]
prefix = new_prefix
max_len = new_max_len
if len((cur_line + seg).rstrip()) < max_len:
cur_line = (cur_line + seg).lstrip()
else:
output.append(prefix + cur_line.rstrip() + line_concat)
prefix = new_prefix
max_len = new_max_len
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapPreprocessorDirective(line, output):
WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
WrapCode(line, '', output)
def IsHeaderGuardOrInclude(line):
return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or
re.match(r'^#include\s', line))
def WrapLongLine(line, output):
line = line.rstrip()
if len(line) <= 80:
output.append(line)
elif IsComment(line):
if IsHeaderGuardOrInclude(line):
# The style guide made an exception to allow long header guard lines
# and includes.
output.append(line)
else:
WrapComment(line, output)
  elif IsInPreprocessorDirective(output, line):
if IsHeaderGuardOrInclude(line):
# The style guide made an exception to allow long header guard lines
# and includes.
output.append(line)
else:
      WrapPreprocessorDirective(line, output)
else:
WrapPlainCode(line, output)
def BeautifyCode(string):
lines = string.splitlines()
output = []
for line in lines:
WrapLongLine(line, output)
output2 = [line.rstrip() for line in output]
return '\n'.join(output2) + '\n'
def ConvertFromPumpSource(src_text):
"""Return the text generated from the given Pump source text."""
ast = ParseToAST(StripMetaComments(src_text))
output = Output()
RunCode(Env(), ast, output)
return BeautifyCode(output.string)
def main(argv):
if len(argv) == 1:
print __doc__
sys.exit(1)
file_path = argv[-1]
output_str = ConvertFromPumpSource(file(file_path, 'r').read())
if file_path.endswith('.pump'):
output_file_path = file_path[:-5]
else:
output_file_path = '-'
if output_file_path == '-':
print output_str,
else:
output_file = file(output_file_path, 'w')
output_file.write('// This file was GENERATED by command:\n')
output_file.write('// %s %s\n' %
(os.path.basename(__file__), os.path.basename(file_path)))
output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
output_file.write(output_str)
output_file.close()
if __name__ == '__main__':
main(sys.argv)
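# Illustrative note (not from the original pump.py): a quick sanity check of the
# wrapping helpers above. WrapLongLine() leaves lines of 80 columns or less alone
# and re-wraps longer ones, preferring to break after ',' or ';' (see WrapCode):
#
#   out = []
#   WrapLongLine('  DoSomething(argument_one, argument_two, argument_three, '
#                'argument_four, argument_five);', out)
#   # out now holds two lines, each at most 80 characters, with the continuation
#   # indented four extra spaces relative to the original line.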
|
gpl-3.0
|
MrNuggles/HeyBoet-Telegram-Bot
|
temboo/Library/RunKeeper/CommentThreads/PostComment.py
|
4
|
3446
|
# -*- coding: utf-8 -*-
###############################################################################
#
# PostComment
# Posts a comment about a user's activity.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class PostComment(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the PostComment Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(PostComment, self).__init__(temboo_session, '/Library/RunKeeper/CommentThreads/PostComment')
def new_input_set(self):
return PostCommentInputSet()
def _make_result_set(self, result, path):
return PostCommentResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return PostCommentChoreographyExecution(session, exec_id, path)
class PostCommentInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the PostComment
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_Comment(self, value):
"""
Set the value of the Comment input for this Choreo. ((required, string) The comment text.)
"""
super(PostCommentInputSet, self)._set_input('Comment', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved after the final step in the OAuth process.)
"""
super(PostCommentInputSet, self)._set_input('AccessToken', value)
def set_URI(self, value):
"""
Set the value of the URI input for this Choreo. ((required, string) The URI of the activity thread to post a comment to (e.g., /fitnessActivities/327844402).)
"""
super(PostCommentInputSet, self)._set_input('URI', value)
class PostCommentResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the PostComment Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from RunKeeper.)
"""
return self._output.get('Response', None)
class PostCommentChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return PostCommentResultSet(response, path)
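# Illustrative usage sketch (not part of the generated file): typical Temboo usage
# builds a session, fills the input set, executes, and reads the result set.
# TembooSession and execute_with_results are assumed from the installed Temboo SDK;
# the credential placeholders below are hypothetical.
#
#   from temboo.core.session import TembooSession
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = PostComment(session)
#   inputs = choreo.new_input_set()
#   inputs.set_AccessToken('<OAUTH_ACCESS_TOKEN>')
#   inputs.set_URI('/fitnessActivities/327844402')
#   inputs.set_Comment('Nice run!')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())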
|
gpl-3.0
|
ltilve/ChromiumGStreamerBackend
|
tools/telemetry/third_party/gsutilz/third_party/boto/tests/unit/glacier/test_concurrent.py
|
88
|
7261
|
#!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import tempfile
from boto.compat import Queue
from tests.compat import mock, unittest
from tests.unit import AWSMockServiceTestCase
from boto.glacier.concurrent import ConcurrentUploader, ConcurrentDownloader
from boto.glacier.concurrent import UploadWorkerThread
from boto.glacier.concurrent import _END_SENTINEL
class FakeThreadedConcurrentUploader(ConcurrentUploader):
def _start_upload_threads(self, results_queue, upload_id,
worker_queue, filename):
self.results_queue = results_queue
self.worker_queue = worker_queue
self.upload_id = upload_id
def _wait_for_upload_threads(self, hash_chunks, result_queue, total_parts):
for i in range(total_parts):
hash_chunks[i] = b'foo'
class FakeThreadedConcurrentDownloader(ConcurrentDownloader):
def _start_download_threads(self, results_queue, worker_queue):
self.results_queue = results_queue
self.worker_queue = worker_queue
def _wait_for_download_threads(self, filename, result_queue, total_parts):
pass
class TestConcurrentUploader(unittest.TestCase):
def setUp(self):
super(TestConcurrentUploader, self).setUp()
self.stat_patch = mock.patch('os.stat')
self.addCleanup(self.stat_patch.stop)
self.stat_mock = self.stat_patch.start()
# Give a default value for tests that don't care
# what the file size is.
self.stat_mock.return_value.st_size = 1024 * 1024 * 8
def test_calculate_required_part_size(self):
self.stat_mock.return_value.st_size = 1024 * 1024 * 8
uploader = ConcurrentUploader(mock.Mock(), 'vault_name')
total_parts, part_size = uploader._calculate_required_part_size(
1024 * 1024 * 8)
self.assertEqual(total_parts, 2)
self.assertEqual(part_size, 4 * 1024 * 1024)
def test_calculate_required_part_size_too_small(self):
too_small = 1 * 1024 * 1024
self.stat_mock.return_value.st_size = 1024 * 1024 * 1024
uploader = ConcurrentUploader(mock.Mock(), 'vault_name',
part_size=too_small)
total_parts, part_size = uploader._calculate_required_part_size(
1024 * 1024 * 1024)
self.assertEqual(total_parts, 256)
# Part size is 4MB, not the passed-in 1MB.
self.assertEqual(part_size, 4 * 1024 * 1024)
def test_work_queue_is_correctly_populated(self):
uploader = FakeThreadedConcurrentUploader(mock.MagicMock(),
'vault_name')
uploader.upload('foofile')
q = uploader.worker_queue
items = [q.get() for i in range(q.qsize())]
self.assertEqual(items[0], (0, 4 * 1024 * 1024))
self.assertEqual(items[1], (1, 4 * 1024 * 1024))
# 2 for the parts, 10 for the end sentinels (10 threads).
self.assertEqual(len(items), 12)
def test_correct_low_level_api_calls(self):
api_mock = mock.MagicMock()
uploader = FakeThreadedConcurrentUploader(api_mock, 'vault_name')
uploader.upload('foofile')
# The threads call the upload_part, so we're just verifying the
# initiate/complete multipart API calls.
api_mock.initiate_multipart_upload.assert_called_with(
'vault_name', 4 * 1024 * 1024, None)
api_mock.complete_multipart_upload.assert_called_with(
'vault_name', mock.ANY, mock.ANY, 8 * 1024 * 1024)
def test_downloader_work_queue_is_correctly_populated(self):
job = mock.MagicMock()
job.archive_size = 8 * 1024 * 1024
downloader = FakeThreadedConcurrentDownloader(job)
downloader.download('foofile')
q = downloader.worker_queue
items = [q.get() for i in range(q.qsize())]
self.assertEqual(items[0], (0, 4 * 1024 * 1024))
self.assertEqual(items[1], (1, 4 * 1024 * 1024))
# 2 for the parts, 10 for the end sentinels (10 threads).
self.assertEqual(len(items), 12)
class TestUploaderThread(unittest.TestCase):
def setUp(self):
self.fileobj = tempfile.NamedTemporaryFile()
self.filename = self.fileobj.name
def test_fileobj_closed_when_thread_shuts_down(self):
thread = UploadWorkerThread(mock.Mock(), 'vault_name',
self.filename, 'upload_id',
Queue(), Queue())
fileobj = thread._fileobj
self.assertFalse(fileobj.closed)
# By setting should_continue to False, it should immediately
# exit, and we can still verify cleanup behavior.
thread.should_continue = False
thread.run()
self.assertTrue(fileobj.closed)
def test_upload_errors_have_exception_messages(self):
api = mock.Mock()
job_queue = Queue()
result_queue = Queue()
upload_thread = UploadWorkerThread(
api, 'vault_name', self.filename,
'upload_id', job_queue, result_queue, num_retries=1,
time_between_retries=0)
api.upload_part.side_effect = Exception("exception message")
job_queue.put((0, 1024))
job_queue.put(_END_SENTINEL)
upload_thread.run()
result = result_queue.get(timeout=1)
self.assertIn("exception message", str(result))
def test_num_retries_is_obeyed(self):
# total attempts is 1 + num_retries so if I have num_retries of 2,
# I'll attempt the upload once, and if that fails I'll retry up to
# 2 more times for a total of 3 attempts.
api = mock.Mock()
job_queue = Queue()
result_queue = Queue()
upload_thread = UploadWorkerThread(
api, 'vault_name', self.filename,
'upload_id', job_queue, result_queue, num_retries=2,
time_between_retries=0)
api.upload_part.side_effect = Exception()
job_queue.put((0, 1024))
job_queue.put(_END_SENTINEL)
upload_thread.run()
self.assertEqual(api.upload_part.call_count, 3)
if __name__ == '__main__':
unittest.main()
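# Illustrative note (not part of the boto test suite): the worker tests above rely
# on the sentinel-terminated queue pattern. Each UploadWorkerThread keeps pulling
# (part_number, part_size) jobs from its queue and stops as soon as it receives
# _END_SENTINEL, e.g.
#
#   job_queue = Queue()
#   job_queue.put((0, 1024))        # one part to upload
#   job_queue.put(_END_SENTINEL)    # tells run() to exit and close its file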
|
bsd-3-clause
|
bikong2/django
|
tests/sitemaps_tests/base.py
|
380
|
1079
|
from django.apps import apps
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.test import TestCase, modify_settings, override_settings
from .models import I18nTestModel, TestModel
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sitemaps'})
@override_settings(ROOT_URLCONF='sitemaps_tests.urls.http')
class SitemapTestsBase(TestCase):
protocol = 'http'
sites_installed = apps.is_installed('django.contrib.sites')
domain = 'example.com' if sites_installed else 'testserver'
def setUp(self):
self.base_url = '%s://%s' % (self.protocol, self.domain)
cache.clear()
# Create an object for sitemap content.
TestModel.objects.create(name='Test Object')
self.i18n_model = I18nTestModel.objects.create(name='Test Object')
@classmethod
def setUpClass(cls):
super(SitemapTestsBase, cls).setUpClass()
# This cleanup is necessary because contrib.sites cache
# makes tests interfere with each other, see #11505
Site.objects.clear_cache()
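# Illustrative sketch (hypothetical test, not part of Django's suite): concrete
# sitemap test cases subclass SitemapTestsBase and rely on self.base_url plus the
# TestModel/I18nTestModel objects created in setUp. The URL below is an assumed
# name from sitemaps_tests.urls.http, shown only to illustrate the pattern:
#
#   class SimpleSitemapTests(SitemapTestsBase):
#       def test_sitemap_contains_base_url(self):
#           response = self.client.get('/simple/sitemap.xml')
#           self.assertContains(response, self.base_url)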
|
bsd-3-clause
|
xavfernandez/pip
|
src/pip/_internal/wheel_builder.py
|
1
|
9857
|
"""Orchestrator for building wheels from InstallRequirements.
"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
import logging
import os.path
import re
import shutil
from pip._internal.models.link import Link
from pip._internal.operations.build.wheel import build_wheel_pep517
from pip._internal.operations.build.wheel_legacy import build_wheel_legacy
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import ensure_dir, hash_file, is_wheel_installed
from pip._internal.utils.setuptools_build import make_setuptools_clean_args
from pip._internal.utils.subprocess import call_subprocess
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import path_to_url
from pip._internal.vcs import vcs
if MYPY_CHECK_RUNNING:
from typing import (
Any, Callable, Iterable, List, Optional, Pattern, Tuple,
)
from pip._internal.cache import WheelCache
from pip._internal.req.req_install import InstallRequirement
BinaryAllowedPredicate = Callable[[InstallRequirement], bool]
BuildResult = Tuple[List[InstallRequirement], List[InstallRequirement]]
logger = logging.getLogger(__name__)
def _contains_egg_info(
s, _egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)):
# type: (str, Pattern[str]) -> bool
"""Determine whether the string looks like an egg_info.
:param s: The string to parse. E.g. foo-2.1
"""
return bool(_egg_info_re.search(s))
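# Illustrative examples (not in the original source):
#   _contains_egg_info('pip-20.0.2')  -> True   (matches a name-version pair)
#   _contains_egg_info('pip')         -> False  (no '-<version>' part)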
def _should_build(
req, # type: InstallRequirement
need_wheel, # type: bool
check_binary_allowed, # type: BinaryAllowedPredicate
):
# type: (...) -> bool
"""Return whether an InstallRequirement should be built into a wheel."""
if req.constraint:
# never build requirements that are merely constraints
return False
if req.is_wheel:
if need_wheel:
logger.info(
'Skipping %s, due to already being wheel.', req.name,
)
return False
if need_wheel:
# i.e. pip wheel, not pip install
return True
# From this point, this concerns the pip install command only
# (need_wheel=False).
if not req.use_pep517 and not is_wheel_installed():
# we don't build legacy requirements if wheel is not installed
return False
if req.editable or not req.source_dir:
return False
if not check_binary_allowed(req):
logger.info(
"Skipping wheel build for %s, due to binaries "
"being disabled for it.", req.name,
)
return False
return True
def should_build_for_wheel_command(
req, # type: InstallRequirement
):
# type: (...) -> bool
return _should_build(
req, need_wheel=True, check_binary_allowed=_always_true
)
def should_build_for_install_command(
req, # type: InstallRequirement
check_binary_allowed, # type: BinaryAllowedPredicate
):
# type: (...) -> bool
return _should_build(
req, need_wheel=False, check_binary_allowed=check_binary_allowed
)
def _should_cache(
req, # type: InstallRequirement
check_binary_allowed, # type: BinaryAllowedPredicate
):
# type: (...) -> Optional[bool]
"""
Return whether a built InstallRequirement can be stored in the persistent
wheel cache, assuming the wheel cache is available, and _should_build()
has determined a wheel needs to be built.
"""
if not should_build_for_install_command(
req, check_binary_allowed=check_binary_allowed
):
# never cache if pip install would not have built
# (editable mode, etc)
return False
if req.link and req.link.is_vcs:
# VCS checkout. Do not cache
# unless it points to an immutable commit hash.
assert not req.editable
assert req.source_dir
vcs_backend = vcs.get_backend_for_scheme(req.link.scheme)
assert vcs_backend
if vcs_backend.is_immutable_rev_checkout(req.link.url, req.source_dir):
return True
return False
base, ext = req.link.splitext()
if _contains_egg_info(base):
return True
# Otherwise, do not cache.
return False
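# Illustrative summary (not in the original source): per the logic above, a link
# such as 'pkg-1.0.tar.gz' is cacheable (its base contains an egg_info-style
# name-version), a VCS checkout pinned to an immutable commit hash is cacheable,
# and a mutable VCS ref (e.g. a branch checkout) is not.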
def _get_cache_dir(
req, # type: InstallRequirement
wheel_cache, # type: WheelCache
check_binary_allowed, # type: BinaryAllowedPredicate
):
# type: (...) -> str
"""Return the persistent or temporary cache directory where the built
wheel needs to be stored.
"""
cache_available = bool(wheel_cache.cache_dir)
if (
cache_available and
_should_cache(req, check_binary_allowed)
):
cache_dir = wheel_cache.get_path_for_link(req.link)
else:
cache_dir = wheel_cache.get_ephem_path_for_link(req.link)
return cache_dir
def _always_true(_):
# type: (Any) -> bool
return True
def _build_one(
req, # type: InstallRequirement
output_dir, # type: str
build_options, # type: List[str]
global_options, # type: List[str]
):
# type: (...) -> Optional[str]
"""Build one wheel.
:return: The filename of the built wheel, or None if the build failed.
"""
try:
ensure_dir(output_dir)
except OSError as e:
logger.warning(
"Building wheel for %s failed: %s",
req.name, e,
)
return None
# Install build deps into temporary directory (PEP 518)
with req.build_env:
return _build_one_inside_env(
req, output_dir, build_options, global_options
)
def _build_one_inside_env(
req, # type: InstallRequirement
output_dir, # type: str
build_options, # type: List[str]
global_options, # type: List[str]
):
# type: (...) -> Optional[str]
with TempDirectory(kind="wheel") as temp_dir:
if req.use_pep517:
wheel_path = build_wheel_pep517(
name=req.name,
backend=req.pep517_backend,
metadata_directory=req.metadata_directory,
build_options=build_options,
tempd=temp_dir.path,
)
else:
wheel_path = build_wheel_legacy(
name=req.name,
setup_py_path=req.setup_py_path,
source_dir=req.unpacked_source_directory,
global_options=global_options,
build_options=build_options,
tempd=temp_dir.path,
)
if wheel_path is not None:
wheel_name = os.path.basename(wheel_path)
dest_path = os.path.join(output_dir, wheel_name)
try:
wheel_hash, length = hash_file(wheel_path)
shutil.move(wheel_path, dest_path)
logger.info('Created wheel for %s: '
'filename=%s size=%d sha256=%s',
req.name, wheel_name, length,
wheel_hash.hexdigest())
logger.info('Stored in directory: %s', output_dir)
return dest_path
except Exception as e:
logger.warning(
"Building wheel for %s failed: %s",
req.name, e,
)
# Ignore return, we can't do anything else useful.
if not req.use_pep517:
_clean_one_legacy(req, global_options)
return None
def _clean_one_legacy(req, global_options):
# type: (InstallRequirement, List[str]) -> bool
clean_args = make_setuptools_clean_args(
req.setup_py_path,
global_options=global_options,
)
logger.info('Running setup.py clean for %s', req.name)
try:
call_subprocess(clean_args, cwd=req.source_dir)
return True
except Exception:
logger.error('Failed cleaning build dir for %s', req.name)
return False
def build(
requirements, # type: Iterable[InstallRequirement]
wheel_cache, # type: WheelCache
build_options, # type: List[str]
global_options, # type: List[str]
check_binary_allowed=None, # type: Optional[BinaryAllowedPredicate]
):
# type: (...) -> BuildResult
"""Build wheels.
:return: The list of InstallRequirement that succeeded to build and
the list of InstallRequirement that failed to build.
"""
if check_binary_allowed is None:
# Binaries allowed by default.
check_binary_allowed = _always_true
if not requirements:
return [], []
# Build the wheels.
logger.info(
'Building wheels for collected packages: %s',
', '.join(req.name for req in requirements),
)
with indent_log():
build_successes, build_failures = [], []
for req in requirements:
cache_dir = _get_cache_dir(
req, wheel_cache, check_binary_allowed
)
wheel_file = _build_one(
req, cache_dir, build_options, global_options
)
if wheel_file:
# Update the link for this.
req.link = Link(path_to_url(wheel_file))
req.local_file_path = req.link.file_path
assert req.link.is_wheel
build_successes.append(req)
else:
build_failures.append(req)
# notify success/failure
if build_successes:
logger.info(
'Successfully built %s',
' '.join([req.name for req in build_successes]),
)
if build_failures:
logger.info(
'Failed to build %s',
' '.join([req.name for req in build_failures]),
)
# Return a list of requirements that failed to build
return build_successes, build_failures
|
mit
|
lmregus/Portfolio
|
python/design_patterns/env/lib/python3.7/site-packages/idna/codec.py
|
426
|
3299
|
from .core import encode, decode, alabel, ulabel, IDNAError
import codecs
import re
_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]')
class Codec(codecs.Codec):
def encode(self, data, errors='strict'):
if errors != 'strict':
raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
if not data:
return "", 0
return encode(data), len(data)
def decode(self, data, errors='strict'):
if errors != 'strict':
raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
if not data:
return u"", 0
return decode(data), len(data)
class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
def _buffer_encode(self, data, errors, final):
if errors != 'strict':
raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
if not data:
return ("", 0)
labels = _unicode_dots_re.split(data)
trailing_dot = u''
if labels:
if not labels[-1]:
trailing_dot = '.'
del labels[-1]
elif not final:
# Keep potentially unfinished label until the next call
del labels[-1]
if labels:
trailing_dot = '.'
result = []
size = 0
for label in labels:
result.append(alabel(label))
if size:
size += 1
size += len(label)
# Join with U+002E
result = ".".join(result) + trailing_dot
size += len(trailing_dot)
return (result, size)
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
def _buffer_decode(self, data, errors, final):
if errors != 'strict':
raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
if not data:
return (u"", 0)
# IDNA allows decoding to operate on Unicode strings, too.
if isinstance(data, unicode):
labels = _unicode_dots_re.split(data)
else:
# Must be ASCII string
data = str(data)
unicode(data, "ascii")
labels = data.split(".")
trailing_dot = u''
if labels:
if not labels[-1]:
trailing_dot = u'.'
del labels[-1]
elif not final:
# Keep potentially unfinished label until the next call
del labels[-1]
if labels:
trailing_dot = u'.'
result = []
size = 0
for label in labels:
result.append(ulabel(label))
if size:
size += 1
size += len(label)
result = u".".join(result) + trailing_dot
size += len(trailing_dot)
return (result, size)
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry():
return codecs.CodecInfo(
name='idna',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
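# Illustrative usage (not part of the module): the Codec can be used directly;
# encode()/decode() return a (converted value, length consumed) pair, mirroring
# the standard codecs API:
#
#   Codec().encode(u'example.com')           # -> ('example.com', 11)
#   Codec().decode('xn--bcher-kva.example')  # -> (u'b\xfccher.example', 21)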
|
mit
|
fernandopso/python_koans
|
python2/koans/about_packages.py
|
63
|
2143
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This is very different to AboutModules in Ruby Koans
# Our AboutMultipleInheritance class is a little more comparable
#
from runner.koan import *
#
# Package hierarchy of Python Koans project:
#
# contemplate_koans.py
# koans/
# __init__.py
# about_asserts.py
# about_attribute_access.py
# about_class_attributes.py
# about_classes.py
# ...
# a_package_folder/
# __init__.py
# a_module.py
class AboutPackages(Koan):
def test_subfolders_can_form_part_of_a_module_package(self):
# Import ./a_package_folder/a_module.py
from a_package_folder.a_module import Duck
duck = Duck()
self.assertEqual(__, duck.name)
def test_subfolders_become_modules_if_they_have_an_init_module(self):
# Import ./a_package_folder/__init__.py
from a_package_folder import an_attribute
self.assertEqual(__, an_attribute)
def test_subfolders_without_an_init_module_are_not_part_of_the_package(self):
# Import ./a_normal_folder/
try:
import a_normal_folder
except ImportError as ex:
self.assertMatch(__, ex[0])
# ------------------------------------------------------------------
def test_use_absolute_imports_to_import_upper_level_modules(self):
# Import /contemplate_koans.py
import contemplate_koans
self.assertEqual(__, contemplate_koans.__name__)
# contemplate_koans.py is the root module in this package because it's
# the first Python module called in the koans.
#
# If contemplate_koans.py were located in a_package_folder, that folder
# would become the root, which would make reaching the koans folder
# almost impossible. So always leave the starting Python script in
# a folder which can reach everything else.
def test_import_a_module_in_a_subfolder_using_an_absolute_path(self):
# Import contemplate_koans.py/koans/a_package_folder/a_module.py
from koans.a_package_folder.a_module import Duck
self.assertEqual(__, Duck.__module__)
|
mit
|
disqus/kombu
|
kombu/utils/compat.py
|
1
|
7887
|
import sys
############## __builtin__.all ##############################################
try:
all([True])
all = all
except NameError:
def all(iterable):
for item in iterable:
if not item:
return False
return True
############## __builtin__.any ##############################################
try:
any([True])
any = any
except NameError:
def any(iterable):
for item in iterable:
if item:
return True
return False
############## collections.OrderedDict #######################################
import weakref
try:
from collections import MutableMapping
except ImportError:
from UserDict import DictMixin as MutableMapping
from itertools import imap as _imap
from operator import eq as _eq
class _Link(object):
"""Doubly linked list."""
__slots__ = 'prev', 'next', 'key', '__weakref__'
class CompatOrderedDict(dict, MutableMapping):
"""Dictionary that remembers insertion order"""
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular
# dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly
# linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# The prev/next links are weakref proxies (to prevent circular
# references).
# Individual links are kept alive by the hard reference in self.__map.
# Those hard references disappear when a key is deleted from
# an OrderedDict.
__marker = object()
def __init__(self, *args, **kwds):
"""Initialize an ordered dictionary.
Signature is the same as for regular dictionaries, but keyword
arguments are not recommended because their insertion order is
arbitrary.
"""
if len(args) > 1:
raise TypeError("expected at most 1 arguments, got %d" % (
len(args)))
try:
self.__root
except AttributeError:
# sentinel node for the doubly linked list
self.__root = root = _Link()
root.prev = root.next = root
self.__map = {}
self.update(*args, **kwds)
def clear(self):
"od.clear() -> None. Remove all items from od."
root = self.__root
root.prev = root.next = root
self.__map.clear()
dict.clear(self)
def __setitem__(self, key, value):
"od.__setitem__(i, y) <==> od[i]=y"
# Setting a new item creates a new link which goes at the end of the
# linked list, and the inherited dictionary is updated with the new
# key/value pair.
if key not in self:
self.__map[key] = link = _Link()
root = self.__root
last = root.prev
link.prev, link.next, link.key = last, root, key
last.next = root.prev = weakref.proxy(link)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
"""od.__delitem__(y) <==> del od[y]"""
# Deleting an existing item uses self.__map to find the
# link which is then removed by updating the links in the
# predecessor and successor nodes.
dict.__delitem__(self, key)
link = self.__map.pop(key)
link.prev.next = link.next
link.next.prev = link.prev
def __iter__(self):
"""od.__iter__() <==> iter(od)"""
# Traverse the linked list in order.
root = self.__root
curr = root.next
while curr is not root:
yield curr.key
curr = curr.next
def __reversed__(self):
"""od.__reversed__() <==> reversed(od)"""
# Traverse the linked list in reverse order.
root = self.__root
curr = root.prev
while curr is not root:
yield curr.key
curr = curr.prev
def __reduce__(self):
"""Return state information for pickling"""
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__root
del(self.__map, self.__root)
inst_dict = vars(self).copy()
self.__map, self.__root = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def update(self, other=(), **kwds):
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, "keys"):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
def pop(self, key, default=__marker):
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def values(self):
return [self[key] for key in self]
def items(self):
return [(key, self[key]) for key in self]
def itervalues(self):
for key in self:
yield self[key]
def iteritems(self):
for key in self:
yield (key, self[key])
def iterkeys(self):
return iter(self)
def keys(self):
return list(self)
def popitem(self, last=True):
"""od.popitem() -> (k, v)
Return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO
order if false.
"""
if not self:
raise KeyError('dictionary is empty')
if last:
if sys.platform.startswith("java"):
key = self.keys()[-1]
else:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __repr__(self):
"od.__repr__() <==> repr(od)"
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
"od.copy() -> a shallow copy of od"
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
"""OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None)."""
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
"""od.__eq__(y) <==> od==y. Comparison to another OD is
order-sensitive while comparison to a regular mapping
is order-insensitive."""
if isinstance(other, OrderedDict):
return len(self) == len(other) and \
all(_imap(_eq, self.iteritems(), other.iteritems()))
return dict.__eq__(self, other)
def __ne__(self, other):
return not (self == other)
try:
from collections import OrderedDict
except ImportError:
OrderedDict = CompatOrderedDict
############## queue.LifoQueue ##############################################
from Queue import Queue
class LifoQueue(Queue):
def _init(self, maxsize):
self.queue = []
self.maxsize = maxsize
def _qsize(self, len=len):
return len(self.queue)
def _put(self, item):
self.queue.append(item)
def _get(self):
return self.queue.pop()
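# Illustrative checks (not part of the module):
#   d = CompatOrderedDict()
#   d['b'] = 1; d['a'] = 2
#   d.keys()        # -> ['b', 'a']  (insertion order preserved)
#   q = LifoQueue()
#   q.put(1); q.put(2)
#   q.get()         # -> 2  (last in, first out)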
|
bsd-3-clause
|
longyangking/ML
|
keras/GAN.py
|
1
|
7748
|
'''
Learn Gaussian Distribution
'''
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Reshape
from keras.layers import Conv2D, Conv2DTranspose, UpSampling2D
from keras.layers import LeakyReLU, Dropout
from keras.layers import BatchNormalization
from keras.optimizers import Adam, RMSprop
import matplotlib.pyplot as plt
class DCGAN(object):
def __init__(self, img_rows, img_cols, channel,input_dim):
self.img_rows = img_rows
self.img_cols = img_cols
self.channel = channel
self.input_dim = input_dim
self.D = None # discriminator
self.G = None # generator
self.AM = None # adversarial model
self.DM = None # discriminator model
# (W−F+2P)/S+1
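# For reference: with explicit padding P the conv output width is (W - F + 2P)/S + 1;
# with Keras padding='same' and stride S the spatial size is ceil(W/S), which is
# how the 28x28 discriminator input below becomes 14x14 after the first Conv2D.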
def discriminator(self):
if self.D:
return self.D
self.D = Sequential()
depth = 64
dropout = 0.4
# In: 28 x 28 x 1, depth = 1
# Out: 14 x 14 x 1, depth=64
input_shape = (self.img_rows, self.img_cols, self.channel)
self.D.add(Conv2D(depth*1, 5, strides=2, input_shape=input_shape,\
padding='same'))
self.D.add(LeakyReLU(alpha=0.2))
self.D.add(Dropout(dropout))
self.D.add(Conv2D(depth*2, 5, strides=2, padding='same'))
self.D.add(LeakyReLU(alpha=0.2))
self.D.add(Dropout(dropout))
self.D.add(Conv2D(depth*4, 5, strides=2, padding='same'))
self.D.add(LeakyReLU(alpha=0.2))
self.D.add(Dropout(dropout))
self.D.add(Conv2D(depth*8, 5, strides=1, padding='same'))
self.D.add(LeakyReLU(alpha=0.2))
self.D.add(Dropout(dropout))
# Out: 1-dim probability
self.D.add(Flatten())
self.D.add(Dense(1))
self.D.add(Activation('sigmoid'))
self.D.summary()
return self.D
def generator(self):
if self.G:
return self.G
self.G = Sequential()
dropout = 0.4
depth = 4*int((self.img_cols + self.img_rows)/2)
dim = int(self.img_cols/4)
# In: 100
# Out: dim x dim x depth
self.G.add(Dense(dim*dim*depth, input_dim=self.input_dim))
self.G.add(BatchNormalization(momentum=0.9))
self.G.add(Activation('relu'))
self.G.add(Reshape((dim, dim, depth)))
self.G.add(Dropout(dropout))
# In: dim x dim x depth
# Out: 2*dim x 2*dim x depth/2
self.G.add(UpSampling2D())
self.G.add(Conv2DTranspose(int(depth/2), 5, padding='same'))
self.G.add(BatchNormalization(momentum=0.9))
self.G.add(Activation('relu'))
self.G.add(UpSampling2D())
self.G.add(Conv2DTranspose(int(depth/4), 5, padding='same'))
self.G.add(BatchNormalization(momentum=0.9))
self.G.add(Activation('relu'))
self.G.add(Conv2DTranspose(int(depth/8), 5, padding='same'))
self.G.add(BatchNormalization(momentum=0.9))
self.G.add(Activation('relu'))
# Out: 28 x 28 x 1 grayscale image [0.0,1.0] per pix
self.G.add(Conv2DTranspose(1, 5, padding='same'))
self.G.add(Activation('sigmoid'))
self.G.summary()
return self.G
def discriminator_model(self):
if self.DM:
return self.DM
optimizer = RMSprop(lr=0.0002, decay=6e-8)
self.DM = Sequential()
self.DM.add(self.discriminator())
self.DM.compile(loss='binary_crossentropy', optimizer=optimizer,\
metrics=['accuracy'])
return self.DM
def adversarial_model(self):
if self.AM:
return self.AM
optimizer = RMSprop(lr=0.0001, decay=3e-8)
self.AM = Sequential()
self.AM.add(self.generator())
self.AM.add(self.discriminator())
self.AM.compile(loss='binary_crossentropy', optimizer=optimizer,\
metrics=['accuracy'])
return self.AM
class GAUSSIAN_DCGAN:
def __init__(self,img_rows,img_cols,channel,input_data,input_dim):
self.img_rows = img_rows
self.img_cols = img_cols
self.channel = channel
self.input_dim = input_dim
self.x_train = input_data.reshape(-1, self.img_rows,self.img_cols, 1).astype(np.float32)
self.DCGAN = DCGAN(img_rows=img_rows, img_cols=img_cols, channel=channel,input_dim=input_dim)
self.discriminator = self.DCGAN.discriminator_model()
self.adversarial = self.DCGAN.adversarial_model()
self.generator = self.DCGAN.generator()
def train(self, train_steps=2000, batch_size=256, save_interval=0,samples=16):
noise_input = None
if save_interval>0:
noise_input = np.random.uniform(-1.0, 1.0, size=[samples, self.input_dim])
for i in range(train_steps):
images_train = self.x_train[np.random.randint(0,self.x_train.shape[0], size=batch_size), :, :, :]
noise = np.random.uniform(-1.0, 1.0, size=[batch_size, self.input_dim])
images_fake = self.generator.predict(noise)
x = np.concatenate((images_train, images_fake))
y = np.ones([2*batch_size, 1])
y[batch_size:, :] = 0
d_loss = self.discriminator.train_on_batch(x, y)
y = np.ones([batch_size, 1])
noise = np.random.uniform(-1.0, 1.0, size=[batch_size, self.input_dim])
a_loss = self.adversarial.train_on_batch(noise, y)
log_mesg = "%d: [D loss: %f, acc: %f]" % (i, d_loss[0], d_loss[1])
log_mesg = "%s [A loss: %f, acc: %f]" % (log_mesg, a_loss[0], a_loss[1])
print(log_mesg)
if save_interval>0:
if (i+1)%save_interval==0:
self.plot_images(save2file=True, samples=noise_input.shape[0],\
noise=noise_input, step=(i+1))
def plot_images(self, save2file=False, fake=True, samples=16, noise=None, step=0):
filename = 'mnist.png'
if fake:
if noise is None:
noise = np.random.uniform(-1.0, 1.0, size=[samples, 100])
else:
filename = "mnist_%d.png" % step
images = self.generator.predict(noise)
else:
i = np.random.randint(0, self.x_train.shape[0], samples)
images = self.x_train[i, :, :, :]
plt.figure(figsize=(10,10))
for i in range(images.shape[0]):
plt.subplot(4, 4, i+1)
image = images[i, :, :, :]
image = np.reshape(image, [self.img_rows, self.img_cols])
plt.imshow(image, cmap='gray')
plt.axis('off')
plt.tight_layout()
if save2file:
plt.savefig(filename)
plt.close('all')
else:
plt.show()
if __name__ == '__main__':
input_dim = 100
img_rows,img_cols,channel = 4*7,4*7,1
n_samples = 1000
input_data = np.zeros((n_samples,img_rows,img_cols))
print('Preparing....')
for i in range(n_samples):
if i < n_samples/3:
x = np.arange(img_rows)
y = np.arange(img_cols)
xv,yv = np.meshgrid(x,y)
input_data[i,:,:] = np.exp(-(np.square(xv-img_rows/2) + np.square(yv-img_cols/2))/10)
elif i > n_samples/3*2:
input_data[i,int(img_rows/3):int(img_rows/3*2),:] = 1
else:
input_data[i,:,int(img_rows/3):int(img_rows/3*2)] = 1
print('Training....')
#plt.imshow(input_data[0,:,:])
#plt.show()
gaussian_dcgan = GAUSSIAN_DCGAN(img_rows,img_cols,channel,input_data,input_dim)
gaussian_dcgan.train(train_steps=200, batch_size=30, save_interval=0)
#gaussian_dcgan.plot_images(fake=True)
gaussian_dcgan.plot_images(fake=True, save2file=True)
|
lgpl-3.0
|
edfungus/Crouton-Python-Example
|
env/lib/python2.7/site-packages/pip/_vendor/progress/helpers.py
|
404
|
2894
|
# Copyright (c) 2012 Giorgos Verigakis <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import print_function
from __future__ import unicode_literals
HIDE_CURSOR = '\x1b[?25l'
SHOW_CURSOR = '\x1b[?25h'
class WriteMixin(object):
hide_cursor = False
def __init__(self, message=None, **kwargs):
super(WriteMixin, self).__init__(**kwargs)
self._width = 0
if message:
self.message = message
if self.file.isatty():
if self.hide_cursor:
print(HIDE_CURSOR, end='', file=self.file)
print(self.message, end='', file=self.file)
self.file.flush()
def write(self, s):
if self.file.isatty():
b = '\b' * self._width
c = s.ljust(self._width)
print(b + c, end='', file=self.file)
self._width = max(self._width, len(s))
self.file.flush()
def finish(self):
if self.file.isatty() and self.hide_cursor:
print(SHOW_CURSOR, end='', file=self.file)
class WritelnMixin(object):
hide_cursor = False
def __init__(self, message=None, **kwargs):
super(WritelnMixin, self).__init__(**kwargs)
if message:
self.message = message
if self.file.isatty() and self.hide_cursor:
print(HIDE_CURSOR, end='', file=self.file)
def clearln(self):
if self.file.isatty():
print('\r\x1b[K', end='', file=self.file)
def writeln(self, line):
if self.file.isatty():
self.clearln()
print(line, end='', file=self.file)
self.file.flush()
def finish(self):
if self.file.isatty():
print(file=self.file)
if self.hide_cursor:
print(SHOW_CURSOR, end='', file=self.file)
from signal import signal, SIGINT
from sys import exit
class SigIntMixin(object):
"""Registers a signal handler that calls finish on SIGINT"""
def __init__(self, *args, **kwargs):
super(SigIntMixin, self).__init__(*args, **kwargs)
signal(SIGINT, self._sigint_handler)
def _sigint_handler(self, signum, frame):
self.finish()
exit(0)
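# Illustrative sketch (hypothetical class, not part of the progress package): the
# mixins expect their host class to provide `file` and `message`, which the
# package's Infinite/Progress bases normally supply:
#
#   import sys
#   class Spinner(WriteMixin):
#       message = 'working '
#       file = sys.stderr
#   s = Spinner()          # prints the message once on a TTY
#   s.write('[step 1]')    # rewrites the tail of the line in place
#   s.finish()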
|
mit
|
cwoodruf/ygnews
|
ygpurge.py
|
1
|
2187
|
#!/usr/bin/env python
"""
author: Cal Woodruff, [email protected]
remove old data from various tables
should be run daily to ensure the db runs optimally
"""
import ygdb
import time
start = time.time()
conn = ygdb.conn()
cleanup = conn.cursor()
print "purging features"
cleanup.execute("delete from features where created_at < now() - interval 2 day")
conn.commit()
elapsed = time.time() - start
print "purging tweets",elapsed,"s"
cleanup.execute("delete from tweets where id not in (select id from features)")
conn.commit()
elapsed = time.time() - start
print "purging tweetwords",elapsed,"s"
cleanup.execute("delete from tweetwords where id not in (select id from features)")
conn.commit()
elapsed = time.time() - start
print "purging links",elapsed,"s"
cleanup.execute("delete from links where id not in (select id from features)")
conn.commit()
elapsed = time.time() - start
print "purging tweetbigrams",elapsed,"s"
cleanup.execute("delete from tweetbigrams where id not in (select id from features)")
conn.commit()
elapsed = time.time() - start
print "purging similarity",elapsed,"s"
cleanup.execute("delete from similarity where id1 not in (select id from features)")
cleanup.execute("delete from similarity where id2 not in (select id from features)")
conn.commit()
elapsed = time.time() - start
print "purging pairs",elapsed,"s"
# deleting stuff from the pairs table doesn't seem to be happening as expected ...
# this takes a lot longer but is guaranteed to produce just what we want
# we assume that ygpairs.py has been run first
try:
cleanup.execute("drop table pairs_new")
except:
pass
cleanup.execute("create table pairs_new (like pairs_template)")
cleanup.execute(
"insert into pairs_new "
"select * from pairs "
"where id1 in (select id from features where selected is not null) "
"and id2 in (select id from features where selected is not null) "
"and ip is not null"
)
cleanup.execute("alter table pairs rename to pairs_deleted")
cleanup.execute("alter table pairs_new rename to pairs")
cleanup.execute("drop table pairs_deleted")
conn.commit()
elapsed = time.time() - start
print "finished",elapsed
cleanup.close()
conn.close()
print "done"
|
gpl-3.0
|
skyportsystems/vitess
|
py/vtdb/sql_builder.py
|
7
|
31542
|
"""Helper classes for building queries.
Helper classes and functions for building queries.
"""
import itertools
import pprint
import time
# TODO(dumbunny): integration with SQL Alchemy ?
class DBRow(object):
"""An object with an attr for every column returned by a query."""
def __init__(self, column_names=None, row_tuple=None, **overrides):
"""Init DBRow from zip(column_names, row_tuple) and/or overrides.
Args:
column_names: List or tuple of str column names.
row_tuple: List or tuple of str column values.
**overrides: Additional (str name: value) pairs.
Raises:
ValueError: If len(column_names) and len(row_tuple) differ.
"""
column_names = column_names or ()
row_tuple = row_tuple or ()
if len(column_names) != len(row_tuple):
raise ValueError('column_names / row_tuple mismatch.')
self.__dict__ = dict(zip(column_names, row_tuple), **overrides)
def __repr__(self):
return pprint.pformat(self.__dict__, 4)
def select_clause(
select_columns, table_name, alias=None, order_by=None):
"""Build the select clause for a query.
Args:
select_columns: Str column names.
table_name: Str table name.
alias: Str alias for table if defined.
order_by: A str, str list, or list of str word lists, where each
element is an order-by expr: either a str ('col_a ASC' or 'col_a')
or a list of words (['col_a', 'ASC']).
Returns:
Str like "SELECT col_a, col_b FROM my_table".
"""
if alias:
return 'SELECT %s FROM %s %s' % (
colstr(select_columns, alias, order_by=order_by),
table_name, alias)
return 'SELECT %s FROM %s' % (
colstr(select_columns, alias, order_by=order_by),
table_name)
def colstr(
select_columns, alias=None, bind=None, order_by=None):
"""Return columns clause for a SELECT query.
Args:
select_columns: Str column names.
alias: Table alias for these columns.
bind: A list of columns to get. Ignore columns not in bind.
order_by: A str or item list, where each item is a str or a str list
of words. Example: ['col_a', ('col_b', 'ASC')]. This is only
used in the client_aggregate option of select_by_columns_query;
normally, order_by should be None.
Returns:
Comma-delimited names of columns.
"""
# avoid altering select_columns parameter.
cols = select_columns[:]
# In the case of a scatter/gather, prepend these columns to
# facilitate an in-code sort - after that, we can just strip these
# off and process normally.
if order_by:
words_list = _normalize_order_by(order_by)
cols = [words[0] for words in words_list] + cols
if not bind:
bind = cols
def col_with_prefix(col):
"""Prepend alias to col if it makes sense."""
if isinstance(col, BaseSQLSelectExpr):
return col.select_sql(alias)
if alias and '.' not in col:
col = '%s.%s' % (alias, col)
return col
return ', '.join([col_with_prefix(c) for c in cols if c in bind])
def build_values_clause(columns, bind_vars):
"""Builds values clause for an INSERT query.
Ignore columns that do not have an associated bind var.
Update bind_vars.
Args:
columns: Str column list.
bind_vars: A (str: value) dict of bind variables.
Returns:
Str comma-delimited SQL format like '%(status)s, %(type)s',
list of names of columns, like ['status', 'type'].
"""
clause_parts = []
bind_list = []
for column in columns:
if column in bind_vars:
bind_list.append(column)
if isinstance(bind_vars[column], BaseSQLInsertExpr):
sql, new_bind_vars = bind_vars[column].build_insert_sql()
bind_vars[column] = sql
update_bind_vars(bind_vars, new_bind_vars)
clause_parts.append('%%(%s)s' % column)
elif column in ('time_created', 'time_updated'):
bind_list.append(column)
clause_parts.append('%%(%s)s' % column)
bind_vars[column] = int(time.time())
return ', '.join(clause_parts), bind_list
def build_in(column, items, alt_name=None, counter=None):
"""Build SQL IN statement and bind dict.
Args:
column: Str column name.
items: List of 1 or more values for IN statement.
alt_name: Name to use for format token keys. Use column by default.
counter: An itertools.count object.
Returns:
Str comma-delimited SQL format, (str: value) dict corresponding
to format tokens.
Raises:
ValueError: On bad input.
"""
if not items:
raise ValueError('Called with empty items')
base = alt_name if alt_name else column
bind_list = make_bind_list(base, items, counter=counter)
sql = '%s IN (%s)' % (
column, ', '.join('%(' + pair[0] + ')s' for pair in bind_list))
return sql, dict(bind_list)
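# Illustrative example (not in the original source):
#   build_in('status', ['queued', 'running'])
#   # -> ('status IN (%(status_1)s, %(status_2)s)',
#   #     {'status_1': 'queued', 'status_2': 'running'})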
def build_order_clause(order_by):
"""Get SQL for ORDER BY clause.
Args:
order_by: A str or item list, where each item is a str or a str list
of words. Example: ['col_a', ('col_b', 'ASC')].
Returns:
The str 'ORDER BY ...' clause or ''.
"""
if not order_by:
return ''
words_list = _normalize_order_by(order_by)
return 'ORDER BY %s' % ', '.join(' '.join(words) for words in words_list)
def build_group_clause(group_by):
"""Build group_by clause for a query."""
if not group_by:
return ''
if not isinstance(group_by, (tuple, list)):
group_by = (group_by,)
return 'GROUP BY %s' % ', '.join(group_by)
def build_limit_clause(limit):
"""Build limit clause for a query.
Get a LIMIT clause and bind vars. The LIMIT clause will have either
the form "LIMIT count", the form "LIMIT offset, count", or be the
empty string.
Args:
limit: None, int or 1- or 2-element list or tuple.
Returns:
A (str LIMIT clause, bind vars) pair.
"""
if limit is None:
return '', {}
if not isinstance(limit, (list, tuple)):
limit = (limit,)
bind_vars = {'limit_row_count': limit[0]}
if len(limit) == 1:
return 'LIMIT %(limit_row_count)s', bind_vars
bind_vars = {'limit_offset': limit[0],
'limit_row_count': limit[1]}
return 'LIMIT %(limit_offset)s, %(limit_row_count)s', bind_vars
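# Illustrative examples (not in the original source):
#   build_limit_clause(10)
#   # -> ('LIMIT %(limit_row_count)s', {'limit_row_count': 10})
#   build_limit_clause((20, 10))
#   # -> ('LIMIT %(limit_offset)s, %(limit_row_count)s',
#   #     {'limit_offset': 20, 'limit_row_count': 10})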
def build_where_clause(column_value_pairs):
"""Build the WHERE clause for a query.
Args:
column_value_pairs: A (str, value) list of where expr pairs.
Returns:
A (str WHERE clause, (str: value) dict bind vars) pair.
"""
condition_list = []
bind_vars = {}
counter = itertools.count(1)
for column, value in column_value_pairs:
if isinstance(value, BaseSQLWhereExpr):
clause, clause_bind_vars = value.build_where_sql(column, counter=counter)
update_bind_vars(bind_vars, clause_bind_vars)
condition_list.append(clause)
elif isinstance(value, (tuple, list, set)):
if value:
if isinstance(value, set):
value = sorted(value)
in_clause, in_bind_variables = build_in(
column, value, counter=counter)
update_bind_vars(bind_vars, in_bind_variables)
condition_list.append(in_clause)
else:
condition_list.append('1 = 0')
else:
bind_name = choose_bind_name(column, counter=counter)
update_bind_vars(bind_vars, {bind_name: value})
condition_list.append('%s = %%(%s)s' % (column, bind_name))
# This seems like a hack to avoid returning an empty bind_vars.
if not bind_vars:
bind_vars = dict(column_value_pairs)
where_clause = ' AND '.join(condition_list)
return where_clause, bind_vars
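# Illustrative example (not in the original source):
#   build_where_clause([('status', 'active'), ('type', ['a', 'b'])])
#   # -> ('status = %(status_1)s AND type IN (%(type_2)s, %(type_3)s)',
#   #     {'status_1': 'active', 'type_2': 'a', 'type_3': 'b'})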
def select_by_columns_query(
select_column_list, table_name, column_value_pairs=None,
order_by=None, group_by=None, limit=None, for_update=False,
client_aggregate=False, vt_routing_info=None):
"""Get query and bind vars for a SELECT statement.
Args:
select_column_list: Str column names.
table_name: Str name of table.
column_value_pairs: A (str, value) list of where expr pairs.
order_by: A str or item list, where each item is a str or a str list
of words. Example: ['col_a', ('col_b', 'ASC')]. This is only
used if client_aggregate is True.
group_by: A str or str list of comma-delimited exprs.
limit: An int count or (int offset, int count) pair.
for_update: True for SELECT ... FOR UPDATE query.
client_aggregate: If True, a fetch_aggregate will be sent to
the cursor. This is used in a few places to return a sorted,
limited list from a scatter query. It does not seem very useful.
vt_routing_info: A vtrouting.VTRoutingInfo object that specifies
a keyrange and a keyspace_id-bounding where clause.
Returns:
A (str SELECT query, (str: value) dict bind vars) pair.
"""
if client_aggregate:
clause_list = [select_clause(select_column_list, table_name,
order_by=order_by)]
else:
clause_list = [select_clause(select_column_list, table_name)]
# generate WHERE clause and bind variables
if column_value_pairs:
where_clause, bind_vars = build_where_clause(column_value_pairs)
# add vt routing info
if vt_routing_info:
where_clause, bind_vars = vt_routing_info.update_where_clause(
where_clause, bind_vars)
clause_list += ['WHERE', where_clause]
else:
bind_vars = {}
if group_by:
clause_list.append(build_group_clause(group_by))
if order_by:
clause_list.append(build_order_clause(order_by))
if limit:
clause, limit_bind_vars = build_limit_clause(limit)
clause_list.append(clause)
update_bind_vars(bind_vars, limit_bind_vars)
if for_update:
clause_list.append('FOR UPDATE')
query = ' '.join(clause_list)
return query, bind_vars
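# Illustrative example (not in the original source):
#   select_by_columns_query(['id', 'name'], 'users',
#                           column_value_pairs=[('status', 'active')], limit=10)
#   # -> ('SELECT id, name FROM users WHERE status = %(status_1)s '
#   #     'LIMIT %(limit_row_count)s',
#   #     {'status_1': 'active', 'limit_row_count': 10})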
def update_columns_query(table_name, where_column_value_pairs=None,
update_column_value_pairs=None, limit=None,
order_by=None):
"""Get query and bind vars for an update statement.
Args:
table_name: Str name of table.
where_column_value_pairs: A (str, value) list of where expr pairs.
update_column_value_pairs: A (str, value) list of update set
pairs.
limit: An optional int count or (int offset, int count) pair.
order_by: A str or expr list, where each expr is a str or a str list
of words. Example: ['col_a', ('col_b', 'ASC')].
Returns:
A (str UPDATE query, (str: value) dict bind vars) pair.
Raises:
ValueError: On bad input.
"""
if not where_column_value_pairs:
# We could allow for no where clause, but this is a notoriously
# error-prone construct, so, no.
raise ValueError(
'No where_column_value_pairs: %s.' % (where_column_value_pairs,))
if not update_column_value_pairs:
raise ValueError(
'No update_column_value_pairs: %s.' % (update_column_value_pairs,))
clause_list = []
bind_vars = {}
for i, (column, value) in enumerate(update_column_value_pairs):
if isinstance(value, BaseSQLUpdateExpr):
clause, clause_bind_vars = value.build_update_sql(column)
clause_list.append(clause)
update_bind_vars(bind_vars, clause_bind_vars)
else:
clause_list.append('%s = %%(update_set_%s)s' % (column, i))
bind_vars['update_set_%s' % i] = value
set_clause = ', '.join(clause_list)
where_clause, where_bind_vars = build_where_clause(where_column_value_pairs)
update_bind_vars(bind_vars, where_bind_vars)
query = ('UPDATE %(table)s SET %(set_clause)s WHERE %(where_clause)s'
% {'table': table_name, 'set_clause': set_clause,
'where_clause': where_clause})
additional_clauses = []
if order_by:
additional_clauses.append(build_order_clause(order_by))
if limit:
limit_clause, limit_bind_vars = build_limit_clause(limit)
additional_clauses.append(limit_clause)
update_bind_vars(bind_vars, limit_bind_vars)
if additional_clauses:
query += ' ' + ' '.join(additional_clauses)
return query, bind_vars
def delete_by_columns_query(table_name, where_column_value_pairs=None,
limit=None):
"""Get query and bind vars for a delete statement.
Args:
table_name: Str name of table.
where_column_value_pairs: A (str, value) list of where expr pairs.
limit: An optional int count or (int offset, int count) pair.
Returns:
A (str delete SQL query, (str: value) dict) pair.
"""
where_clause, bind_vars = build_where_clause(where_column_value_pairs)
limit_clause, limit_bind_vars = build_limit_clause(limit)
update_bind_vars(bind_vars, limit_bind_vars)
query = (
'DELETE FROM %(table_name)s WHERE %(where_clause)s %(limit_clause)s' %
{'table_name': table_name, 'where_clause': where_clause,
'limit_clause': limit_clause})
return query, bind_vars
def insert_query(table_name, columns, **bind_vars):
"""Return SQL for an INSERT INTO ... VALUES call.
Args:
table_name: Str name of table.
columns: Str column names.
**bind_vars: (str: value) dict of variables, with automatic
columns like 'time_created' possibly added.
Returns:
A (str SQL, (str: value) dict bind vars) pair.
"""
values_clause, bind_list = build_values_clause(
columns, bind_vars)
query = 'INSERT INTO %s (%s) VALUES (%s)' % (
table_name, colstr(columns, bind=bind_list), values_clause)
return query, bind_vars
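# Illustrative example (not in the original source):
#   insert_query('jobs', ['status', 'type'], status='queued', type='batch')
#   # -> ('INSERT INTO jobs (status, type) VALUES (%(status)s, %(type)s)',
#   #     {'status': 'queued', 'type': 'batch'})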
def build_aggregate_query(table_name, id_column_name, is_asc=False):
"""Return query, bind_vars for a table-wide min or max query."""
query, bind_vars = select_by_columns_query(
select_column_list=[id_column_name], table_name=table_name,
order_by=[(id_column_name, 'ASC' if is_asc else 'DESC')],
limit=1)
return query, bind_vars
def build_count_query(table_name, column_value_pairs):
"""Return query, bind_vars for a count query."""
return select_by_columns_query(
select_column_list=[Count()], table_name=table_name,
column_value_pairs=column_value_pairs)
def choose_bind_name(base, counter):
return '%s_%d' % (base, counter.next())
def make_bind_list(column, values, counter=None):
"""Return (bind_name, value) list for each value."""
result = []
bind_names = []
if counter is None:
counter = itertools.count(1)
for value in values:
bind_name = choose_bind_name(column, counter=counter)
bind_names.append(bind_name)
result.append((bind_name, value))
return result
class BaseSQLUpdateExpr(object):
"""Return SQL for an UPDATE expression.
Expr is used in: UPDATE ... SET expr [, expr ..] WHERE ...;
It should have the form "col_name = ..."
"""
def build_update_sql(self, column_name):
"""Return SQL and bind_vars for an UPDATE SET expression.
Args:
column_name: Str name of column to update.
Returns:
A (str SQL, (str: value) dict bind_vars) pair.
"""
raise NotImplementedError
class RawSQLUpdateExpr(BaseSQLUpdateExpr):
"""A parameterized update expr.
This is the simplest base class for an SQLUpdateExpr that is
not also an SQLInsertExpr.
See BaseSQLInsertExpr.
"""
right_expr = None
def __init__(self, right_expr=None, **bind_vars):
"""Pass in the right_expr and bind_vars.
Either right_expr or the right_expr class variable should be
defined.
Args:
right_expr: Str SQL on the right side of '=' in the update expr.
**bind_vars: The (str: value) dict returned by build_update_sql.
Raises:
ValueError: If right_expr is not defined.
"""
if right_expr:
self.right_expr = right_expr
elif not self.right_expr:
raise ValueError('No right_expr.')
self.bind_vars = bind_vars
def build_update_sql(self, column_name):
return '%s = %s' % (column_name, self.right_expr), self.bind_vars
class BaseSQLInsertExpr(BaseSQLUpdateExpr):
"""Return SQL for an INSERT VALUES expression.
Expr is used in: INSERT ... VALUES (expr [, expr ...]) ...
"""
def build_insert_sql(self):
"""Return SQL for an INSERT VALUES expression.
Returns:
A (str SQL, (str: value) dict bind_vars) pair.
"""
raise NotImplementedError
def build_update_sql(self, column_name):
"""Return the update SQL expr corresponding to the insert expr.
Any insert expr should have a corresponding update expr; the reverse
is not true ("failures = failures + 3" is an update expr, but
"failures + 3" is not an insert expr).
Args:
column_name: Str name of column to update.
Returns:
A (str SQL, (str: value) dict bind_vars) pair.
"""
insert_sql, bind_vars = self.build_insert_sql()
return '%s = %s' % (column_name, insert_sql), bind_vars
class RawSQLInsertExpr(BaseSQLInsertExpr):
"""A parameterized insert expr.
This is the simplest base class for an SQLInsertExpr.
See BaseSQLInsertExpr.
"""
insert_expr = None
def __init__(self, insert_expr=None, **bind_vars):
"""Pass in the insert_expr and bind_vars.
Either insert_expr or the insert_expr class variable should be
defined.
Args:
insert_expr: Str SQL to be returned from build_insert_sql.
**bind_vars: The (str: value) dict bind_vars to be returned from
build_insert_sql.
Raises:
ValueError: If insert_expr is not defined.
"""
if insert_expr:
self.insert_expr = insert_expr
elif not self.insert_expr:
raise ValueError('No insert_expr.')
self.bind_vars = bind_vars
def build_insert_sql(self):
return self.insert_expr, self.bind_vars
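# Illustrative usage sketch (not in the original module): an insert expr can also
# be reused as an update expr, per BaseSQLInsertExpr.build_update_sql above.
def _example_raw_insert_expr():
  expr = RawSQLInsertExpr(insert_expr='NOW()')
  # build_insert_sql() -> ('NOW()', {});
  # build_update_sql('time_created') -> ('time_created = NOW()', {}).
  return expr.build_insert_sql(), expr.build_update_sql('time_created')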
# Deprecated: Use RawSQLUpdateExpr instead.
class MySQLFunction(BaseSQLUpdateExpr):
"""A 'column = func' element of an update set clause.
Example: "failures = failures + %(failures_1)s", {'failures_1': 3}
"""
def __init__(self, func, bind_vars=None):
"""Init MySQLFunction.
Args:
func: Str of right-hand side of 'column = func', with formatting
keys corresponding to bind vars.
bind_vars: A (str: value) bind var dict corresponding
to formatting keys found in func.
"""
self.func = func
self.bind_vars = bind_vars or {}
def build_update_sql(self, column_name):
"""Return (str query, bind vars) for an UPDATE SET clause."""
clause = '%s = %s' % (column_name, self.func)
return clause, self.bind_vars
class BaseSQLSelectExpr(object):
"""Return SQL for a SELECT expression.
Expr is used in: SELECT expr [, expr ...] FROM ...;
"""
def select_sql(self, alias):
"""Return SQL for a SELECT expression.
Args:
alias: Str alias qualifier for column_name. If there is a column_name
for this BaseSQLSelectExpr, it should be written as alias.column_name.
Returns:
Str SQL for a comma-delimited expr in a SELECT ... query.
"""
raise NotImplementedError
class RawSQLSelectExpr(BaseSQLSelectExpr):
"""A SelectExpr that is raw SQL."""
# Derived class must define select_expr.
select_expr = None
def __init__(self, select_expr=None):
"""Pass in the select_expr.
Either select_expr or the select_expr class variable should be
defined.
Args:
select_expr: Str SQL to be returned from select_sql.
Raises:
ValueError: If select_expr is not defined.
"""
if select_expr:
self.select_expr = select_expr
elif not self.select_expr:
raise ValueError('No select_expr.')
def select_sql(self, alias):
_ = alias
return self.select_expr
class Count(RawSQLSelectExpr):
select_expr = 'COUNT(1)'
# This is an overly restrictive class name. For instance,
# this could be used to create "FROM_UNIXTIME(time_created)",
# but this is not an aggregate.
class SQLAggregate(BaseSQLSelectExpr):
"""A 'func(column_name)' element of a select where clause.
Example: "SUM(failures)".
"""
function_name = None
def __init__(self, column_name, function_name=None):
"""Init SQLAggregate.
Either function_name or the function_name class variable should be
defined.
Args:
column_name: Str column name.
function_name: Optional str function name.
Raises:
ValueError: If function_name is not defined.
"""
self.column_name = column_name
if function_name:
self.function_name = function_name
elif not self.function_name:
raise ValueError('No function_name.')
def select_sql(self, alias):
if alias:
col_name = '%s.%s' % (alias, self.column_name)
else:
col_name = self.column_name
clause = '%(function_name)s(%(col_name)s)' % dict(
function_name=self.function_name, col_name=col_name)
return clause
class Max(SQLAggregate):
function_name = 'MAX'
class Min(SQLAggregate):
function_name = 'MIN'
class Sum(SQLAggregate):
function_name = 'SUM'
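# Illustrative usage sketch (not in the original module): select exprs render to
# plain SQL fragments, optionally qualified by a table alias.
def _example_select_exprs():
  # Count().select_sql('t') -> 'COUNT(1)' (alias is ignored for raw SQL exprs);
  # Max('time_created').select_sql('t') -> 'MAX(t.time_created)'.
  return Count().select_sql('t'), Max('time_created').select_sql('t')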
class BaseSQLWhereExpr(object):
"""Return SQL for a WHERE expression.
Expr is used in WHERE clauses in various ways, like:
... WHERE expr [AND expr ...] ...;
"""
def select_where_sql(self, column_name, counter):
"""Return SQL for a WHERE expression.
Args:
column_name: Name of a column on which this expr operates.
counter: An itertools.count that returns a new number. This
keeps the where clause from having colliding bind vars.
Returns:
A (str SQL, (str: value) bind_vars dict) pair.
"""
raise NotImplementedError
class NullSafeNotValue(BaseSQLWhereExpr):
"""A null-safe inequality operator.
For any [column] and [value] we do "NOT [column] <=> [value]".
This is a bit of a hack because our framework assumes all operators are
binary in nature (whereas we need a combination of unary and binary
operators).
"""
def __init__(self, value):
self.value = value
def build_where_sql(self, column_name, counter):
bind_name = choose_bind_name(column_name, counter=counter)
clause = 'NOT %(column_name)s <=> %%(%(bind_name)s)s' % dict(
column_name=column_name, bind_name=bind_name)
bind_vars = {bind_name: self.value}
return clause, bind_vars
class SQLOperator(BaseSQLWhereExpr):
"""Base class for a column expression in a SQL WHERE clause."""
op = None
def __init__(self, value, op=None):
"""Constructor.
Args:
value: The value against which to compare the column, or an iterable of
values if appropriate for the operator.
op: The operator to use for comparison.
"""
self.value = value
if op:
self.op = op
def build_where_sql(self, column_name, counter):
"""Render this expression as a SQL string.
Args:
column_name: Name of the column being tested in this expression.
counter: Instance of itertools.count supplying numeric suffixes for
disambiguating bind_names, or None. (See choose_bind_name
for a discussion.)
Returns:
clause: The SQL expression, including a placeholder for the value.
bind_vars: Dict mapping placeholder names to actual values.
"""
op = self.op
bind_name = choose_bind_name(column_name, counter=counter)
clause = '%(column_name)s %(op)s %%(%(bind_name)s)s' % dict(
column_name=column_name, op=op, bind_name=bind_name)
bind_vars = {bind_name: self.value}
return clause, bind_vars
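# Illustrative usage sketch (not in the original module): SQLOperator pairs a
# comparison with a freshly named bind var. itertools is already used by
# make_bind_list above, so it is assumed to be imported at module top.
def _example_sql_operator():
  # Expected result: ('failures >= %(failures_1)s', {'failures_1': 3}).
  return SQLOperator(3, op='>=').build_where_sql('failures', itertools.count(1))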
class NotValue(SQLOperator):
op = '!='
def build_where_sql(self, column_name, counter):
if self.value is None:
return '%s IS NOT NULL' % column_name, {}
return super(NotValue, self).build_where_sql(column_name, counter=counter)
class InValuesOperatorBase(SQLOperator):
def __init__(self, *values):
super(InValuesOperatorBase, self).__init__(values)
def build_where_sql(self, column_name, counter):
op = self.op
bind_list = make_bind_list(column_name, self.value, counter=counter)
in_clause = ', '.join(('%(' + key + ')s') for key, val in bind_list)
clause = '%(column_name)s %(op)s (%(in_clause)s)' % dict(
column_name=column_name, op=op, in_clause=in_clause)
return clause, dict(bind_list)
# You rarely need to use InValues directly in your database classes.
# List and tuples are handled automatically by most database helper methods.
class InValues(InValuesOperatorBase):
op = 'IN'
class NotInValues(InValuesOperatorBase):
op = 'NOT IN'
class InValuesOrNull(InValues):
def build_where_sql(self, column_name, counter):
clause, bind_vars = super(InValuesOrNull, self).build_where_sql(
column_name, counter=counter)
clause = '(%s OR %s IS NULL)' % (clause, column_name)
return clause, bind_vars
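# Illustrative usage sketch (not in the original module): InValues expands each
# value into its own bind var inside an IN (...) list.
def _example_in_values():
  # Expected result:
  #   ('shard IN (%(shard_1)s, %(shard_2)s, %(shard_3)s)',
  #    {'shard_1': 1, 'shard_2': 2, 'shard_3': 3}).
  return InValues(1, 2, 3).build_where_sql('shard', itertools.count(1))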
class BetweenValues(SQLOperator):
def __init__(self, value0, value1):
super(BetweenValues, self).__init__((value0, value1), 'BETWEEN')
def build_where_sql(self, column_name, counter):
op = self.op
bind_list = make_bind_list(column_name, self.value, counter=counter)
between_clause = ' AND '.join(('%(' + key + ')s') for key, val in bind_list)
clause = '%(column_name)s %(op)s %(between_clause)s' % dict(
column_name=column_name, op=op, between_clause=between_clause)
return clause, dict(bind_list)
class OrValues(SQLOperator):
def __init__(self, *values):
if not values or len(values) == 1:
raise ValueError('Two or more arguments expected.')
super(OrValues, self).__init__(values, 'OR')
def build_where_sql(self, column_name, counter):
condition_list = []
bind_vars = {}
for v in self.value:
if isinstance(v, BaseSQLWhereExpr):
clause, clause_bind_vars = v.build_where_sql(
column_name, counter=counter)
update_bind_vars(bind_vars, clause_bind_vars)
condition_list.append(clause)
else:
bind_name = choose_bind_name(column_name, counter=counter)
bind_vars[bind_name] = v
condition_list.append('%s = %%(%s)s' % (column_name, bind_name))
or_clause = '((' + ') OR ('.join(condition_list) + '))'
return or_clause, bind_vars
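# Illustrative usage sketch (not in the original module): plain values become
# equality tests; BaseSQLWhereExpr values are rendered via their own
# build_where_sql and OR-ed together.
def _example_or_values():
  # Expected result:
  #   ('((shard = %(shard_1)s) OR (shard = %(shard_2)s))',
  #    {'shard_1': 1, 'shard_2': 2}).
  return OrValues(1, 2).build_where_sql('shard', itertools.count(1))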
class LikeValue(SQLOperator):
op = 'LIKE'
class GreaterThanValue(SQLOperator):
op = '>'
class GreaterThanOrEqualToValue(SQLOperator):
op = '>='
class LessThanValue(SQLOperator):
op = '<'
class LessThanOrEqualToValue(SQLOperator):
op = '<='
class ModuloEquals(SQLOperator):
"""column % modulus = value."""
def __init__(self, modulus, value):
super(ModuloEquals, self).__init__(value, '%')
self.modulus = modulus
def build_where_sql(self, column, counter):
mod_bind_name = choose_bind_name('modulus', counter=counter)
val_bind_name = choose_bind_name(column, counter=counter)
sql = '(%(column)s %%%% %%(%(mod_bind_name)s)s) = %%(%(val_bind_name)s)s'
return (sql % {'column': column,
'mod_bind_name': mod_bind_name,
'val_bind_name': val_bind_name},
{mod_bind_name: self.modulus,
val_bind_name: self.value})
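# Illustrative usage sketch (not in the original module): the doubled '%%' stays
# in the clause because the driver applies bind-var formatting to it later.
def _example_modulo_equals():
  # Expected result:
  #   ('(keyspace_id %% %(modulus_1)s) = %(keyspace_id_2)s',
  #    {'modulus_1': 4, 'keyspace_id_2': 1}).
  return ModuloEquals(4, 1).build_where_sql('keyspace_id', itertools.count(1))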
class Expression(SQLOperator):
"""Operator where value is raw SQL rather than a variable.
Example: "failures < attempts" rather than "failures < 3".
"""
def build_where_sql(self, column_name, counter):
op = self.op
value = str(self.value)
clause = '%(column_name)s %(op)s %(value)s' % dict(
column_name=column_name, op=op, value=value)
return clause, {}
class IsNullOrEmptyString(BaseSQLWhereExpr):
def build_where_sql(self, column_name, counter):
# Note: mysql treats '' the same as ' '
_ = counter
return "(%s IS NULL OR %s = '')" % (column_name, column_name), {}
class IsNullValue(BaseSQLWhereExpr):
def build_where_sql(self, column_name, counter):
_ = counter
return '%s IS NULL' % column_name, {}
class IsNotNullValue(BaseSQLWhereExpr):
def build_where_sql(self, column_name, counter):
_ = counter
return '%s IS NOT NULL' % column_name, {}
class Flag(BaseSQLUpdateExpr, BaseSQLWhereExpr):
"""A class with flags_present and flags_absent.
This can create SELECT WHERE clause sql like "flags & 0x3 = 0x1" and
UPDATE SET clause sql like "flags = (flags | 0x1) & ^0x2".
"""
def __init__(self, flags_present=0x0, flags_absent=0x0):
if flags_present & flags_absent:
raise ValueError(
'flags_present (0x%016x) and flags_absent (0x%016x)'
' overlap: 0x%016x' % (
flags_present, flags_absent, flags_present & flags_absent))
self.mask = flags_present | flags_absent
self.value = flags_present
self.flags_present = flags_present
self.flags_absent = flags_absent
# These are poorly named and should be deprecated.
self.flags_to_remove = flags_absent
self.flags_to_add = flags_present
def __repr__(self):
return '%s(flags_present=0x%X, flags_absent=0x%X)' % (
self.__class__.__name__, self.flags_to_add, self.flags_to_remove)
def __or__(self, other):
return Flag(flags_present=self.flags_to_add | other.flags_to_add,
flags_absent=self.flags_to_remove | other.flags_to_remove)
def __eq__(self, other):
if not isinstance(other, Flag):
return False
return self.mask == other.mask and self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def build_where_sql(self, column_name, counter):
"""Return SELECT WHERE clause and bind_vars.
Args:
column_name: Str name of SQL column.
counter: An itertools.count to keep bind variable names from colliding.
Returns:
A (str clause, (str: obj) bind_vars dict) pair.
"""
bind_name_mask = choose_bind_name(column_name + '_mask', counter=counter)
bind_name_value = choose_bind_name(column_name + '_value', counter=counter)
clause = (
'{column_name} & %({bind_name_mask})s = '
'%({bind_name_value})s'.format(
bind_name_mask=bind_name_mask, bind_name_value=bind_name_value,
column_name=column_name))
bind_vars = {
bind_name_mask: self.mask,
bind_name_value: self.value
}
return clause, bind_vars
def build_update_sql(self, column_name='flags'):
"""Return UPDATE WHERE clause and bind_vars.
Args:
column_name: Str name of SQL column.
Returns:
A (str clause, (str: obj) bind_vars dict) pair.
"""
clause = (
'%(column_name)s = (%(column_name)s | '
'%%(update_%(column_name)s_add)s) & '
'~%%(update_%(column_name)s_remove)s') % dict(
column_name=column_name)
bind_vars = {
'update_%s_add' % column_name: self.flags_to_add,
'update_%s_remove' % column_name: self.flags_to_remove}
return clause, bind_vars
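# Illustrative usage sketch (not in the original module): one Flag value can be
# used both to filter on bits and to set/clear them.
def _example_flag():
  flag = Flag(flags_present=0x1, flags_absent=0x2)
  # build_where_sql -> ('flags & %(flags_mask_1)s = %(flags_value_2)s',
  #                     {'flags_mask_1': 3, 'flags_value_2': 1});
  # build_update_sql -> ('flags = (flags | %(update_flags_add)s) & '
  #                      '~%(update_flags_remove)s',
  #                      {'update_flags_add': 1, 'update_flags_remove': 2}).
  return (flag.build_where_sql('flags', itertools.count(1)),
          flag.build_update_sql('flags'))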
def make_flag(flag_mask, value):
if value:
return Flag(flags_present=flag_mask)
else:
return Flag(flags_absent=flag_mask)
def update_bind_vars(bind_vars, new_bind_vars):
"""Merge new_bind_vars into bind_vars, disallowing duplicates."""
for k, v in new_bind_vars.iteritems():
if k in bind_vars:
raise ValueError(
'Duplicate bind vars: cannot add %s to %s.' %
(k, sorted(bind_vars)))
bind_vars[k] = v
class Increment(BaseSQLUpdateExpr):
def __init__(self, amount):
self.amount = amount
def build_update_sql(self, column_name):
clause = (
'%(column_name)s = (%(column_name)s + '
'%%(update_%(column_name)s_amount)s)') % dict(
column_name=column_name)
bind_vars = {'update_%s_amount' % column_name: self.amount}
return clause, bind_vars
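# Illustrative usage sketch (not in the original module): Increment renders a
# relative "column = column + amount" SET item.
def _example_increment():
  # Expected result:
  #   ('failures = (failures + %(update_failures_amount)s)',
  #    {'update_failures_amount': 3}).
  return Increment(3).build_update_sql('failures')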
def _normalize_order_by(order_by):
"""Return str list list."""
if not isinstance(order_by, (tuple, list)):
order_by = order_by,
words_list = []
for item in order_by:
if not isinstance(item, (tuple, list)):
item = item,
words_list.append(' '.join(item).split())
return words_list
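# Illustrative usage sketch (not in the original module): order_by items may be
# bare strings or (column, direction) pairs; both normalize to word lists.
def _example_normalize_order_by():
  # Expected result: [['keyspace_id'], ['time_created', 'DESC']].
  return _normalize_order_by(('keyspace_id', ('time_created', 'DESC')))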
|
bsd-3-clause
|
dangra/scrapy
|
scrapy/utils/console.py
|
6
|
3431
|
from functools import wraps
from collections import OrderedDict
def _embed_ipython_shell(namespace={}, banner=''):
"""Start an IPython Shell"""
try:
from IPython.terminal.embed import InteractiveShellEmbed
from IPython.terminal.ipapp import load_default_config
except ImportError:
from IPython.frontend.terminal.embed import InteractiveShellEmbed
from IPython.frontend.terminal.ipapp import load_default_config
@wraps(_embed_ipython_shell)
def wrapper(namespace=namespace, banner=''):
config = load_default_config()
        # Always use .instance() to ensure _instance propagation to all parents
        # this is needed so that <TAB> completion works well for new imports
        # and clear the instance to always have a fresh env
        # on repeated breaks like with inspect_response()
InteractiveShellEmbed.clear_instance()
shell = InteractiveShellEmbed.instance(
banner1=banner, user_ns=namespace, config=config)
shell()
return wrapper
def _embed_bpython_shell(namespace={}, banner=''):
"""Start a bpython shell"""
import bpython
@wraps(_embed_bpython_shell)
def wrapper(namespace=namespace, banner=''):
bpython.embed(locals_=namespace, banner=banner)
return wrapper
def _embed_ptpython_shell(namespace={}, banner=''):
"""Start a ptpython shell"""
import ptpython.repl
@wraps(_embed_ptpython_shell)
def wrapper(namespace=namespace, banner=''):
print(banner)
ptpython.repl.embed(locals=namespace)
return wrapper
def _embed_standard_shell(namespace={}, banner=''):
"""Start a standard python shell"""
import code
try: # readline module is only available on unix systems
import readline
except ImportError:
pass
else:
import rlcompleter # noqa: F401
readline.parse_and_bind("tab:complete")
@wraps(_embed_standard_shell)
def wrapper(namespace=namespace, banner=''):
code.interact(banner=banner, local=namespace)
return wrapper
DEFAULT_PYTHON_SHELLS = OrderedDict([
('ptpython', _embed_ptpython_shell),
('ipython', _embed_ipython_shell),
('bpython', _embed_bpython_shell),
('python', _embed_standard_shell),
])
def get_shell_embed_func(shells=None, known_shells=None):
"""Return the first acceptable shell-embed function
from a given list of shell names.
"""
if shells is None: # list, preference order of shells
shells = DEFAULT_PYTHON_SHELLS.keys()
if known_shells is None: # available embeddable shells
known_shells = DEFAULT_PYTHON_SHELLS.copy()
for shell in shells:
if shell in known_shells:
try:
# function test: run all setup code (imports),
                # but don't fall into the shell
return known_shells[shell]()
except ImportError:
continue
def start_python_console(namespace=None, banner='', shells=None):
"""Start Python console bound to the given namespace.
Readline support and tab completion will be used on Unix, if available.
"""
if namespace is None:
namespace = {}
try:
shell = get_shell_embed_func(shells)
if shell is not None:
shell(namespace=namespace, banner=banner)
except SystemExit: # raised when using exit() in python code.interact
pass
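# Illustrative sketch (not part of scrapy): how callers such as inspect_response
# typically drive this module; forcing the plain stdlib shell keeps the example
# dependency-free.
def _example_start_console():
    start_python_console(namespace={'answer': 42}, banner='demo shell',
                         shells=['python'])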
|
bsd-3-clause
|
tensorflow/model-analysis
|
tensorflow_model_analysis/eval_saved_model/util_test.py
|
1
|
12546
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple tests for util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.eval_saved_model import util
from tensorflow.core.example import example_pb2
class UtilTest(testutil.TensorflowModelAnalysisTest):
def testMakeExample(self):
expected = example_pb2.Example()
expected.features.feature['single_float'].float_list.value[:] = [1.0]
expected.features.feature['single_int'].int64_list.value[:] = [2]
expected.features.feature['single_str'].bytes_list.value[:] = [b'apple']
expected.features.feature['multi_float'].float_list.value[:] = [4.0, 5.0]
expected.features.feature['multi_int'].int64_list.value[:] = [6, 7]
expected.features.feature['multi_str'].bytes_list.value[:] = [
b'orange', b'banana'
]
self.assertEqual(
expected,
util.make_example(
single_float=1.0,
single_int=2,
single_str='apple',
multi_float=[4.0, 5.0],
multi_int=[6, 7],
multi_str=['orange', 'banana']))
def testSplitTensorValueDense(self):
split_tensor_values = util.split_tensor_value(
np.ndarray(shape=(3, 2), buffer=np.array([2, 4, 6, 8, 10, 12])))
self.assertAllEqual([
np.ndarray(shape=(1, 2), buffer=np.array([2, 4])),
np.ndarray(shape=(1, 2), buffer=np.array([6, 8])),
np.ndarray(shape=(1, 2), buffer=np.array([10, 12])),
], split_tensor_values)
def testSplitTensorValueSparse(self):
split_tensor_values = util.split_tensor_value(
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0], [0, 1], [1, 0], [1, 1], [3, 0], [3, 1]]),
values=np.array([1, 3, 5, 7, 9, 11]),
dense_shape=np.array([4, 2])))
expected_sparse_tensor_values = [
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0], [0, 1]]),
values=np.array([1, 3]),
dense_shape=np.array([1, 2])),
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0], [0, 1]]),
values=np.array([5, 7]),
dense_shape=np.array([1, 2])),
tf.compat.v1.SparseTensorValue(
indices=np.zeros([0, 2], dtype=np.int64),
values=np.zeros([0], dtype=np.int64),
dense_shape=np.array([1, 0])),
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0], [0, 1]]),
values=np.array([9, 11]),
dense_shape=np.array([1, 2])),
]
for expected_sparse_tensor_value, got_sparse_tensor_value in zip(
expected_sparse_tensor_values, split_tensor_values):
self.assertSparseTensorValueEqual(expected_sparse_tensor_value,
got_sparse_tensor_value)
def testSplitTensorValueSparseVarLen(self):
split_tensor_values = util.split_tensor_value(
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0], [1, 0], [1, 1], [1, 2], [1, 3], [2, 0],
[2, 1]]),
values=np.array([1, 2, 3, 4, 5, 6, 7]),
dense_shape=np.array([3, 4])))
expected_sparse_tensor_values = [
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0]]),
values=np.array([1]),
dense_shape=np.array([1, 1])),
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0], [0, 1], [0, 2], [0, 3]]),
values=np.array([2, 3, 4, 5]),
dense_shape=np.array([1, 4])),
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0], [0, 1]]),
values=np.array([6, 7]),
dense_shape=np.array([1, 2])),
]
for expected_sparse_tensor_value, got_sparse_tensor_value in zip(
expected_sparse_tensor_values, split_tensor_values):
self.assertSparseTensorValueEqual(expected_sparse_tensor_value,
got_sparse_tensor_value)
def testSplitTensorValueSparseVarLenMultiDim(self):
split_tensor_values = util.split_tensor_value(
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0, 0], [0, 0, 1], [1, 1, 2], [1, 3, 4],
[3, 0, 3], [3, 2, 1], [3, 3, 0]],
dtype=np.int64),
values=np.array([1, 2, 3, 4, 5, 6, 7], dtype=np.int64),
dense_shape=np.array([4, 4, 5])))
expected_sparse_tensor_values = [
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0, 0], [0, 0, 1]]),
values=np.array([1, 2]),
dense_shape=np.array([1, 1, 2])),
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 1, 2], [0, 3, 4]]),
values=np.array([3, 4]),
dense_shape=np.array([1, 4, 5])),
tf.compat.v1.SparseTensorValue(
indices=np.zeros([0, 3], dtype=np.int64),
values=np.zeros([0], dtype=np.int64),
dense_shape=np.array([1, 0, 0])),
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0, 3], [0, 2, 1], [0, 3, 0]]),
values=np.array([5, 6, 7]),
dense_shape=np.array([1, 4, 4])),
]
for expected_sparse_tensor_value, got_sparse_tensor_value in zip(
expected_sparse_tensor_values, split_tensor_values):
self.assertSparseTensorValueEqual(expected_sparse_tensor_value,
got_sparse_tensor_value)
def testSplitTensorValueSparseTypesPreserved(self):
split_tensor_values = util.split_tensor_value(
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0], [0, 1], [2, 0], [3, 1]]),
values=np.array(['zero0', 'zero1', 'two0', 'three1'],
dtype=np.object),
dense_shape=np.array([4, 3])))
expected_sparse_tensor_values = [
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0], [0, 1]]),
values=np.array(['zero0', 'zero1'], dtype=np.object),
dense_shape=np.array([1, 2])),
tf.compat.v1.SparseTensorValue(
indices=np.zeros([0, 2], dtype=np.int64),
values=np.zeros([0], dtype=np.object),
dense_shape=np.array([1, 0])),
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0]]),
values=np.array(['two0'], dtype=np.object),
dense_shape=np.array([1, 1])),
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 1]]),
values=np.array(['three1'], dtype=np.object),
dense_shape=np.array([1, 2])),
]
for expected_sparse_tensor_value, got_sparse_tensor_value in zip(
expected_sparse_tensor_values, split_tensor_values):
self.assertSparseTensorValueEqual(expected_sparse_tensor_value,
got_sparse_tensor_value)
def testMergeTensorValueDense(self):
merged_tensor_values = util.merge_tensor_values(tensor_values=[
np.ndarray(shape=(1, 2), buffer=np.array([1, 2])),
np.ndarray(shape=(1, 2), buffer=np.array([3, 4])),
np.ndarray(shape=(1, 2), buffer=np.array([5, 6])),
])
self.assertAllEqual(
np.ndarray(shape=(3, 2), buffer=np.array([1, 2, 3, 4, 5, 6])),
merged_tensor_values)
def testMergeTensorValueDenseDifferentShapesInts(self):
merged_tensor_values = util.merge_tensor_values(tensor_values=[
np.array([[[10], [11]]]),
np.array([[[20, 21, 22]]]),
np.array([[[30, 31], [32, 33], [34, 35]]]),
np.array([[[40, 41]]]),
])
self.assertAllEqual(
np.array([
# Row 0
[[10, 0, 0], [11, 0, 0], [0, 0, 0]],
# Row 1
[[20, 21, 22], [0, 0, 0], [0, 0, 0]],
# Row 2
[[30, 31, 0], [32, 33, 0], [34, 35, 0]],
# Row 3
[[40, 41, 0], [0, 0, 0], [0, 0, 0]],
]),
merged_tensor_values)
def testMergeTensorValueDenseDifferentShapesStrings(self):
merged_tensor_values = util.merge_tensor_values(tensor_values=[
np.array([[['apple'], ['banana']]]),
np.array([[['cherry', 'date', 'elderberry']]]),
np.array([[['fig', 'guava'], ['honeydew', 'imbe'],
['jackfruit', 'kiwi']]])
])
self.assertAllEqual(
np.array([
# Row 0
[['apple', '', ''], ['banana', '', ''], ['', '', '']],
# Row 1
[['cherry', 'date', 'elderberry'], ['', '', ''], ['', '', '']],
# Row 2
[['fig', 'guava', ''], ['honeydew', 'imbe', ''],
['jackfruit', 'kiwi', '']]
]),
merged_tensor_values)
def testMergeTensorValueSparse(self):
merged_tensor_values = util.merge_tensor_values(tensor_values=[
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0], [0, 1]]),
values=np.array([1, 2]),
dense_shape=np.array([1, 2])),
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0], [0, 1]]),
values=np.array([3, 4]),
dense_shape=np.array([1, 2])),
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0], [0, 1]]),
values=np.array([5, 6]),
dense_shape=np.array([1, 2])),
])
self.assertSparseTensorValueEqual(
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0], [0, 1], [1, 0], [1, 1], [2, 0], [2, 1]]),
values=np.array([1, 2, 3, 4, 5, 6]),
dense_shape=np.array([3, 2])), merged_tensor_values)
def testMergeTensorValuesSparseOriginalsUnmodified(self):
value1 = tf.compat.v1.SparseTensorValue(
indices=np.array([]).reshape([0, 2]),
values=np.array([]).reshape([0, 1]),
dense_shape=np.array([1, 4]))
value2 = tf.compat.v1.SparseTensorValue(
indices=np.array([]).reshape([0, 2]),
values=np.array([]).reshape([0, 1]),
dense_shape=np.array([1, 4]))
merged_tensor_values = util.merge_tensor_values(
tensor_values=[value1, value2])
# Check that the original SparseTensorValues were not mutated.
self.assertSparseTensorValueEqual(
tf.compat.v1.SparseTensorValue(
indices=np.array([]).reshape([0, 2]),
values=np.array([]).reshape([0, 1]),
dense_shape=np.array([1, 4])), value1)
self.assertSparseTensorValueEqual(
tf.compat.v1.SparseTensorValue(
indices=np.array([]).reshape([0, 2]),
values=np.array([]).reshape([0, 1]),
dense_shape=np.array([1, 4])), value2)
# Check the merged SparseTensorValue.
self.assertSparseTensorValueEqual(
tf.compat.v1.SparseTensorValue(
indices=np.array([]).reshape([0, 2]),
values=np.array([]).reshape([0, 1]),
dense_shape=np.array([2, 4])), merged_tensor_values)
def testMergeTensorValueSparseDifferentShapes(self):
merged_tensor_values = util.merge_tensor_values(tensor_values=[
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0, 0], [0, 1, 1]]),
values=np.array([10, 12]),
dense_shape=np.array([1, 2, 2])),
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 2, 2]]),
values=np.array([22]),
dense_shape=np.array([1, 3, 3])),
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0, 4]]),
values=np.array([33]),
dense_shape=np.array([1, 1, 5]))
])
self.assertSparseTensorValueEqual(
tf.compat.v1.SparseTensorValue(
indices=np.array([[0, 0, 0], [0, 1, 1], [1, 2, 2], [2, 0, 4]]),
values=np.array([10, 12, 22, 33]),
dense_shape=np.array([3, 3, 5])), merged_tensor_values)
if __name__ == '__main__':
tf.test.main()
|
apache-2.0
|
easmetz/inasafe
|
safe/metadata/property/dictionary_property.py
|
4
|
1715
|
# -*- coding: utf-8 -*-
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**metadata module.**
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '[email protected]'
__revision__ = '$Format:%H$'
__date__ = '03/12/15'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import json
from types import NoneType
from safe.common.exceptions import MetadataCastError
from safe.metadata.property import BaseProperty
class DictionaryProperty(BaseProperty):
"""A property that accepts dictionary input
"""
    # if you edit this you need to adapt xml_value and is_valid accordingly
_allowed_python_types = [dict, NoneType]
def __init__(self, name, value, xml_path):
super(DictionaryProperty, self).__init__(
name, value, xml_path, self._allowed_python_types)
@classmethod
def is_valid(cls, value):
return True
def cast_from_str(self, value):
try:
return json.loads(value)
except ValueError as e:
raise MetadataCastError(e)
@property
def xml_value(self):
if self.python_type is dict:
return json.dumps(self.value)
elif self.python_type is NoneType:
return ''
else:
            raise RuntimeError('self._allowed_python_types and self.xml_value '
                               'are out of sync. This should never happen')
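# Illustrative sketch (not part of InaSAFE), roughly how this property behaves;
# the xml_path below is a placeholder, real paths come from the metadata schema:
#   prop = DictionaryProperty('extra_keywords', {'depth': 10}, 'placeholder/path')
#   prop.cast_from_str('{"depth": 10}')  # parses back to a dict
#   prop.xml_value                       # JSON-encoded string of the value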
|
gpl-3.0
|
rjhd2/HadISD_v2
|
mk_netcdf_files.py
|
1
|
92608
|
#!/usr/local/sci/bin/python
#************************************************************************
# SVN Info
#$Rev:: 112 $: Revision of last commit
#$Author:: rdunn $: Author of last commit
#$Date:: 2017-01-13 14:47:17 +0000 (Fri, 13 Jan 2017) $: Date of last commit
#************************************************************************
'''
Python script to read the ISD ASCII text format and output netcdf files
Runs with no inputs in current version.
Compared to the IDL output using compare.py on 30 May 2012 and found to match,
except for total_cloud_flags - but on investigation of the raw ISD files
the Python extraction is the correct one. RJHD
Could change data types to match IDL, but instead adapting the QC so that it
works with floats and doubles as appropriate. RJHD
Logic changed to match IDL so that an overwrite is only acted upon when writing
real data, rather than missing values. RJHD
'''
import numpy as np
import datetime as dt
import glob
import gzip
import subprocess
import math
import netCDF4 as ncdf
import sys
import os
import argparse
import datetime, calendar
# RJHD utils
from set_paths_and_vars import *
# Globals
station_list_filename= 'candidate_stations.txt'
merger_list_filename = 'final_mergers.txt'
INTMDI=-999
FLTMDI=-1.e30
hours = True # output time axis in hours.
NCDC_FLAGS={'A':10,'U':11,'P':12,'I':13,'M':14,'C':15,'R':16, 'E':17, 'J':18}
#---------------------------------------------------------------------
#************************************************************************
def ReadStations(filename):
"""
Read Station Information
:param string filename: name and location of input file
:returns: numpy array of file contents
Use numpy genfromtxt reading to read in all station
data in ID,Lat,Lon,Elev list
"""
return np.genfromtxt(filename, dtype=(str)) # ReadStations
#************************************************************************
def ReadComposites(filename):
"""
Read Composite Station Information
:param string filename: name and location of input file
:returns: list of lists containing composites
"""
composites=[]
try:
with open(filename) as infile:
for line in infile:
split_line=line.split()
composites.append(split_line[:])
except IOError:
print "File not found: ",filename
raise IOError
return composites # ReadComposites
#************************************************************************
def RepresentsInt(s):
"""
Tests if string is an integer
:param string s: string to test
:returns: boolean if string is valid integer
"""
try:
int(s)
return True
except ValueError:
return False # RepresentsInt
#************************************************************************
def TimeMatch(timearray,testtime, lower,upper):
"""
Do matching of np array to find time step
:param array timearray: np array of timestamps
    :param float testtime: timestep to find
    :param int lower: lower bound of the slice to search
    :param int upper: upper bound of the slice to search
    :return: int of location
"""
return np.argwhere(timearray[lower:upper]==testtime)[0] # TimeMatch
#************************************************************************
def ExtractValues(missing,line,location,length,test,divisor=1.,flagoffset=0, doflag=True):
"""
Extract the appropriate values from the line string.
Assumes that usually it is a value,flag pair, with no scaling factor and that
the flag follows directly on from the value. Can be adjusted.
:param float/int missing: mdi
:param string line: input line from file
:param int location: location of start of string subset to read in
:param int length: length of string to convert to value
:param int test: value of string if data missing
:param float divisor: scaling factor for data, default=1
:param int flagoffset: shift of flag from end of data, default=0
:param boolean doflag: to extract a flag value, default=True
:returns: tuple of value, flag OR value if doflag=False
"""
temp_value=line[location:location+length]
value=missing
flag=INTMDI
if temp_value != test:
if missing==FLTMDI:
value=float(temp_value)/divisor
elif missing==INTMDI:
value=int(temp_value)/divisor
elif missing=='':
value=temp_value
if doflag:
flag=line[location+length+flagoffset:location+length+flagoffset+1]
if RepresentsInt(flag):
flag=int(flag)
else:
try:
flag=NCDC_FLAGS[flag]
except KeyError:
print 'ALPHA FLAG CONVERSION FAILED'
print 'input flag is: '+flag
print 'Line in raw ISD record reads:'
print line
flag=20
if doflag:
return value,flag
else:
return value # ExtractValues
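# Illustrative sketch (not part of the original script): how a fixed-width ISD
# field is pulled out, here mimicking the air temperature slot at column 87.
def _example_extract_values():
    # Synthetic record: '+0215' at columns 87-91 and QC flag '1' at column 92,
    # i.e. 21.5 degC; everything else is padding.
    line = ' ' * 87 + '+02151' + ' ' * 20
    # Expected result: (21.5, 1).
    return ExtractValues(FLTMDI, line, 87, 5, '+9999', divisor=10.)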
#************************************************************************
def TestToExtract(data,missing,overwrite):
"""
Test if need to extract the data
:param float/int data: data to test
:param float/int missing: missing value
:param boolean overwrite: to overwrite or not
:returns: boolean if condition met
"""
if data==missing or overwrite:
return True
else:
return False # TestToExtract
#************************************************************************
def ExtractionProcess(data, flags, time, missing, missingtest, line, location, length,divisor=1.,flagoffset=0, doflag=True):
"""
Run the extraction, and write the values if the extracted ones are not empty
:param array data: the data array
:param array flags: the flags array
:param int time: the time stamp
:param float/int missing: mdi
:param float/int missingtest: value of string if missing
:param string line: input line from file
:param int location: location of start of string subset to read in
:param int length: length of string to convert to value
:param float divisor: scaling factor for data, default=1
:param int flagoffset: shift of flag from end of data, default=0
:param boolean doflag: to extract a flag value, default=True
"""
if doflag:
value,flag=ExtractValues(missing,line,location,length,missingtest,divisor=divisor,flagoffset=flagoffset,doflag=doflag)
        # no longer want to test that there was a value - as all data is taken from a
        # single observation, regardless of how complete it is.
        # left old code extant in case changes happen in future
if value != missing:
data[time],flags[time]=value,flag
else:
data[time],flags[time]=value,flag
else:
value=ExtractValues(missing,line,location,length,missingtest,divisor=divisor,flagoffset=flagoffset,doflag=doflag)
if value != missing:
data[time]=value
else:
data[time]=value
return # ExtractionProcess
#************************************************************************
def WriteDubious(outfile,infile,code, station, time):
"""
Write note to dubious file list.
:param string outfile: filename to be written to
:param string infile: filename of dubious file
:param string code: text identifier of variables being tested
:param string station: station ID being processed
:param string time: time of the dubious data
:returns: int of flag status.
"""
flagged=0
try:
with open(outfile,'a') as of:
of.write(station+' '+time+' '+code+' variables are first, but not nec. only problem '+infile+'\n')
of.close()
flagged=1
except IOError:
# file doesn't exist as yet, so make a new one
with open(outfile,'w') as of:
of.write(station+' '+time+' '+code+' variables are first, but not nec. only problem '+infile+'\n')
of.close()
flagged=1
return flagged # WriteDubious
#************************************************************************
def SortClouds(cloud_cover,cloud_flags, time, amounts, flags, clouds):
"""
Convert the raw cloud data into oktas for each level
:param array cloud_cover: final cloud_cover array
:param array cloud_flags: final cloud_flags array
    :param int time: time stamp
:param array amounts: raw cloud amounts - in oktas
:param array flags: raw cloud flags
:param array clouds: locations of where cloud heights match this level
"""
if len(clouds)>=1 and cloud_cover[time]==INTMDI:
cloud_cover[time]=np.max(amounts[clouds])
cloud_flags[time]=np.max(flags[clouds])
return # SortClouds
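# Illustrative sketch (not part of the original script): filling one time stamp
# from two matching cloud layers; the maximum amount and flag win.
def _example_sort_clouds():
    cover = np.array([INTMDI, INTMDI])
    flags = np.array([INTMDI, INTMDI])
    SortClouds(cover, flags, 0, np.array([2, 6, INTMDI]),
               np.array([1, 1, INTMDI]), np.array([0, 1]))
    # Now cover[0] == 6 and flags[0] == 1.
    return cover, flags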
#************************************************************************
def SortClouds2(cloud_cover,cloud_flags, time, amounts, amounts2, flags, clouds):
"""
Convert the raw cloud data into oktas and for each level
:param array cloud_cover: final cloud_cover array
:param array cloud_flags: final cloud_flags array
:param int time_loc: time stamp
:param array amounts: raw cloud amounts - in other units - see ISD documentation
:param array amounts2: raw cloud amounts - in oktas
:param array flags: raw cloud flags
:param array clouds: locations of where cloud heights match this level
"""
inoktas=np.where(np.array(amounts2[clouds]) != INTMDI)[0]
if len(inoktas)>=1 and cloud_cover[time]==INTMDI:
cloud_cover[time]=np.max(amounts[clouds][inoktas])
cloud_flags[time]=np.max(flags[clouds][inoktas])
elif cloud_cover[time]==INTMDI:
# convert to oktas
cloud_cover[time]=np.max(amounts[clouds])*2.
cloud_flags[time]=np.max(flags[clouds])
return # SortClouds2
#************************************************************************
def WriteAttributes(variable,long_name,cell_methods,missing_value,units,axis,vmin,vmax,coordinates,standard_name = ''):
"""
Write given attributes into ncdf variable
:param object variable: netcdf Variable
:param string long_name: long_name value for variable to be written
:param string cell_methods: cell_methods value for variable to be written
:param float/int missing_value: missing_value value for variable to be written
:param string units: units value for variable to be written
:param string axis: axis value for variable to be written
:param float/int vmin: valid_min value for variable to be written
:param float/int vmax: valid_max value for variable to be written
:param string standard_name: standard_name value for variable to be written
:param string coordinates: coordinates to associate to variable
"""
variable.long_name=long_name
variable.cell_methods=cell_methods
variable.missing_value=missing_value
# variable.axis=axis # 12/1/17 RJHD - not required for CF compliance.
variable.units=units
variable.valid_min=vmin
variable.valid_max=vmax
variable.coordinates=coordinates
if standard_name != '':
variable.standard_name=standard_name
return # WriteAttributes
#************************************************************************
def WriteFlagAttributes(variable,long_name,missing_value,axis):
"""
Write given attributes into ncdf variable
:param object variable: netcdf Variable
:param string long_name: long_name value for variable to be written
:param float/int missing_value: missing_value value for variable to be written
:param string axis: axis value for variable to be written
"""
variable.long_name=long_name
variable.missing_value=missing_value
variable.units="1"
# variable.axis=axis # 12/1/17 RJHD - not required for CF compliance.
# for future [September 2015]
# http://cfconventions.org/Data/cf-conventions/cf-conventions-1.6/build/cf-conventions.html#flags
return # WriteFlagAttributes
#************************************************************************
def write_coordinates(outfile, short_name, standard_name, long_name, units, axis, data, coordinate_length = 1, do_zip = True):
"""
Write coordinates as variables
:param str outfile: output netcdf file
:param str short_name: netcdf short_name
:param str standard_name: netcdf standard_name
:param str long_name: netcdf long_name
:param str units: netcdf units
:param str axis: netcdf axis
:param flt data: coordinate
:param int coordinate_length: length of dimension
:param bool do_zip: allow for zipping
"""
if "coordinate_length" not in outfile.dimensions:
coord_dim = outfile.createDimension('coordinate_length', coordinate_length)
nc_var = outfile.createVariable(short_name, np.dtype('float'), ('coordinate_length',), zlib = do_zip)
nc_var.standard_name = standard_name
nc_var.long_name = long_name
nc_var.units = units
nc_var.axis = axis
if short_name == "alt":
nc_var.positive = "up"
nc_var[:] = data
return # write_coordinates
#************************************************************************
def MakeNetcdfFiles(STARTYEAR, ENDYEAR, restart_id="", end_id="", do_zip = True, Extra=False):
"""
Parse the ASCII files and do the NetCDF file creation
:param string restart_id: string for starting station, default=""
:param string end_id: string for ending station, default=""
:param boolean do_zip: make netCDF4 files with internal zipping
:param boolean Extra: setting to extract extra variables
"""
StationInfo=ReadStations(INPUT_FILE_LOCS+station_list_filename)
StationIDs=np.array(StationInfo[:,0])
# sorted in case of unordered lists
sort_order=np.argsort(StationIDs)
StationIDs=StationIDs[sort_order]
StationLat=np.array([float(x) for x in StationInfo[sort_order,1]])
StationLon=np.array([float(x) for x in StationInfo[sort_order,2]])
StationElv=np.array([float(x) for x in StationInfo[sort_order,3]])
print "Read in %i stations" % len(StationIDs)
# reduce station list to start and end stations
if restart_id != "":
startindex=np.where(StationIDs==restart_id)[0][0]
StationIDs=StationIDs[startindex:]
StationLat=StationLat[startindex:]
StationLon=StationLon[startindex:]
StationElv=StationElv[startindex:]
if end_id != "":
endindex=np.where(StationIDs==end_id)[0][0]
StationIDs=StationIDs[:endindex+1]
StationLat=StationLat[:endindex+1]
StationLon=StationLon[:endindex+1]
StationElv=StationElv[:endindex+1]
if restart_id !="" or end_id!="":
print "Truncated run selected"
print "Processing %i stations" % len(StationIDs)
nstations=len(StationIDs)
Composites=ReadComposites(INPUT_FILE_LOCS+merger_list_filename)
DaysBetween=dt.datetime(ENDYEAR+1,1,1,0,0)-dt.datetime(STARTYEAR,1,1,0,0)
HoursBetween=int(DaysBetween.days*24.)
TimeStamps=np.linspace(0,HoursBetween-1,HoursBetween) # keep in integer hours
ValidYears=np.linspace(STARTYEAR,ENDYEAR,(ENDYEAR-STARTYEAR+1))
dubiousfile=LOG_OUTFILE_LOCS+'dubious_ISD_data_files.txt'
# read in Canadian station list
Canadian_stations_info = np.genfromtxt(INPUT_FILE_LOCS + "Canada_time_ranges.dat", dtype=(str), delimiter = [12,20,20])
Canadian_station_ids = Canadian_stations_info[:,0]
Canadian_station_start = np.array([dt.datetime.strptime(d.strip(), "%Y-%m-%d %H:%M:%S") for d in Canadian_stations_info[:,1]])
Canadian_station_end = np.array([dt.datetime.strptime(d.strip(), "%Y-%m-%d %H:%M:%S") for d in Canadian_stations_info[:,2]])
dbg_sttime=dt.datetime.now()
for st,station in enumerate(StationIDs):
print '%s, number %i of %i' %(station, st+1, nstations)
temperatures=np.zeros(HoursBetween)
temperature_flags=np.zeros(HoursBetween, dtype=np.int)
dewpoints=np.zeros(HoursBetween)
dewpoint_flags=np.zeros(HoursBetween, dtype=np.int)
total_cloud_cover=np.zeros(HoursBetween, dtype=np.int)
total_cloud_flags=np.zeros(HoursBetween, dtype=np.int)
low_cloud_cover=np.zeros(HoursBetween, dtype=np.int)
low_cloud_flags=np.zeros(HoursBetween, dtype=np.int)
mid_cloud_cover=np.zeros(HoursBetween, dtype=np.int)
mid_cloud_flags=np.zeros(HoursBetween, dtype=np.int)
high_cloud_cover=np.zeros(HoursBetween, dtype=np.int)
high_cloud_flags=np.zeros(HoursBetween, dtype=np.int)
cloud_base=np.zeros(HoursBetween)
cloud_base_flags=np.zeros(HoursBetween, dtype=np.int)
windspeeds=np.zeros(HoursBetween)
windspeeds_flags=np.zeros(HoursBetween, dtype=np.int)
winddirs=np.zeros(HoursBetween, dtype=np.int)
winddirs_flags=np.zeros(HoursBetween, dtype=np.int)
past_sigwx1=np.zeros(HoursBetween, dtype=np.int)
past_sigwx1_period=np.zeros(HoursBetween, dtype=np.int)
past_sigwx1_flag=np.zeros(HoursBetween, dtype=np.int)
precip1_period=np.zeros(HoursBetween, dtype=np.int)
precip1_depth=np.zeros(HoursBetween)
precip1_condition=['null' for i in range(HoursBetween)]
precip1_flag=np.zeros(HoursBetween, dtype=np.int)
slp=np.zeros(HoursBetween)
slp_flag=np.zeros(HoursBetween, dtype=np.int)
sun_duration=np.zeros(HoursBetween)
sun_durationqc=np.zeros(HoursBetween, dtype=np.int)
wind_gust_period=np.zeros(HoursBetween)
wind_gust_value=np.zeros(HoursBetween)
wind_gust_flags=np.zeros(HoursBetween, dtype=np.int)
# Tells you what the true input station id was for the duplicate
# using list as string array.
input_station_id=['null' for i in range(HoursBetween)]
temperatures.fill(FLTMDI)
temperature_flags.fill(INTMDI)
dewpoints.fill(FLTMDI)
dewpoint_flags.fill(INTMDI)
total_cloud_cover.fill(INTMDI)
total_cloud_flags.fill(INTMDI)
low_cloud_cover.fill(INTMDI)
low_cloud_flags.fill(INTMDI)
mid_cloud_cover.fill(INTMDI)
mid_cloud_flags.fill(INTMDI)
high_cloud_cover.fill(INTMDI)
high_cloud_flags.fill(INTMDI)
cloud_base.fill(INTMDI)
cloud_base_flags.fill(INTMDI)
windspeeds.fill(FLTMDI)
windspeeds_flags.fill(INTMDI)
winddirs.fill(INTMDI)
winddirs_flags.fill(INTMDI)
past_sigwx1.fill(INTMDI)
past_sigwx1_period.fill(INTMDI)
past_sigwx1_flag.fill(INTMDI)
precip1_period.fill(INTMDI)
precip1_depth.fill(FLTMDI)
precip1_flag.fill(INTMDI)
slp.fill(FLTMDI)
slp_flag.fill(INTMDI)
sun_duration.fill(INTMDI)
sun_durationqc.fill(INTMDI)
wind_gust_period.fill(FLTMDI)
wind_gust_value.fill(FLTMDI)
wind_gust_flags.fill(INTMDI)
if Extra:
windtypes=['null' for i in range(HoursBetween)]
present_sigwx=np.zeros(HoursBetween, dtype=np.int)
present_sigwx_flags=np.zeros(HoursBetween, dtype=np.int)
past_sigwx2=np.zeros(HoursBetween, dtype=np.int)
past_sigwx2_period=np.zeros(HoursBetween, dtype=np.int)
past_sigwx2_flag=np.zeros(HoursBetween, dtype=np.int)
precip2_period=np.zeros(HoursBetween, dtype=np.int)
precip2_depth=np.zeros(HoursBetween)
precip2_condition=['null' for i in range(HoursBetween)]
precip2_flag=np.zeros(HoursBetween, dtype=np.int)
precip3_period=np.zeros(HoursBetween, dtype=np.int)
precip3_depth=np.zeros(HoursBetween)
precip3_condition=['null' for i in range(HoursBetween)]
precip3_flag=np.zeros(HoursBetween, dtype=np.int)
precip4_period=np.zeros(HoursBetween, dtype=np.int)
precip4_depth=np.zeros(HoursBetween)
precip4_condition=['null' for i in range(HoursBetween)]
precip4_flag=np.zeros(HoursBetween, dtype=np.int)
maximum_temp_period=np.zeros(HoursBetween)
maximum_temp_value=np.zeros(HoursBetween)
maximum_temp_flags=np.zeros(HoursBetween, dtype=np.int)
minimum_temp_period=np.zeros(HoursBetween)
minimum_temp_value=np.zeros(HoursBetween)
minimum_temp_flags=np.zeros(HoursBetween, dtype=np.int)
present_sigwx.fill(INTMDI)
present_sigwx_flags.fill(INTMDI)
past_sigwx2.fill(INTMDI)
past_sigwx2_period.fill(INTMDI)
past_sigwx2_flag.fill(INTMDI)
precip2_period.fill(INTMDI)
precip2_depth.fill(FLTMDI)
precip2_flag.fill(INTMDI)
precip3_period.fill(INTMDI)
precip3_depth.fill(FLTMDI)
precip3_flag.fill(INTMDI)
precip4_period.fill(INTMDI)
precip4_depth.fill(FLTMDI)
precip4_flag.fill(INTMDI)
maximum_temp_period.fill(FLTMDI)
maximum_temp_value.fill(FLTMDI)
maximum_temp_flags.fill(INTMDI)
minimum_temp_period.fill(FLTMDI)
minimum_temp_value.fill(FLTMDI)
minimum_temp_flags.fill(INTMDI)
# extract stations to process, including composites.
is_composite=next((i for i, sublist in enumerate(Composites) if station in sublist), -1)
if is_composite!=-1:
consider_these=Composites[is_composite]
print 'This is a duplicate station containing %s ' % ' '.join(consider_these)
else:
consider_these=[station]
# get listing of all files to process
raw_files=[]
for cstn in consider_these:
if cstn[0:3] >= '725' and cstn[0:3] <= '729':
raw_files.extend(glob.glob(ISD_DATA_LOCS+'station725/'+cstn+'*'))
else:
raw_files.extend(glob.glob(ISD_DATA_LOCS+'station'+cstn[0:2]+'s/'+cstn+'*'))
raw_files.sort()
dbg_lasttime=dt.datetime.now()
for rfile in raw_files:
done_print = False # for output of Canadian station skipping
a=dt.datetime.now()-dbg_lasttime
print rfile, a
dbg_lasttime=dt.datetime.now()
raw_station=rfile.split('/')[-1][0:12]
rfile_year=int(rfile.split('-')[-1].split('.')[0])
rfile_days=dt.datetime(rfile_year,1,1,0,0)-dt.datetime(STARTYEAR,1,1,0,0)
rfile_hours=rfile_days.days*24.
rfile_ydays=dt.datetime(rfile_year+1,1,1,0,0)-dt.datetime(rfile_year,1,1,0,0)
rfile_yhours=rfile_ydays.days*24
if rfile_year in ValidYears:
dubious_flagged=0
if rfile[-2:]!='gz':
subprocess.call(['gzip','-f','-9',rfile])
rfile=rfile+'.gz'
# note - this amends the file identifier in the loop
last_obs_time=0.
try:
with gzip.open(rfile,'r') as infile:
for rawline in infile:
# main processing
# check for remarks
cleanline=rawline[0:rawline.find('REM')]
# find which timestamp we're working at
year=int(cleanline[15:19])
month=int(cleanline[19:21])
day=int(cleanline[21:23])
hour=int(cleanline[23:25])
minute=int(cleanline[25:27])
# found error in minute value in 030910-99999, 035623-99999
if minute < 0:
hour=hour-1
minute=60+minute
elif minute > 59:
hour=hour+1
minute=minute-60
if hour < 0:
day=day-1
hour=24+hour
elif hour > 23:
day=day+1
hour=hour-24
dummy, ndays = calendar.monthrange(year, month)
if day <= 0:
month = month -1
dummy, ndays = calendar.monthrange(year, month)
day=ndays - day
elif day > ndays:
month=month+1
day=day - ndays
if month <= 0:
month = 12 - month
year = year - 1
elif month > 12:
month=month - 12
year = year + 1
dt_time = dt.datetime(year, month, day, hour, minute)
if raw_station in Canadian_station_ids:
# then test for restrictions on start/end times
loc, = np.where(Canadian_station_ids == raw_station)
if dt_time < Canadian_station_start[loc[0]]:
if not done_print:
print "skipping year {} of station {} as identified as undocumented move by Environment Canada".format(year, raw_station)
done_print = True
continue
if dt_time > Canadian_station_end[loc[0]]:
if not done_print:
print "skipping year {} of station {} as identified as undocumented move by Environment Canada".format(year, raw_station)
done_print = True
continue
# integer hours
obs_time=ncdf.date2num(dt_time, units='hours since '+str(STARTYEAR)+'-01-01 00:00:00', calendar='julian')
string_obs_time=dt.datetime.strftime(dt_time,"%d-%m-%Y, %H:%M")
# working in hours, so just round the hours since
# start date to get the time stamp 13/6/2012
time_loc=int(round(obs_time))
# test if this time_loc out of bounds:
# e.g. if 2350 on 31/12/ENDYEAR then should not
# takes the obs as it belongs to following year
if time_loc != HoursBetween:
# test if closer to timestamp than previous observation
# overwrite only acted upon if newer real data closer to full hour
currentT=temperatures[time_loc]
currentD=dewpoints[time_loc]
newT=ExtractValues(FLTMDI,cleanline,87,5,'+9999',divisor=10.,doflag=False)
newD=ExtractValues(FLTMDI,cleanline,93,5,'+9999',divisor=10.,doflag=False)
Extract=False
# no extract happened for this time stamp as yet - so extract
if input_station_id[time_loc] =='null':
# this is not an overwrite, so nothing extra needs doing
Extract=True
# if currently have no T or D data
elif currentT==FLTMDI and currentD==FLTMDI:
# if overwriting, only do so with observation closer to time stamp
if input_station_id[time_loc] == raw_station: # tests if already read into this time stamp
if (newT != FLTMDI) or (newD != FLTMDI):
# if updated data would have T or D, then take it, even if further from the time stamp
Extract=True
elif last_obs_time != 0.: # not really necessary as already have at least 1 obs, but still...
# if time stamp closer than last one
if np.abs(TimeStamps[time_loc]-obs_time) < np.abs(TimeStamps[time_loc]-last_obs_time):
Extract=True
# else just take the line - no observations read into this time stamp yet
else:
Extract=True
# if already have T but _no_ D OR D but _no_ T, but new one has T and D, take this line
# this is an overwrite - so also check that overwriting with the same station
elif ((currentT!=FLTMDI and currentD==FLTMDI) or (currentT==FLTMDI and currentD!=FLTMDI)) \
and (newT!=FLTMDI and newD!=FLTMDI):
if input_station_id[time_loc] == raw_station:
# preference to both values over just one
Extract=True
# have D but no T, and new observation comes up with T, select if closer
elif (currentT==FLTMDI and currentD!=FLTMDI) and (newT!=FLTMDI):
# if overwriting, only do so with observation closer to time stamp
if input_station_id[time_loc] == raw_station: # tests if already read into this time stamp
if last_obs_time != 0.: # not really necessary as already have at least 1 obs, but still...
# if time stamp closer than last one
if np.abs(TimeStamps[time_loc]-obs_time) < np.abs(TimeStamps[time_loc]-last_obs_time):
Extract=True
# have T but no D, and new observation comes up with T, select if closer
elif (currentT!=FLTMDI and currentD==FLTMDI) and (newT!=FLTMDI):
# if overwriting, only do so with observation closer to time stamp
if input_station_id[time_loc] == raw_station: # tests if already read into this time stamp
if last_obs_time != 0.: # not really necessary as already have at least 1 obs, but still...
# if time stamp closer than last one
if np.abs(TimeStamps[time_loc]-obs_time) < np.abs(TimeStamps[time_loc]-last_obs_time):
Extract=True
# if already have T and D, and new one also has T and D, but at closer time stamp, take this line
# this is an overwrite - so also check that overwriting with the same station
elif (currentT!=FLTMDI and currentD!=FLTMDI) and (newT!=FLTMDI and newD!=FLTMDI):
if input_station_id[time_loc] == raw_station:
if last_obs_time != 0.: # not really necessary as already have at least 1 obs, but still...
# if time stamp closer than last one
if np.abs(TimeStamps[time_loc]-obs_time) < np.abs(TimeStamps[time_loc]-last_obs_time):
Extract=True
else:
Extract=False # just in case
# sort last obs_time -
last_obs_time=obs_time
if input_station_id[time_loc]=='null':
input_station_id[time_loc]=raw_station
# main variables
dummyflag=0
# if allowed to extract:
if Extract:
ExtractionProcess(temperatures, temperature_flags,time_loc,FLTMDI,'+9999',cleanline,87,5, divisor=10.)
if Extract:
ExtractionProcess(dewpoints, dewpoint_flags,time_loc,FLTMDI,'+9999',cleanline,93,5, divisor=10.)
if Extract:
ExtractionProcess(slp, slp_flag,time_loc,FLTMDI,'99999',cleanline,99,5, divisor=10.)
if Extract:
ExtractionProcess(winddirs, winddirs_flags,time_loc,INTMDI,'999',cleanline,60,3)
if Extra:
ExtractionProcess(windtypes, dummyflag, time_loc,'','-',cleanline,64,1,doflag=False)
if Extract:
ExtractionProcess(windspeeds, windspeeds_flags,time_loc,FLTMDI,'9999',cleanline,65,4, divisor=10.)
if Extract:
ExtractionProcess(cloud_base, cloud_base_flags,time_loc,INTMDI,'99999',cleanline,70,5)
# Optional Variables - need to hunt for start point
# CLOUDs
text_ident='GF1'
exists=cleanline.find(text_ident)
if exists!=-1:
try:
if RepresentsInt(cleanline[exists+3]):
if Extract:
ExtractionProcess(total_cloud_cover, total_cloud_flags,time_loc,INTMDI,'99',cleanline,
exists+3,2,flagoffset=2)
if Extract:
ExtractionProcess(low_cloud_cover, low_cloud_flags,time_loc,INTMDI,'99',cleanline,
exists+8,2)
except IndexError:
# string following data marker doesn't exist
if dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
elif dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
text_ident='GA1'
exists_overall=cleanline.find(text_ident)
if exists_overall!=-1:
cloud_amts=np.array([INTMDI for i in range(6)])
cloud_hghts=np.array([INTMDI for i in range(6)])
cloud_flags=np.array([INTMDI for i in range(6)])
flagvals=['GA1','GA2','GA3','GA4','GA5','GA6']
for cl,flg in enumerate(flagvals):
exists=cleanline.find(flg)
if exists!=-1:
try:
if RepresentsInt(cleanline[exists+3]):
if Extract:
ExtractionProcess(cloud_amts, cloud_flags,cl,INTMDI,'99',cleanline,
exists+3,2)
ExtractionProcess(cloud_hghts, dummyflag,cl,INTMDI,'+99999',cleanline,
exists+6,6,doflag=False)
# remove hard coded values?
if cloud_hghts[cl]!=INTMDI:
if cloud_hghts[cl]<=2000:
cloud_hghts[cl]=1
elif cloud_hghts[cl]>=6000:
cloud_hghts[cl]=3
elif cloud_hghts[cl]>=4:
cloud_hghts[cl]=2
except IndexError:
# string following data marker doesn't exist
if dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident+'-'+flg, station, string_obs_time)
elif dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident+'-'+flg, station, string_obs_time)
# end for loop
SortClouds(total_cloud_cover, total_cloud_flags, time_loc, cloud_amts, cloud_flags, range(len(cloud_amts))) # select all using this slice
lowclouds=np.where(np.array(cloud_hghts) == 1)[0]
SortClouds(low_cloud_cover, low_cloud_flags, time_loc, cloud_amts, cloud_flags, lowclouds)
medclouds=np.where(np.array(cloud_hghts) == 2)[0]
SortClouds(mid_cloud_cover, mid_cloud_flags, time_loc, cloud_amts, cloud_flags, medclouds)
hiclouds=np.where(np.array(cloud_hghts) == 3)[0]
SortClouds(high_cloud_cover, high_cloud_flags, time_loc, cloud_amts, cloud_flags, hiclouds)
text_ident='GD1'
exists_overall=cleanline.find(text_ident)
if exists_overall!=-1:
if (total_cloud_cover[time_loc] == INTMDI):
cloud_amts=np.array([INTMDI for i in range(6)])
cloud_amts2=np.array([INTMDI for i in range(6)])
cloud_hghts=np.array([INTMDI for i in range(6)])
cloud_flags=np.array([INTMDI for i in range(6)])
flagvals=['GD1','GD2','GD3','GD4','GD5','GD6']
for cl,flg in enumerate(flagvals):
exists=cleanline.find(flg)
if exists!=-1:
try:
if RepresentsInt(cleanline[exists+3]):
if Extract:
# if TestToExtract(cloud_amts[cl],INTMDI,overwrite):
ExtractionProcess(cloud_amts, cloud_flags,cl,INTMDI,'9',cleanline,
exists+3,1,flagoffset=1)
if cloud_amts[cl] >= 5 :
cloud_amts[cl]=INTMDI
ExtractionProcess(cloud_amts2, dummyflag,cl,INTMDI,'99',cleanline,
exists+4,2,doflag=False)
ExtractionProcess(cloud_hghts, dummyflag,cl,INTMDI,'+99999',cleanline,
exists+7,6,doflag=False)
# remove hard coded values?
if cloud_hghts[cl]!=INTMDI:
if cloud_hghts[cl]<=2000:
cloud_hghts[cl]=1
elif cloud_hghts[cl]>=6000:
cloud_hghts[cl]=3
elif cloud_hghts[cl]>=4:
cloud_hghts[cl]=2
except IndexError:
# string following data marker doesn't exist
if dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident+'-'+flg, station, string_obs_time)
elif dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident+'-'+flg, station, string_obs_time)
# end for loop
SortClouds2(total_cloud_cover, total_cloud_flags, time_loc, cloud_amts, cloud_amts2, cloud_flags, range(len(cloud_amts))) # select whole list with slice
lowclouds=np.where(np.array(cloud_hghts) == 1)[0]
if len(lowclouds)>=1:
SortClouds2(low_cloud_cover, low_cloud_flags, time_loc, cloud_amts, cloud_amts2, cloud_flags, lowclouds)
medclouds=np.where(np.array(cloud_hghts) == 2)[0]
if len(medclouds)>=1:
SortClouds2(mid_cloud_cover, mid_cloud_flags, time_loc, cloud_amts, cloud_amts2, cloud_flags, medclouds)
hiclouds=np.where(np.array(cloud_hghts) == 3)[0]
if len(hiclouds)>=1:
SortClouds2(high_cloud_cover, high_cloud_flags, time_loc, cloud_amts, cloud_amts2, cloud_flags, hiclouds)
# PAST-SIGWX
text_ident='AY1'
exists=cleanline.find(text_ident)
if exists!=-1:
try:
if RepresentsInt(cleanline[exists+3]):
if Extract:
ExtractionProcess(past_sigwx1, past_sigwx1_flag,time_loc,INTMDI,'-',cleanline,
exists+3,1)
ExtractionProcess(past_sigwx1_period, dummyflag,time_loc,INTMDI,'99',cleanline,
exists+5,2,doflag=False)
except IndexError:
if dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
elif dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
text_ident='AZ1'
exists=cleanline.find(text_ident)
if exists!=-1:
try:
if RepresentsInt(cleanline[exists+3]):
if Extract:
ExtractionProcess(past_sigwx1, past_sigwx1_flag,time_loc,INTMDI,'-',cleanline,
exists+3,1)
ExtractionProcess(past_sigwx1_period, dummyflag,time_loc,INTMDI,'99',cleanline,
exists+5,2,doflag=False)
except IndexError:
if dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
elif dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
# PRECIP
text_ident='AA1'
exists=cleanline.find(text_ident)
if exists!=-1:
try:
if RepresentsInt(cleanline[exists+3]):
if Extract:
ExtractionProcess(precip1_period, dummyflag,time_loc,INTMDI,'99',cleanline,
exists+3,2,doflag=False)
if precip1_period[time_loc] < 0:
precip1_period[time_loc]=INTMDI
ExtractionProcess(precip1_depth, precip1_flag,time_loc,FLTMDI,'9999',cleanline,
exists+5,4,doflag=True,flagoffset=1,divisor=10.)
# this passes an empty string as the missing data test
ExtractionProcess(precip1_condition, dummyflag,time_loc,'','9',cleanline,
exists+9,1,doflag=False)
except IndexError:
if dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
elif dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
# SUN DURATION
text_ident='GJ1'
exists=cleanline.find(text_ident)
if exists!=-1:
try:
if RepresentsInt(cleanline[exists+3]):
if Extract:
ExtractionProcess(sun_duration, sun_durationqc,time_loc,INTMDI,'9999',cleanline,
exists+3,4)
except IndexError:
if dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
elif dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
# WIND GUST
for text_ident in ['OA1','OA2','OA3','OA4']:
# test all of the possible locations for wind gust
exists=cleanline.find(text_ident)
if exists!=-1:
try:
if RepresentsInt(cleanline[exists+3]):
if cleanline[exists+3] == "4":
# then this is the maximum gust speed - see ish-format-document
if Extract:
ExtractionProcess(wind_gust_period, dummyflag,time_loc,INTMDI,'99',cleanline,
exists+4,2,doflag=False)
if wind_gust_period[time_loc] < 0:
wind_gust_period[time_loc]=FLTMDI
ExtractionProcess(wind_gust_value, wind_gust_flags,time_loc,FLTMDI,'9999',cleanline,
exists+6,4,divisor=10.)
except IndexError:
if dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
elif dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
if Extra:
# PRESENT SIGWX
text_ident='AW1'
exists=cleanline.find(text_ident)
if exists!=-1:
try:
if RepresentsInt(cleanline[exists+3]):
if Extract:
ExtractionProcess(present_sigwx, present_sigwx_flags,time_loc,INTMDI,'--',cleanline,
exists+3,2)
except IndexError:
if dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
elif dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
# PAST-SIGWX2
for text_ident in ['AY1','AY2','AZ1','AZ2']:
exists=cleanline.find(text_ident)
if exists!=-1:
try:
if RepresentsInt(cleanline[exists+3]):
if Extract:
ExtractionProcess(past_sigwx2, past_sigwx2_flag,time_loc,INTMDI,'-',cleanline,
exists+3,1)
ExtractionProcess(past_sigwx2_period,dummyflag,time_loc,INTMDI,'99',cleanline,
exists+5,2,doflag=False)
value=ExtractValues(INTMDI, cleanline,exists+5,2,'99',doflag=False)
if value!=INTMDI:
past_sigwx2_period[time_loc]=value
except IndexError:
if dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
elif dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
# PRECIP
text_ident='AA2'
exists=cleanline.find(text_ident)
if exists!=-1:
try:
if RepresentsInt(cleanline[exists+3]):
if Extract:
ExtractionProcess(precip2_period,dummyflag,time_loc,INTMDI,'99',cleanline,
exists+3,2,doflag=False)
if precip2_period[time_loc] < 0:
precip2_period[time_loc]=INTMDI
ExtractionProcess(precip2_depth,precip2_flag,time_loc,FLTMDI,'9999',cleanline,
exists+5,4,doflag=True,flagoffset=1,divisor=10.)
ExtractionProcess(precip2_condition, dummyflag,time_loc,'','9',cleanline,
exists+9,1,doflag=False)
# # leave this as is because of separate value and flag tests
# value,flag=ExtractValues('', cleanline,exists+9,1,'9')
# if value!='':
# precip2_condition[time_loc]=value
# if flag in range(20):
# precip2_flag[time_loc]=flag
except IndexError:
if dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
elif dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
text_ident='AA3'
exists=cleanline.find(text_ident)
if exists!=-1:
try:
if RepresentsInt(cleanline[exists+3]):
if Extract:
ExtractionProcess(precip3_period,dummyflag,time_loc,INTMDI,'99',cleanline,
exists+3,2,doflag=False)
if precip3_period[time_loc] < 0:
precip3_period[time_loc]=INTMDI
ExtractionProcess(precip3_depth,precip3_flag,time_loc,FLTMDI,'9999',cleanline,
exists+5,4,doflag=True,flagoffset=1,divisor=10.)
ExtractionProcess(precip3_condition, dummyflag,time_loc,'','9',cleanline,
exists+9,1,doflag=False)
# # leave this as is because of separate value and flag tests
# value,flag=ExtractValues('', cleanline,exists+9,1,'9')
# if value!='':
# precip3_condition[time_loc]=value
# if flag in range(20):
# precip3_flag[time_loc]=flag
except IndexError:
if dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
elif dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
text_ident='AA4'
exists=cleanline.find(text_ident)
if exists!=-1:
try:
if RepresentsInt(cleanline[exists+3]):
if Extract:
ExtractionProcess(precip4_period,dummyflag,time_loc,INTMDI,'99',cleanline,
exists+3,2,doflag=False)
if precip4_period[time_loc] < 0:
precip4_period[time_loc]=INTMDI
ExtractionProcess(precip4_depth,precip4_flag,time_loc,FLTMDI,'9999',cleanline,
exists+5,4,doflag=True,flagoffset=1,divisor=10.)
ExtractionProcess(precip4_condition, dummyflag,time_loc,'','9',cleanline,
exists+9,1,doflag=False)
# # leave this as is because of separate value and flag tests
# value,flag=ExtractValues('', cleanline,exists+9,1,'9')
# if value!='':
# precip4_condition[time_loc]=value
# if flag in range(20):
# precip4_flag[time_loc]=flag
except IndexError:
if dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
elif dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
# EXTREME TEMPERATURES
# RJHD - 11 March 2014 - these could be converted to Tmax and Tmin using the code information
for text_ident in ['KA1','KA2']:
exists=cleanline.find(text_ident)
if exists!=-1:
try:
if RepresentsInt(cleanline[exists+3]):
if cleanline[exists+6] == "N":
# then this is the minimum temperature - see ish-format-document
if Extract:
ExtractionProcess(minimum_temp_period, dummyflag,time_loc,INTMDI,'999',cleanline,
exists+3,3,doflag=False,divisor=10.)
if minimum_temp_period[time_loc] < 0:
minimum_temp_period[time_loc]=FLTMDI
ExtractionProcess(minimum_temp_value, minimum_temp_flags,time_loc,FLTMDI,'+9999',cleanline,
exists+7,5,divisor=10.)
elif cleanline[exists+6] == "M":
# then this is the maximum temperature - see ish-format-document
if Extract:
ExtractionProcess(maximum_temp_period, dummyflag,time_loc,INTMDI,'999',cleanline,
exists+3,3,doflag=False,divisor=10.)
if maximum_temp_period[time_loc] < 0:
maximum_temp_period[time_loc]=FLTMDI
ExtractionProcess(maximum_temp_value, maximum_temp_flags,time_loc,FLTMDI,'+9999',cleanline,
exists+7,5,divisor=10.)
except IndexError:
if dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
elif dubious_flagged==0:
dubious_flagged=WriteDubious(dubiousfile,rfile,text_ident, station, string_obs_time)
# end Extra variables
# end if time_loc != HoursBetween
# end line in file loop
except IOError:
print "Cannot find file: ", rfile
# end file loop
print "parsed all files for station ", station
mask_vals=np.where(np.array(input_station_id) != 'null')[0]
if hours:
times_out=TimeStamps[mask_vals]
else:
times_out=TimeStamps[mask_vals]/24.
# apply the mask
input_station_id=np.array(input_station_id)[mask_vals]
temperatures=temperatures[mask_vals]
temperature_flags=temperature_flags[mask_vals]
dewpoints=dewpoints[mask_vals]
dewpoint_flags=dewpoint_flags[mask_vals]
total_cloud_cover=total_cloud_cover[mask_vals]
total_cloud_flags=total_cloud_flags[mask_vals]
low_cloud_cover=low_cloud_cover[mask_vals]
low_cloud_flags=low_cloud_flags[mask_vals]
mid_cloud_cover=mid_cloud_cover[mask_vals]
mid_cloud_flags=mid_cloud_flags[mask_vals]
high_cloud_cover=high_cloud_cover[mask_vals]
high_cloud_flags=high_cloud_flags[mask_vals]
cloud_base=cloud_base[mask_vals]
cloud_base_flags=cloud_base_flags[mask_vals]
windspeeds=windspeeds[mask_vals]
windspeeds_flags=windspeeds_flags[mask_vals]
winddirs=winddirs[mask_vals]
winddirs_flags=winddirs_flags[mask_vals]
past_sigwx1=past_sigwx1[mask_vals]
past_sigwx1_period=past_sigwx1_period[mask_vals]
past_sigwx1_flag=past_sigwx1_flag[mask_vals]
precip1_period=precip1_period[mask_vals]
precip1_depth=precip1_depth[mask_vals]
precip1_condition=np.array(precip1_condition)[mask_vals]
precip1_flag=precip1_flag[mask_vals]
slp=slp[mask_vals]
slp_flag=slp_flag[mask_vals]
sun_duration=sun_duration[mask_vals]
sun_durationqc=sun_durationqc[mask_vals]
wind_gust_period=wind_gust_period[mask_vals]
wind_gust_value=wind_gust_value[mask_vals]
wind_gust_flags=wind_gust_flags[mask_vals]
if Extra:
windtypes=np.array(windtypes)[mask_vals]
present_sigwx=present_sigwx[mask_vals]
present_sigwx_flags=present_sigwx_flags[mask_vals]
past_sigwx2=past_sigwx2[mask_vals]
past_sigwx2_period=past_sigwx2_period[mask_vals]
past_sigwx2_flag=past_sigwx2_flag[mask_vals]
precip2_period=precip2_period[mask_vals]
precip2_depth=precip2_depth[mask_vals]
precip2_condition=np.array(precip2_condition)[mask_vals]
precip2_flag=precip2_flag[mask_vals]
precip3_period=precip3_period[mask_vals]
precip3_depth=precip3_depth[mask_vals]
precip3_condition=np.array(precip3_condition)[mask_vals]
precip3_flag=precip3_flag[mask_vals]
precip4_period=precip4_period[mask_vals]
precip4_depth=precip4_depth[mask_vals]
precip4_condition=np.array(precip4_condition)[mask_vals]
precip4_flag=precip4_flag[mask_vals]
maximum_temp_period=maximum_temp_period[mask_vals]
maximum_temp_value=maximum_temp_value[mask_vals]
maximum_temp_flags=maximum_temp_flags[mask_vals]
minimum_temp_period=minimum_temp_period[mask_vals]
minimum_temp_value=minimum_temp_value[mask_vals]
minimum_temp_flags=minimum_temp_flags[mask_vals]
netcdf_filename=NETCDF_DATA_LOCS+'/'+station+'.nc'
if do_zip:
netcdf_outfile = ncdf.Dataset(netcdf_filename,'w', format='NETCDF4')
else:
netcdf_outfile = ncdf.Dataset(netcdf_filename,'w', format='NETCDF3_CLASSIC')
time=netcdf_outfile.createDimension('time',len(times_out))
char_len=netcdf_outfile.createDimension('character_length',4)
long_char_len=netcdf_outfile.createDimension('long_character_length',12)
coords_len=netcdf_outfile.createDimension('coordinate_length',1)
# write the coordinates
write_coordinates(netcdf_outfile, "latitude", "latitude", "station_latitude", "degrees_north", "Y", StationLat[st])
write_coordinates(netcdf_outfile, "longitude", "longitude", "station_longitude", "degrees_east", "X", StationLon[st])
write_coordinates(netcdf_outfile, "elevation", "surface_altitude", "vertical distance above the surface", "meters", "Z", StationElv[st])
# station ID as base variable
nc_var = netcdf_outfile.createVariable("station_id", np.dtype('S1'), ('long_character_length',), zlib = do_zip)
# nc_var.standard_name = "station_identification_code"
nc_var.long_name = "Station ID number"
nc_var[:] = ncdf.stringtochar(StationIDs[st])
# create variables
timesvar=netcdf_outfile.createVariable('time','f8',('time',), zlib = do_zip)
# lonsvar=netcdf_outfile.createVariable('lon','f8',('coordinate_length',), zlib = do_zip)
# latsvar=netcdf_outfile.createVariable('lat','f8',('coordinate_length',), zlib = do_zip)
# altsvar=netcdf_outfile.createVariable('alt','f8',('coordinate_length',), zlib = do_zip)
# idsvar=netcdf_outfile.createVariable('station_id','S1',('long_character_length',), zlib = do_zip)
stationsvar=netcdf_outfile.createVariable('input_station_id','S1',('time','long_character_length',), zlib = do_zip)
tempsvar=netcdf_outfile.createVariable('temperatures','f8',('time',), zlib = do_zip)
tempsflagsvar=netcdf_outfile.createVariable('temperature_flags','i4',('time',), zlib = do_zip)
dewsvar=netcdf_outfile.createVariable('dewpoints','f8',('time',), zlib = do_zip)
dewsflagsvar=netcdf_outfile.createVariable('dewpoint_flags','i4',('time',), zlib = do_zip)
tcvar=netcdf_outfile.createVariable('total_cloud_cover','i4',('time',), zlib = do_zip)
tcfvar=netcdf_outfile.createVariable('total_cloud_flags','i4',('time',), zlib = do_zip)
lcvar=netcdf_outfile.createVariable('low_cloud_cover','i4',('time',), zlib = do_zip)
lcfvar=netcdf_outfile.createVariable('low_cloud_flags','i4',('time',), zlib = do_zip)
mcvar=netcdf_outfile.createVariable('mid_cloud_cover','i4',('time',), zlib = do_zip)
mcfvar=netcdf_outfile.createVariable('mid_cloud_flags','i4',('time',), zlib = do_zip)
hcvar=netcdf_outfile.createVariable('high_cloud_cover','i4',('time',), zlib = do_zip)
hcfvar=netcdf_outfile.createVariable('high_cloud_flags','i4',('time',), zlib = do_zip)
cbvar=netcdf_outfile.createVariable('cloud_base','f8',('time',), zlib = do_zip)
cbfvar=netcdf_outfile.createVariable('cloud_base_flags','i4',('time',), zlib = do_zip)
wsvar=netcdf_outfile.createVariable('windspeeds','f8',('time',), zlib = do_zip)
wsfvar=netcdf_outfile.createVariable('windspeeds_flags','i4',('time',), zlib = do_zip)
wdvar=netcdf_outfile.createVariable('winddirs','i4',('time',), zlib = do_zip)
wdfvar=netcdf_outfile.createVariable('winddirs_flags','i4',('time',), zlib = do_zip)
pswx1var=netcdf_outfile.createVariable('past_sigwx1','i4',('time',), zlib = do_zip)
pswx1pvar=netcdf_outfile.createVariable('past_sigwx1_period','i4',('time',), zlib = do_zip)
pswx1fvar=netcdf_outfile.createVariable('past_sigwx1_flag','i4',('time',), zlib = do_zip)
ppt1pvar=netcdf_outfile.createVariable('precip1_period','i8',('time',), zlib = do_zip)
ppt1dvar=netcdf_outfile.createVariable('precip1_depth','f8',('time',), zlib = do_zip)
ppt1cvar=netcdf_outfile.createVariable('precip1_condition','S1',('time','character_length',), zlib = do_zip)
ppt1fvar=netcdf_outfile.createVariable('precip1_flag','i4',('time',), zlib = do_zip)
slpvar=netcdf_outfile.createVariable('slp','f8',('time',), zlib = do_zip)
slpfvar=netcdf_outfile.createVariable('slp_flag','i4',('time',), zlib = do_zip)
sdvar=netcdf_outfile.createVariable('sun_duration','f8',('time',), zlib = do_zip)
sdfvar=netcdf_outfile.createVariable('sun_durationqc','i4',('time',), zlib = do_zip)
wgstpvar=netcdf_outfile.createVariable('wind_gust_period','f8',('time',), zlib = do_zip)
wgstvvar=netcdf_outfile.createVariable('wind_gust','f8',('time',), zlib = do_zip)
wgstfvar=netcdf_outfile.createVariable('wind_gust_flag','i4',('time',), zlib = do_zip)
if Extra:
pswx2var=netcdf_outfile.createVariable('past_sigwx2','i4',('time',), zlib = do_zip)
pswx2pvar=netcdf_outfile.createVariable('past_sigwx2_period','i4',('time',), zlib = do_zip)
pswx2fvar=netcdf_outfile.createVariable('past_sigwx2_flag','i4',('time',), zlib = do_zip)
wtvar=netcdf_outfile.createVariable('windtypes','S1',('time','character_length'), zlib = do_zip)
swxvar=netcdf_outfile.createVariable('present_sigwx','i4',('time',), zlib = do_zip)
swxfvar=netcdf_outfile.createVariable('present_sigwx_flags','i4',('time',), zlib = do_zip)
ppt2pvar=netcdf_outfile.createVariable('precip2_period','i8',('time',), zlib = do_zip)
ppt2dvar=netcdf_outfile.createVariable('precip2_depth','f8',('time',), zlib = do_zip)
ppt2cvar=netcdf_outfile.createVariable('precip2_condition','S1',('time','character_length',), zlib = do_zip)
ppt2fvar=netcdf_outfile.createVariable('precip2_flag','i4',('time',), zlib = do_zip)
ppt3pvar=netcdf_outfile.createVariable('precip3_period','i8',('time',), zlib = do_zip)
ppt3dvar=netcdf_outfile.createVariable('precip3_depth','f8',('time',), zlib = do_zip)
ppt3cvar=netcdf_outfile.createVariable('precip3_condition','S1',('time','character_length',), zlib = do_zip)
ppt3fvar=netcdf_outfile.createVariable('precip3_flag','i4',('time',), zlib = do_zip)
ppt4pvar=netcdf_outfile.createVariable('precip4_period','i8',('time',), zlib = do_zip)
ppt4dvar=netcdf_outfile.createVariable('precip4_depth','f8',('time',), zlib = do_zip)
ppt4cvar=netcdf_outfile.createVariable('precip4_condition','S1',('time','character_length',), zlib = do_zip)
ppt4fvar=netcdf_outfile.createVariable('precip4_flag','i4',('time',), zlib = do_zip)
maxtpvar=netcdf_outfile.createVariable('maximum_temp_period','f8',('time',), zlib = do_zip)
maxtvvar=netcdf_outfile.createVariable('maximum_temp_value','f8',('time',), zlib = do_zip)
maxtfvar=netcdf_outfile.createVariable('maximum_temp_flag','i4',('time',), zlib = do_zip)
mintpvar=netcdf_outfile.createVariable('minimum_temp_period','f8',('time',), zlib = do_zip)
mintvvar=netcdf_outfile.createVariable('minimum_temp_value','f8',('time',), zlib = do_zip)
mintfvar=netcdf_outfile.createVariable('minimum_temp_flag','i4',('time',), zlib = do_zip)
# variables attributes
print "Writing Attributes"
timesvar.long_name='time_of_measurement'
timesvar.standard_name='time'
if hours:
timesvar.units='hours since {}'.format(dt.datetime.strftime(dt.datetime(STARTYEAR,1,1,0,0), "%Y-%m-%d %H:%M"))
else:
timesvar.units='days since {}'.format(dt.datetime.strftime(dt.datetime(STARTYEAR,1,1,0,0), "%Y-%m-%d %H:%M"))
timesvar.axis='T'
timesvar.calendar='gregorian'
timesvar.valid_min=0.
timesvar.start_year = "{}".format(STARTYEAR)
timesvar.end_year = "{}".format(ENDYEAR)
timesvar.start_month = "1"
timesvar.end_month = "12"
# timesvar.coordinates = "time"
# lonsvar.standard_name = "longitude"
# lonsvar.long_name = "station_longitude"
# lonsvar.units = "degrees_east"
# lonsvar.axis = "X"
# latsvar.standard_name = "latitude"
# latsvar.long_name = "station_latitude"
# latsvar.units = "degrees_north"
# latsvar.axis = "Y"
# altsvar.long_name = "vertical distance above the surface"
# altsvar.standard_name = "height"
# altsvar.units = "meters"
# altsvar.positive = "up"
# altsvar.axis = "Z"
# idsvar.standard_name = "station_identification_code"
# idsvar.long_name = "Station ID number"
# idsvar.cf_role='timeseries_id'
# stationsvar.standard_name='station_identification_code'
stationsvar.long_name='Primary source for timestep (may be multiple sources for composite stations). USAF-WBAN from ISD source'
# stationsvar.units='USAF - WBAN from ISD source'
stationsvar.missing_value='null'
try:
tmin,tmax=np.min(temperatures[np.where(temperatures != FLTMDI)[0]]),np.max(temperatures[np.where(temperatures != FLTMDI)[0]])
except ValueError:
tmin,tmax=FLTMDI,FLTMDI
WriteAttributes(tempsvar,'Dry bulb air temperature at screen height (~2m)','latitude: longitude: time: point (nearest to reporting hour)',FLTMDI,'degree_Celsius','T',tmin,tmax,'latitude longitude elevation',standard_name = 'surface_temperature')
WriteFlagAttributes(tempsflagsvar,'ISD flags for temperature - see ISD documentation',INTMDI,'T')
try:
dmin,dmax=np.min(dewpoints[np.where(dewpoints != FLTMDI)[0]]),np.max(dewpoints[np.where(dewpoints != FLTMDI)[0]])
except ValueError:
dmin,dmax=FLTMDI,FLTMDI
WriteAttributes(dewsvar,'Dew point temperature at screen height (~2m)','latitude: longitude: time: point (nearest to reporting hour)',FLTMDI,'degree_Celsius','T',dmin,dmax,'latitude longitude elevation',standard_name = 'dew_point_temperature')
WriteFlagAttributes(dewsflagsvar,'ISD flags for dewpoint temperature - see ISD documentation',INTMDI,'T')
WriteAttributes(tcvar,'Total cloud cover (oktas)','latitude: longitude: time: point (derived in priority order GA, GF, GD - see ISD documentation, nearest to reporting hour)', INTMDI, '1', 'T', 0,8, 'latitude longitude elevation',standard_name = "cloud_area_fraction")
WriteFlagAttributes(tcfvar,'ISD flags for total cloud - see ISD documentation',INTMDI,'T')
WriteAttributes(lcvar,'Low cloud cover (oktas)','latitude: longitude: time: point (derived in priority order GA, GF, GD - see ISD documentation, nearest to reporting hour)', INTMDI, '1', 'T', 0,8, 'latitude longitude elevation',standard_name = "low_type_cloud_area_fraction")
WriteFlagAttributes(lcfvar,'ISD flags for low cloud - see ISD documentation',INTMDI,'T')
WriteAttributes(mcvar,'Mid cloud cover (oktas)','latitude: longitude: time: point (derived in priority order GA, GF, GD - see ISD documentation, nearest to reporting hour)', INTMDI, '1', 'T', 0,8, 'latitude longitude elevation',standard_name = "medium_type_cloud_area_fraction")
WriteFlagAttributes(mcfvar,'ISD flags for mid cloud - see ISD documentation',INTMDI,'T')
WriteAttributes(hcvar,'High cloud cover (oktas)','latitude: longitude: time: point (derived in priority order GA, GF, GD - see ISD documentation, nearest to reporting hour)', INTMDI, '1', 'T', 0,8, 'latitude longitude elevation',standard_name = "high_type_cloud_area_fraction")
WriteFlagAttributes(hcfvar,'ISD flags for high cloud - see ISD documentation',INTMDI,'T')
try:
cbmin,cbmax=np.min(cloud_base[np.where(cloud_base != INTMDI)[0]]),np.max(cloud_base[np.where(cloud_base != INTMDI)[0]])
except ValueError:
cbmin,cbmax=FLTMDI,FLTMDI
WriteAttributes(cbvar,'Cloud base of lowest cloud layer','latitude: longitude: time: point (nearest to reporting hour)', INTMDI, 'meters', 'T', cbmin, cbmax, 'latitude longitude elevation',standard_name = 'cloud_base_altitude')
WriteFlagAttributes(cbfvar,'ISD flags for cloud base - see ISD documentation',INTMDI,'T')
try:
wsmin,wsmax=np.min(windspeeds[np.where(windspeeds != FLTMDI)[0]]),np.max(windspeeds[np.where(windspeeds != FLTMDI)[0]])
except ValueError:
wsmin,wsmax=FLTMDI,FLTMDI
WriteAttributes(wsvar,'Wind speed at mast height (~10m)','latitude: longitude: time: point (nearest to reporting hour)', FLTMDI, 'meters per second', 'T', wsmin, wsmax,'latitude longitude elevation',standard_name = 'wind_speed')
WriteFlagAttributes(wsfvar,'ISD flags for windspeed - see ISD documentation',INTMDI,'T')
WriteAttributes(wdvar,'Wind Direction at mast height (~10m)','latitude: longitude: time: point (nearest to reporting hour)', INTMDI, 'degree', 'T', 0, 360, 'latitude longitude elevation',standard_name = 'wind_from_direction')
WriteFlagAttributes(wdfvar,'ISD flags for wind direction - see ISD documentation',INTMDI,'T')
WriteAttributes(pswx1var,'Reported past significant weather phenomena','latitude: longitude: point (interval: 1 day)', INTMDI, '1', 'T', 0, 9,'latitude longitude elevation')
WriteFlagAttributes(pswx1fvar,'ISD flags for reported past significant weather - see ISD documentation',INTMDI,'T')
WriteAttributes(pswx1pvar,'Reported period over which significant weather report was recorded','latitude: longitude: point (interval: 1 day)', INTMDI, 'Hours', 'T', 0, 24,'latitude longitude elevation')
WriteAttributes(ppt1pvar,'Reported period over which precipitation was recorded','latitude: longitude: point', long(INTMDI), 'hour', 'T', 0, 98,'latitude longitude elevation precip1_depth') #, standard_name = 'period_of_precipitation_report')
WriteAttributes(ppt1dvar,'Depth of Precipitation Reported over time period','latitude: longitude: time: sum ', FLTMDI, 'mm', 'T', 0, 999.8,'latitude longitude elevation precip1_period', standard_name = 'lwe_thickness_of_precipitation_amount')
WriteFlagAttributes(ppt1cvar,'Precipitation Code (denotes if trace amount)', 'null','T')
WriteFlagAttributes(ppt1fvar,'ISD flags for first precip field - see ISD documentation', INTMDI,'T')
try:
smin,smax=np.min(slp[np.where(slp != FLTMDI)[0]]),np.max(slp[np.where(slp != FLTMDI)[0]])
except ValueError:
smin,smax=FLTMDI,FLTMDI
WriteAttributes(slpvar,'Reported Sea Level Pressure at screen height (~2m)','latitude: longitude: time: point (nearest to reporting hour)',FLTMDI, 'hPa', 'T', smin, smax, 'latitude longitude elevation',standard_name = 'air_pressure_at_sea_level')
WriteFlagAttributes(slpfvar,'ISD flags for slp field - see ISD documentation',INTMDI,'T')
WriteAttributes(sdvar,'Reported Sunshine Duration','latitude: longitude: time: point (nearest to reporting hour)', INTMDI, 'minutes', 'T', 0, 6000, 'latitude longitude elevation', standard_name = 'duration_of_sunshine')
WriteFlagAttributes(sdfvar,'ISD flags sun duration field - see ISD documentation',INTMDI,'T')
WriteAttributes(wgstpvar,'Period of Maximum Wind Gust Speed', 'latitude: longitude: time: point (nearest to reporting hour)', INTMDI, 'Hours', 'T', 0, 48, 'latitude longitude elevation') #,standard_name = 'period_of_wind_gust')
WriteAttributes(wgstvvar,'Wind Gust Speed at mast height (~10m)','latitude: longitude: time: point (nearest to reporting hour)',FLTMDI, 'meters per second', 'T', 0, 200.0, 'latitude longitude elevation',standard_name = 'wind_speed_of_gust')
WriteFlagAttributes(wgstfvar,'ISD flags for wind gust field - see ISD documentation', INTMDI,'T')
if Extra:
WriteFlagAttributes(wtvar,'Wind observation type - see ISD documentation','null','T')
WriteAttributes(pswx2var,'Station reports of past significant weather phenomena (2)','latitude: longitude: point (interval: 1 day)', INTMDI, '1', 'T', 0, 9,'latitude longitude elevation')
WriteFlagAttributes(pswx2fvar,'ISD flags for reported past significant weather - see ISD documentation',INTMDI,'T')
WriteAttributes(pswx2pvar,'Period of significant weather report','latitude: longitude: point (interval: 1 day)', INTMDI, 'hour', 'T', 0, 24,'latitude longitude elevation')
WriteAttributes(swxvar,'Station reports of present significant weather phenomena','latitude: longitude: point (interval: 1 day)', INTMDI, '1', 'T', 0, 99,'latitude longitude elevation')
WriteFlagAttributes(swxfvar,'ISD flags for reported present significant weather - see ISD documentation',INTMDI,'T')
WriteAttributes(ppt2pvar,'Reported period over which precipitation was recorded','(as ISD variable precip2)', long(INTMDI), 'hour', 'T', 0, 98,'latitude longitude elevation precip2_depth')
WriteAttributes(ppt2dvar,'Depth of Precipitation Reported over time period','latitude: longitude: sum', FLTMDI, 'mm', 'T', 0, 999.8,'latitude longitude elevation precip2_period')
WriteFlagAttributes(ppt2cvar,'Denotes if trace amount', 'null','T')
WriteFlagAttributes(ppt2fvar,'ISD flags for second precip field - see ISD documentation', INTMDI,'T')
WriteAttributes(ppt3pvar,'Reported period over which precipitation was recorded','(as ISD variable precip3)', long(INTMDI), 'hour', 'T', 0, 98,'latitude longitude elevation precip3_depth')
WriteAttributes(ppt3dvar,'Depth of Precipitation Reported over time period','latitude: longitude: time: sum', FLTMDI, 'mm', 'T', 0, 999.8,'latitude longitude elevation precip3_period')
WriteFlagAttributes(ppt3cvar,'Denotes if trace amount', 'null','T')
WriteFlagAttributes(ppt3fvar,'ISD flags for third precip field - see ISD documentation', INTMDI,'T')
WriteAttributes(ppt4pvar,'Reported period over which precipitation was recorded','(as ISD variable precip4)', long(INTMDI), 'hour', 'T', 0, 98,'latitude longitude elevation precip4_depth')
WriteAttributes(ppt4dvar,'Depth of Precipitation Reported over time period','latitude: longitude: time: sum', FLTMDI, 'mm', 'T', 0, 999.8,'latitude longitude elevation precip4_period')
WriteFlagAttributes(ppt4cvar,'Denotes if trace amount', 'null','T')
WriteFlagAttributes(ppt4fvar,'ISD flags for fourth precip field - see ISD documentation', INTMDI,'T')
try:
xtmin,xtmax=np.min(maximum_temp_value[np.where(maximum_temp_value != FLTMDI)[0]]),np.max(maximum_temp_value[np.where(maximum_temp_value != FLTMDI)[0]])
except ValueError:
xtmin,xtmax=FLTMDI,FLTMDI
WriteAttributes(maxtpvar,'Reported period over which maximum temperature was recorded','latitude: longitude: point (interval: 1 day)', FLTMDI, 'hour', 'T', 0, 48,'latitude longitude elevation')
WriteAttributes(maxtvvar,'Dry bulb maximum temperature reported over time period','latitude: longitude: time: point (interval: 1 day)', FLTMDI,'degrees_Celsius','T',xtmin,xtmax)
WriteFlagAttributes(maxtfvar,'ISD flags for maximum temperature field - see ISD documentation', INTMDI, 'T')
try:
ntmin,ntmax=np.min(minimum_temp_value[np.where(minimum_temp_value != FLTMDI)[0]]),np.max(minimum_temp_value[np.where(minimum_temp_value != FLTMDI)[0]])
except ValueError:
ntmin,ntmax=FLTMDI,FLTMDI
WriteAttributes(mintpvar,'Reported period over which minimum temperature was recorded','latitude: longitude: point (interval: 1 day)', FLTMDI, 'hour', 'T', 0, 48,'latitude longitude elevation')
WriteAttributes(mintvvar,'Dry bulb minimum temperature reported over time period','latitude: longitude: time: point (interval: 1 day)', FLTMDI,'degree_Celsius','T',ntmin,ntmax)
WriteFlagAttributes(mintfvar,'ISD flags for minimum temperature field - see ISD documentation', INTMDI, 'T')
# global attributes
netcdf_outfile.station_information='Where station is a composite the station id refers to the primary source used in the timestep and does not necessarily apply to all elements'
netcdf_outfile.file_created=dt.datetime.strftime(dt.datetime.now(), "%a %b %d, %H:%M %Y")
netcdf_outfile.station_id=station
netcdf_outfile.latitude=StationLat[st]
netcdf_outfile.longitude=StationLon[st]
netcdf_outfile.elevation=StationElv[st]
netcdf_outfile.Conventions="CF-1.6"
netcdf_outfile.date_created = dt.datetime.strftime(dt.datetime.now(), "%Y-%m-%d, %H:%M")
netcdf_outfile.history = "Created by mk_netcdf_files.py \n"
print "Writing data to netcdf file"
# write data into file
# ncdf.stringtochar changes np array of strings
# to times x character_length array of single
# character strings
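# e.g. (illustrative station ID) ncdf.stringtochar(np.array(['010010-99999'], dtype='S12'))
# returns a (1, 12) array of single characters, which is the shape the
# ('time', 'long_character_length') variables declared above expect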
timesvar[:]=times_out
# lonsvar[:]=StationLon[st]
# latsvar[:]=StationLat[st]
# altsvar[:]=StationElv[st]
# idsvar[:]=StationIDs[st]
stationsvar[:]=ncdf.stringtochar(input_station_id)
tempsvar[:]=temperatures
tempsflagsvar[:]=temperature_flags
dewsvar[:]=dewpoints
dewsflagsvar[:]=dewpoint_flags
tcvar[:]=total_cloud_cover
tcfvar[:]=total_cloud_flags
lcvar[:]=low_cloud_cover
lcfvar[:]=low_cloud_flags
mcvar[:]=mid_cloud_cover
mcfvar[:]=mid_cloud_flags
hcvar[:]=high_cloud_cover
hcfvar[:]=high_cloud_flags
cbvar[:]=cloud_base
cbfvar[:]=cloud_base_flags
wsvar[:]=windspeeds
wsfvar[:]=windspeeds_flags
wdvar[:]=winddirs
wdfvar[:]=winddirs_flags
pswx1var[:]=past_sigwx1
pswx1pvar[:]=past_sigwx1_period
pswx1fvar[:]=past_sigwx1_flag
ppt1pvar[:]=precip1_period
ppt1dvar[:]=precip1_depth
ppt1cvar[:]=ncdf.stringtochar(precip1_condition)
ppt1fvar[:]=precip1_flag
slpvar[:]=slp
slpfvar[:]=slp_flag
sdvar[:]=sun_duration
sdfvar[:]=sun_durationqc
wgstpvar[:]=wind_gust_period
wgstfvar[:]=wind_gust_flags
wgstvvar[:]=wind_gust_value
if Extra:
pswx2var[:]=past_sigwx2
pswx2pvar[:]=past_sigwx2_period
pswx2fvar[:]=past_sigwx2_flag
wtvar[:]=ncdf.stringtochar(windtypes)
swxvar[:]=present_sigwx
swxfvar[:]=present_sigwx_flags
ppt2pvar[:]=precip2_period
ppt2dvar[:]=precip2_depth
ppt2cvar[:]=ncdf.stringtochar(precip2_condition)
ppt2fvar[:]=precip2_flag
ppt3pvar[:]=precip3_period
ppt3dvar[:]=precip3_depth
ppt3cvar[:]=ncdf.stringtochar(precip3_condition)
ppt3fvar[:]=precip3_flag
ppt4pvar[:]=precip4_period
ppt4dvar[:]=precip4_depth
ppt4cvar[:]=ncdf.stringtochar(precip4_condition)
ppt4fvar[:]=precip4_flag
maxtpvar[:]=maximum_temp_period
maxtvvar[:]=maximum_temp_value
maxtfvar[:]=maximum_temp_flags
mintpvar[:]=minimum_temp_period
mintvvar[:]=minimum_temp_value
mintfvar[:]=minimum_temp_flags
# extra
netcdf_outfile.close()
# gzip file
cmd='gzip -f -9 '+netcdf_filename
print cmd
subprocess.call(cmd,shell=True)
print "Done station "+station
print dt.datetime.now()-dbg_sttime
print dt.datetime.now()
print "\n END"
return # MakeNetcdfFiles
#--------------------------------
if __name__=="__main__":
"""
Calls creation of netCDF files.
Command-line options are parsed with argparse:
--start, --end, --restart_id, --end_id, --extra
To test, call MakeNetcdfFiles() directly with a hard-coded station ID;
an illustrative invocation is sketched at the end of this block.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--start', dest='STARTYEAR', action='store', default = 1931,
help='Start year, default = 1931')
parser.add_argument('--end', dest='ENDYEAR', action='store', default = datetime.datetime.now().year,
help='End year, default = current')
parser.add_argument('--restart_id', dest='restart_id', action='store', default = "",
help='Restart ID for truncated run, default = ""')
parser.add_argument('--end_id', dest='end_id', action='store', default = "",
help='End ID for truncated run, default = ""')
parser.add_argument('--extra', dest='extra', action='store_true', default = False,
help='Include extra parameters, default = False')
args = parser.parse_args()
STARTYEAR=int(args.STARTYEAR)
ENDYEAR=int(args.ENDYEAR)
restart_id=args.restart_id
end_id=args.end_id
Extra=args.extra
print "\n Making NetCDF files from ISD ASCII files \n"
print "Reading data from %s" % ISD_DATA_LOCS
print "Writing data to %s" % NETCDF_DATA_LOCS
print "Start year %i, End year %i (inclusive)" % (STARTYEAR,ENDYEAR)
print "Restart ID = {}, End ID = {}, Include Extra parameters = {}".format(restart_id, end_id, Extra)
MakeNetcdfFiles(STARTYEAR, ENDYEAR, restart_id=restart_id,end_id=end_id,Extra=Extra)
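# An illustrative invocation of this script (the years and station IDs are
# assumptions, not values taken from this file):
#
#   python mk_netcdf_files.py --start 1973 --end 2014 \
#       --restart_id 010010-99999 --end_id 030000-99999 --extra
#
# which reads the ISD ASCII files under ISD_DATA_LOCS and writes one gzipped
# netCDF file per station into NETCDF_DATA_LOCS.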
|
bsd-3-clause
|
AndKe/ardupilot
|
libraries/SITL/examples/Morse/rover_skid.py
|
22
|
2201
|
'''
This is an example builder script that sets up a rover in Morse to
be driven by ArduPilot.
The rover has the basic set of sensors that ArduPilot needs.
To start the simulation run:
morse run rover_skid.py
Then connect with ArduPilot like this:
sim_vehicle.py --model morse --console --map
This model assumes a skid-steering rover with left throttle on
channel 1 and right throttle on channel 3, which means you need to set:
SERVO1_FUNCTION 73
SERVO3_FUNCTION 74
'''
from morse.builder import *
# use the ATRV rover
vehicle = ATRV()
vehicle.properties(Object = True, Graspable = False, Label = "Vehicle")
vehicle.translate(x=0.0, z=0.0)
# add a camera
camera = SemanticCamera(name="Camera")
camera.translate(x=0.2, y=0.3, z=0.9)
vehicle.append(camera)
camera.properties(cam_far=800)
camera.properties(Vertical_Flip=True)
# we could optionally stream the video to a port
#camera.add_stream('socket')
# add sensors needed for ArduPilot operation to a vehicle
pose = Pose()
vehicle.append(pose)
imu = IMU()
vehicle.append(imu)
gps = GPS()
gps.alter('UTM')
vehicle.append(gps)
velocity = Velocity()
vehicle.append(velocity)
# create a compound sensor of all of the individual sensors and stream it
all_sensors = CompoundSensor([imu, gps, velocity, pose])
all_sensors.add_stream('socket')
vehicle.append(all_sensors)
# make the vehicle controllable with speed and angular velocity
# this will be available on port 60001 by default
# an example command is:
# {"v":2, "w":1}
# which is 2m/s fwd, and rotating left at 1 radian/second
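# A rough sketch of sending that command from a separate Python process,
# assuming the default socket actuator port (60001) and newline-terminated
# JSON framing:
#
#   import json, socket
#   sock = socket.create_connection(("127.0.0.1", 60001))
#   sock.sendall((json.dumps({"v": 2.0, "w": 1.0}) + "\n").encode())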
motion = MotionVW()
vehicle.append(motion)
motion.add_stream('socket')
# this would allow us to control the vehicle with a keyboard
# we don't enable it as it causes issues with sensor consistency
#keyboard = Keyboard()
#keyboard.properties(Speed=3.0)
#vehicle.append(keyboard)
# Environment
env = Environment('land-1/trees')
env.set_camera_location([10.0, -10.0, 10.0])
env.set_camera_rotation([1.0470, 0, 0.7854])
env.select_display_camera(camera)
env.set_camera_clip(clip_end=1000)
# startup at CMAC. A location is needed for the magnetometer
env.properties(longitude = 149.165230, latitude = -35.363261, altitude = 584.0)
|
gpl-3.0
|
LonamiWebs/Telethon
|
readthedocs/custom_roles.py
|
2
|
2096
|
from docutils import nodes, utils
from docutils.parsers.rst.roles import set_classes
def make_link_node(rawtext, app, name, options):
"""
Create a link to the TL reference.
:param rawtext: Text being replaced with link node.
:param app: Sphinx application context
:param name: Name of the object to link to
:param options: Options dictionary passed to role func.
"""
try:
base = app.config.tl_ref_url
if not base:
raise AttributeError
except AttributeError as e:
raise ValueError('tl_ref_url config value is not set') from e
if base[-1] != '/':
base += '/'
set_classes(options)
node = nodes.reference(rawtext, utils.unescape(name),
refuri='{}?q={}'.format(base, name),
**options)
return node
# noinspection PyUnusedLocal
def tl_role(name, rawtext, text, lineno, inliner, options=None, content=None):
"""
Link to the TL reference.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be empty.
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked with the role.
:param lineno: The line number where rawtext appears in the input.
:param inliner: The inliner instance that called us.
:param options: Directive options for customization.
:param content: The directive content for customization.
"""
if options is None:
options = {}
# TODO Report error on type not found?
# Usage:
# msg = inliner.reporter.error(..., line=lineno)
# return [inliner.problematic(rawtext, rawtext, msg)], [msg]
app = inliner.document.settings.env.app
node = make_link_node(rawtext, app, text, options)
return [node], []
def setup(app):
"""
Install the plugin.
:param app: Sphinx application context.
"""
app.add_role('tl', tl_role)
app.add_config_value('tl_ref_url', None, 'env')
return
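# Illustrative usage of the role installed above (the URL and TL name are
# assumptions, not taken from this file):
#
#   # conf.py
#   tl_ref_url = 'https://tl.telethon.dev'
#
#   # any .rst file
#   See :tl:`SendMessageRequest` for the raw API call; the role renders a
#   link to <tl_ref_url>?q=SendMessageRequest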
|
mit
|
jmdejong/Asciifarm
|
asciifarm/common/messages.py
|
1
|
4086
|
import re
import unicodedata
import json
class InvalidMessageError(Exception):
errType = "invalidmessage"
description = ""
def __init__(self, description="", errType=None):
self.description = description
if errType is not None:
self.errType = errType
def toMessage(self):
return ErrorMessage(self.errType, self.description)
class InvalidNameError(InvalidMessageError):
errType = "invalidname"
class Message:
@classmethod
def msgType(cls):
return cls.typename
def to_json(self):
raise NotImplementedError
def to_json_bytes(self):
return bytes(json.dumps(self.to_json()), "utf-8")
@classmethod
def from_json(cls, jsonobj):
raise NotImplementedError
class ClientToServerMessage(Message):
def body(self):
raise NotImplementedError
def to_json(self):
return [self.typename, self.body()]
@classmethod
def from_json(cls, jsonlist):
assert len(jsonlist) == 2, InvalidMessageError
typename, body = jsonlist
assert typename == cls.msgType(), InvalidMessageError
return cls(body)
class NameMessage(ClientToServerMessage):
typename = "name"
categories = {"Lu", "Ll", "Lt", "Lm", "Lo", "Nd", "Nl", "No", "Pc"}
def __init__(self, name):
assert isinstance(name, str), InvalidNameError("name must be a string")
assert (len(name) > 0), InvalidNameError("name needs at least one character")
assert (len(bytes(name, "utf-8")) <= 256), InvalidNameError("name may not be longer than 256 utf8 bytes")
if name[0] != "~":
for char in name:
category = unicodedata.category(char)
assert category in self.categories, InvalidNameError("all name characters must be in these unicode categories: " + "|".join(self.categories) + " (except for tildenames)")
self.name = name
def body(self):
return self.name
class InputMessage(ClientToServerMessage):
typename = "input"
def __init__(self, inp):
self.inp = inp
def body(self):
return self.inp
class ChatMessage(ClientToServerMessage):
typename = "chat"
def __init__(self, text):
assert isinstance(text, str), InvalidMessageError("chat message must be a string")
assert text.isprintable(), InvalidMessageError("chat messages may only contain printable unicode characters")
self.text = text
def body(self):
return self.text
class ServerToClientMessage(Message):
msglen = 0
@classmethod
def from_json(cls, jsonlist):
assert len(jsonlist) == cls.msglen, InvalidMessageError
assert jsonlist[0] == cls.msgType(), InvalidMessageError
return cls(*jsonlist[1:])
class MessageMessage(ServerToClientMessage): # this name feels stupid
""" A message to inform the client. This is meant to be read by the user"""
typename = "message"
msglen = 3
def __init__(self, text, type=""):
self.text = text
self.type = type
def to_json(self):
return [self.typename, self.text, self.type]
class WorldMessage(ServerToClientMessage):
""" A message about the world state """
typename = "world"
msglen = 2
def __init__(self, updates):
assert isinstance(updates, list), InvalidMessageError
self.updates = updates
def to_json(self):
return [self.typename, self.updates]
class ErrorMessage(ServerToClientMessage):
typename = "error"
msglen = 3
def __init__(self, errType, description=""):
self.errType = errType
self.description = description
def to_json(self):
return [self.typename, self.errType, self.description]
messages = {message.msgType(): message for message in [
NameMessage,
InputMessage,
ChatMessage,
WorldMessage,
ErrorMessage,
MessageMessage
]}
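# A small round-trip sketch of the wire format defined above (the chat text
# is arbitrary): serialise a message, then rebuild it through the `messages`
# lookup table the way a server-side dispatcher might.
if __name__ == "__main__":
    original = ChatMessage("hello farm")
    payload = original.to_json_bytes()        # b'["chat", "hello farm"]'
    decoded = json.loads(payload)
    rebuilt = messages[decoded[0]].from_json(decoded)
    assert isinstance(rebuilt, ChatMessage) and rebuilt.text == original.text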
|
gpl-3.0
|
GaetanCambier/CouchPotatoServer
|
couchpotato/core/media/movie/providers/trailer/hdtrailers.py
|
64
|
4275
|
from string import digits, ascii_letters
import re
from bs4 import SoupStrainer, BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import mergeDicts, getTitle, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.trailer.base import TrailerProvider
from requests import HTTPError
log = CPLog(__name__)
autoload = 'HDTrailers'
class HDTrailers(TrailerProvider):
urls = {
'api': 'http://www.hd-trailers.net/movie/%s/',
'backup': 'http://www.hd-trailers.net/blog/',
}
providers = ['apple.ico', 'yahoo.ico', 'moviefone.ico', 'myspace.ico', 'favicon.ico']
only_tables_tags = SoupStrainer('table')
def search(self, group):
movie_name = getTitle(group)
url = self.urls['api'] % self.movieUrlName(movie_name)
try:
data = self.getCache('hdtrailers.%s' % getIdentifier(group), url, show_error = False)
except HTTPError:
log.debug('No page found for: %s', movie_name)
data = None
result_data = {'480p': [], '720p': [], '1080p': []}
if not data:
return result_data
did_alternative = False
for provider in self.providers:
results = self.findByProvider(data, provider)
# Find alternative
if results.get('404') and not did_alternative:
results = self.findViaAlternative(group)
did_alternative = True
result_data = mergeDicts(result_data, results)
return result_data
def findViaAlternative(self, group):
results = {'480p': [], '720p': [], '1080p': []}
movie_name = getTitle(group)
url = "%s?%s" % (self.urls['backup'], tryUrlencode({'s':movie_name}))
try:
data = self.getCache('hdtrailers.alt.%s' % getIdentifier(group), url, show_error = False)
except HTTPError:
log.debug('No alternative page found for: %s', movie_name)
data = None
if not data:
return results
try:
html = BeautifulSoup(data, parse_only = self.only_tables_tags)
result_table = html.find_all('h2', text = re.compile(movie_name))
for h2 in result_table:
if 'trailer' in h2.lower():
parent = h2.parent.parent.parent
trailerLinks = parent.find_all('a', text = re.compile('480p|720p|1080p'))
try:
for trailer in trailerLinks:
results[trailer].insert(0, trailer.parent['href'])
except:
pass
except AttributeError:
log.debug('No trailers found in via alternative.')
return results
def findByProvider(self, data, provider):
results = {'480p':[], '720p':[], '1080p':[]}
try:
html = BeautifulSoup(data, parse_only = self.only_tables_tags)
result_table = html.find('table', attrs = {'class':'bottomTable'})
for tr in result_table.find_all('tr'):
trtext = str(tr).lower()
if 'clips' in trtext:
break
if 'trailer' in trtext and not 'clip' in trtext and provider in trtext and not '3d' in trtext:
if 'trailer' not in tr.find('span', 'standardTrailerName').text.lower():
continue
resolutions = tr.find_all('td', attrs = {'class':'bottomTableResolution'})
for res in resolutions:
if res.a and str(res.a.contents[0]) in results:
results[str(res.a.contents[0])].insert(0, res.a['href'])
except AttributeError:
log.debug('No trailers found in provider %s.', provider)
results['404'] = True
return results
def movieUrlName(self, string):
safe_chars = ascii_letters + digits + ' '
r = ''.join([char if char in safe_chars else ' ' for char in string])
name = re.sub('\s+' , '-', r).lower()
try:
int(name)
return '-' + name
except:
return name
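# For reference, movieUrlName() builds the slug used in urls['api']; e.g.
# (illustrative titles) 'Mad Max: Fury Road' -> 'mad-max-fury-road', while a
# purely numeric title such as '1408' becomes '-1408' (the leading dash is
# added when the slug parses as an integer).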
|
gpl-3.0
|
pedro2d10/SickRage-FR
|
sickbeard/providers/freshontv.py
|
1
|
10398
|
# coding=utf-8
# Author: Idan Gutman
#
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
from requests.utils import add_dict_to_cookiejar, dict_from_cookiejar
import time
import traceback
from sickbeard import logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from sickrage.helper.common import convert_size, try_int
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class FreshOnTVProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
TorrentProvider.__init__(self, "FreshOnTV")
self._uid = None
self._hash = None
self.username = None
self.password = None
self.ratio = None
self.minseed = None
self.minleech = None
self.freeleech = False
self.cache = tvcache.TVCache(self)
self.urls = {'base_url': 'https://freshon.tv/',
'login': 'https://freshon.tv/login.php?action=makelogin',
'detail': 'https://freshon.tv/details.php?id=%s',
'search': 'https://freshon.tv/browse.php?incldead=%s&words=0&cat=0&search=%s',
'download': 'https://freshon.tv/download.php?id=%s&type=torrent'}
self.url = self.urls['base_url']
self.cookies = None
def _check_auth(self):
if not self.username or not self.password:
logger.log(u"Invalid username or password. Check your settings", logger.WARNING)
return True
def login(self):
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
if self._uid and self._hash:
add_dict_to_cookiejar(self.session.cookies, self.cookies)
else:
login_params = {'username': self.username,
'password': self.password,
'login': 'submit'}
response = self.get_url(self.urls['login'], post_data=login_params, timeout=30)
if not response:
logger.log(u"Unable to connect to provider", logger.WARNING)
return False
if re.search('/logout.php', response):
try:
if dict_from_cookiejar(self.session.cookies)['uid'] and dict_from_cookiejar(self.session.cookies)['pass']:
self._uid = dict_from_cookiejar(self.session.cookies)['uid']
self._hash = dict_from_cookiejar(self.session.cookies)['pass']
self.cookies = {'uid': self._uid,
'pass': self._hash}
return True
except Exception:
logger.log(u"Unable to login to provider (cookie)", logger.WARNING)
return False
else:
if re.search('Username does not exist in the userbase or the account is not confirmed yet.', response):
logger.log(u"Invalid username or password. Check your settings", logger.WARNING)
if re.search('DDoS protection by CloudFlare', response):
logger.log(u"Unable to login to provider due to CloudFlare DDoS javascript check", logger.WARNING)
return False
def search(self, search_params, age=0, ep_obj=None): # pylint: disable=too-many-locals, too-many-branches, too-many-statements
results = []
if not self.login():
return results
freeleech = '3' if self.freeleech else '0'
for mode in search_params:
items = []
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_params[mode]:
if mode != 'RSS':
logger.log(u"Search string: {search}".format(search=search_string.decode('utf-8')),
logger.DEBUG)
search_url = self.urls['search'] % (freeleech, search_string)
logger.log(u"Search URL: %s" % search_url, logger.DEBUG)
init_html = self.get_url(search_url)
max_page_number = 0
if not init_html:
logger.log(u"No data returned from provider", logger.DEBUG)
continue
try:
with BS4Parser(init_html, 'html5lib') as init_soup:
# Check to see if there is more than 1 page of results
pager = init_soup.find('div', {'class': 'pager'})
if pager:
page_links = pager.find_all('a', href=True)
else:
page_links = []
if len(page_links) > 0:
for lnk in page_links:
link_text = lnk.text.strip()
if link_text.isdigit():
page_int = int(link_text)
if page_int > max_page_number:
max_page_number = page_int
# limit page number to 15 just in case something goes wrong
if max_page_number > 15:
max_page_number = 15
# limit RSS search
if max_page_number > 3 and mode == 'RSS':
max_page_number = 3
except Exception:
logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.ERROR)
continue
data_response_list = [init_html]
# Freshon starts counting pages from zero, even though it displays numbers from 1
if max_page_number > 1:
for i in range(1, max_page_number):
time.sleep(1)
page_search_url = search_url + '&page=' + str(i)
# logger.log(u"Search string: " + page_search_url, logger.DEBUG)
page_html = self.get_url(page_search_url)
if not page_html:
continue
data_response_list.append(page_html)
try:
for data_response in data_response_list:
with BS4Parser(data_response, 'html5lib') as html:
torrent_rows = html.findAll("tr", {"class": re.compile('torrent_[0-9]*')})
# Continue only if a Release is found
if len(torrent_rows) == 0:
logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
continue
for individual_torrent in torrent_rows:
# skip if torrent has been nuked due to poor quality
if individual_torrent.find('img', alt='Nuked') is not None:
continue
try:
title = individual_torrent.find('a', {'class': 'torrent_name_link'})['title']
except Exception:
logger.log(u"Unable to parse torrent title. Traceback: %s " % traceback.format_exc(), logger.WARNING)
continue
try:
details_url = individual_torrent.find('a', {'class': 'torrent_name_link'})['href']
torrent_id = int((re.match('.*?([0-9]+)$', details_url).group(1)).strip())
download_url = self.urls['download'] % (str(torrent_id))
seeders = try_int(individual_torrent.find('td', {'class': 'table_seeders'}).find('span').text.strip(), 1)
leechers = try_int(individual_torrent.find('td', {'class': 'table_leechers'}).find('a').text.strip(), 0)
torrent_size = individual_torrent.find('td', {'class': 'table_size'}).get_text()
size = convert_size(torrent_size) or -1
except Exception:
continue
if not all([title, download_url]):
continue
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
item = title, download_url, size, seeders, leechers
if mode != 'RSS':
logger.log(u"Found result: %s with %s seeders and %s leechers" % (title, seeders, leechers), logger.DEBUG)
items.append(item)
except Exception:
logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.ERROR)
# For each search mode sort all the items by seeders if available
items.sort(key=lambda tup: tup[3], reverse=True)
results += items
return results
def seed_ratio(self):
return self.ratio
provider = FreshOnTVProvider()
|
gpl-3.0
|
customcommander/selenium
|
py/selenium/webdriver/remote/errorhandler.py
|
50
|
8394
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.common.exceptions import ElementNotSelectableException
from selenium.common.exceptions import ElementNotVisibleException
from selenium.common.exceptions import InvalidCookieDomainException
from selenium.common.exceptions import InvalidElementStateException
from selenium.common.exceptions import InvalidSelectorException
from selenium.common.exceptions import ImeNotAvailableException
from selenium.common.exceptions import ImeActivationFailedException
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoSuchFrameException
from selenium.common.exceptions import NoSuchWindowException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import UnableToSetCookieException
from selenium.common.exceptions import UnexpectedAlertPresentException
from selenium.common.exceptions import NoAlertPresentException
from selenium.common.exceptions import ErrorInResponseException
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import MoveTargetOutOfBoundsException
try:
basestring
except NameError: # Python 3.x
basestring = str
class ErrorCode(object):
"""
Error codes defined in the WebDriver wire protocol.
"""
# Keep in sync with org.openqa.selenium.remote.ErrorCodes and errorcodes.h
SUCCESS = 0
NO_SUCH_ELEMENT = [7, 'no such element']
NO_SUCH_FRAME = [8, 'no such frame']
UNKNOWN_COMMAND = [9, 'unknown command']
STALE_ELEMENT_REFERENCE = [10, 'stale element reference']
ELEMENT_NOT_VISIBLE = [11, 'element not visible']
INVALID_ELEMENT_STATE = [12, 'invalid element state']
UNKNOWN_ERROR = [13, 'unknown error']
ELEMENT_IS_NOT_SELECTABLE = [15, 'element not selectable']
JAVASCRIPT_ERROR = [17, 'javascript error']
XPATH_LOOKUP_ERROR = [19, 'invalid selector']
TIMEOUT = [21, 'timeout']
NO_SUCH_WINDOW = [23, 'no such window']
INVALID_COOKIE_DOMAIN = [24, 'invalid cookie domain']
UNABLE_TO_SET_COOKIE = [25, 'unable to set cookie']
UNEXPECTED_ALERT_OPEN = [26, 'unexpected alert open']
NO_ALERT_OPEN = [27, 'no such alert']
SCRIPT_TIMEOUT = [28, 'script timeout']
INVALID_ELEMENT_COORDINATES = [29, 'invalid element coordinates']
IME_NOT_AVAILABLE = [30, 'ime not available']
IME_ENGINE_ACTIVATION_FAILED = [31, 'ime engine activation failed']
INVALID_SELECTOR = [32, 'invalid selector']
MOVE_TARGET_OUT_OF_BOUNDS = [34, 'move target out of bounds']
INVALID_XPATH_SELECTOR = [51, 'invalid selector']
INVALID_XPATH_SELECTOR_RETURN_TYPER = [52, 'invalid selector']
METHOD_NOT_ALLOWED = [405, 'unsupported operation']
class ErrorHandler(object):
"""
Handles errors returned by the WebDriver server.
"""
def check_response(self, response):
"""
Checks that a JSON response from the WebDriver does not have an error.
:Args:
- response - The JSON response from the WebDriver server as a dictionary
object.
:Raises: If the response contains an error message.
"""
status = response.get('status', None)
if status is None or status == ErrorCode.SUCCESS:
return
value = None
message = response.get("message", "")
screen = response.get("screen", "")
stacktrace = None
if isinstance(status, int):
value_json = response.get('value', None)
if value_json and isinstance(value_json, basestring):
import json
value = json.loads(value_json)
status = value['status']
message = value['message']
exception_class = ErrorInResponseException
if status in ErrorCode.NO_SUCH_ELEMENT:
exception_class = NoSuchElementException
elif status in ErrorCode.NO_SUCH_FRAME:
exception_class = NoSuchFrameException
elif status in ErrorCode.NO_SUCH_WINDOW:
exception_class = NoSuchWindowException
elif status in ErrorCode.STALE_ELEMENT_REFERENCE:
exception_class = StaleElementReferenceException
elif status in ErrorCode.ELEMENT_NOT_VISIBLE:
exception_class = ElementNotVisibleException
elif status in ErrorCode.INVALID_ELEMENT_STATE:
exception_class = InvalidElementStateException
elif status in ErrorCode.INVALID_SELECTOR \
or status in ErrorCode.INVALID_XPATH_SELECTOR \
or status in ErrorCode.INVALID_XPATH_SELECTOR_RETURN_TYPER:
exception_class = InvalidSelectorException
elif status in ErrorCode.ELEMENT_IS_NOT_SELECTABLE:
exception_class = ElementNotSelectableException
elif status in ErrorCode.INVALID_COOKIE_DOMAIN:
exception_class = WebDriverException
elif status in ErrorCode.UNABLE_TO_SET_COOKIE:
exception_class = WebDriverException
elif status in ErrorCode.TIMEOUT:
exception_class = TimeoutException
elif status in ErrorCode.SCRIPT_TIMEOUT:
exception_class = TimeoutException
elif status in ErrorCode.UNKNOWN_ERROR:
exception_class = WebDriverException
elif status in ErrorCode.UNEXPECTED_ALERT_OPEN:
exception_class = UnexpectedAlertPresentException
elif status in ErrorCode.NO_ALERT_OPEN:
exception_class = NoAlertPresentException
elif status in ErrorCode.IME_NOT_AVAILABLE:
exception_class = ImeNotAvailableException
elif status in ErrorCode.IME_ENGINE_ACTIVATION_FAILED:
exception_class = ImeActivationFailedException
elif status in ErrorCode.MOVE_TARGET_OUT_OF_BOUNDS:
exception_class = MoveTargetOutOfBoundsException
else:
exception_class = WebDriverException
value = response['value']
if isinstance(value, basestring):
if exception_class == ErrorInResponseException:
raise exception_class(response, value)
raise exception_class(value)
message = ''
if 'message' in value:
message = value['message']
screen = None
if 'screen' in value:
screen = value['screen']
stacktrace = None
if 'stackTrace' in value and value['stackTrace']:
stacktrace = []
try:
for frame in value['stackTrace']:
line = self._value_or_default(frame, 'lineNumber', '')
file = self._value_or_default(frame, 'fileName', '<anonymous>')
if line:
file = "%s:%s" % (file, line)
meth = self._value_or_default(frame, 'methodName', '<anonymous>')
if 'className' in frame:
meth = "%s.%s" % (frame['className'], meth)
msg = " at %s (%s)"
msg = msg % (meth, file)
stacktrace.append(msg)
except TypeError:
pass
if exception_class == ErrorInResponseException:
raise exception_class(response, message)
elif exception_class == UnexpectedAlertPresentException and 'alert' in value:
raise exception_class(message, screen, stacktrace, value['alert'].get('text'))
raise exception_class(message, screen, stacktrace)
def _value_or_default(self, obj, key, default):
return obj[key] if key in obj else default
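# Editorial sketch (not part of the original module): a minimal, guarded
# demonstration of how check_response() maps a server payload to an exception.
# The payload below is hypothetical; its shape follows the responses handled above.
if __name__ == '__main__':
    _handler = ErrorHandler()
    try:
        _handler.check_response({'status': 7, 'value': {'message': 'demo: element missing'}})
    except NoSuchElementException as exc:
        print('status 7 was mapped to %s' % type(exc).__name__)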
|
apache-2.0
|
eduNEXT/edunext-platform
|
openedx/core/djangoapps/credit/tasks.py
|
4
|
5234
|
"""
This file contains celery tasks for credit course views.
"""
import six
from celery import task
from celery.utils.log import get_task_logger
from django.conf import settings
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
from openedx.core.djangoapps.credit.api import set_credit_requirements
from openedx.core.djangoapps.credit.exceptions import InvalidCreditRequirements
from openedx.core.djangoapps.credit.models import CreditCourse
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
LOGGER = get_task_logger(__name__)
@task(default_retry_delay=settings.CREDIT_TASK_DEFAULT_RETRY_DELAY, max_retries=settings.CREDIT_TASK_MAX_RETRIES)
def update_credit_course_requirements(course_id):
"""
Updates course requirements table for a course.
Args:
course_id(str): A string representation of course identifier
Returns:
None
"""
try:
course_key = CourseKey.from_string(course_id)
is_credit_course = CreditCourse.is_credit_course(course_key)
if is_credit_course:
requirements = _get_course_credit_requirements(course_key)
set_credit_requirements(course_key, requirements)
except (InvalidKeyError, ItemNotFoundError, InvalidCreditRequirements) as exc:
LOGGER.error(u'Error on adding the requirements for course %s - %s', course_id, six.text_type(exc))
raise update_credit_course_requirements.retry(args=[course_id], exc=exc)
else:
LOGGER.info(u'Requirements added for course %s', course_id)
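# Editorial sketch -- how this task is typically enqueued from application code;
# the course id string below is a hypothetical example, not taken from this repo:
#
#     update_credit_course_requirements.delay('course-v1:ExampleX+CS101+2024')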
def _get_course_credit_requirements(course_key):
"""
Returns the list of credit requirements for the given course.
    This will also call into the edx-proctoring subsystem to produce
    proctored exam requirements for credit-bearing courses.
    It returns the minimum_grade_credit requirement and also the ICRV
    checkpoints, if any were added in the course.
Args:
course_key (CourseKey): Identifier for the course.
Returns:
List of credit requirements (dictionaries)
"""
min_grade_requirement = _get_min_grade_requirement(course_key)
proctored_exams_requirements = _get_proctoring_requirements(course_key)
sorted_exam_requirements = sorted(
proctored_exams_requirements, key=lambda x: (x['start_date'] is None, x['start_date'], x['display_name'])
)
credit_requirements = (
min_grade_requirement + sorted_exam_requirements
)
return credit_requirements
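# Editorial note -- the sort key above pushes exams without a start_date to the
# end ((True, None, ...) sorts after (False, <date>, ...)) and then orders by
# start_date and display_name. A minimal, hypothetical illustration:
#
#     exams = [{'start_date': None, 'display_name': 'B'},
#              {'start_date': datetime(2020, 1, 1), 'display_name': 'A'}]
#     sorted(exams, key=lambda x: (x['start_date'] is None, x['start_date'],
#                                  x['display_name']))
#     # -> dated exam 'A' first, undated exam 'B' last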
def _get_min_grade_requirement(course_key):
"""
Get list of 'minimum_grade_credit' requirement for the given course.
Args:
course_key (CourseKey): Identifier for the course.
Returns:
The list of minimum_grade_credit requirements
"""
course = modulestore().get_course(course_key, depth=0)
try:
return [
{
"namespace": "grade",
"name": "grade",
"display_name": "Minimum Grade",
"criteria": {
"min_grade": course.minimum_grade_credit
},
}
]
    except AttributeError:
        LOGGER.error(u"The course %s does not have a minimum_grade_credit attribute", six.text_type(course.id))
        return []
def _get_proctoring_requirements(course_key):
"""
Will return list of requirements regarding any exams that have been
marked as proctored exams. For credit-bearing courses, all
proctored exams must be validated and confirmed from a proctoring
standpoint. The passing grade on an exam is not enough.
Args:
course_key: The key of the course in question
Returns:
list of requirements dictionary, one per active proctored exam
"""
# Note: Need to import here as there appears to be
# a circular reference happening when launching Studio
# process
from edx_proctoring.api import get_all_exams_for_course
requirements = []
for exam in get_all_exams_for_course(six.text_type(course_key)):
if exam['is_proctored'] and exam['is_active'] and not exam['is_practice_exam']:
try:
usage_key = UsageKey.from_string(exam['content_id'])
proctor_block = modulestore().get_item(usage_key)
except (InvalidKeyError, ItemNotFoundError):
LOGGER.info(u"Invalid content_id '%s' for proctored block '%s'", exam['content_id'], exam['exam_name'])
proctor_block = None
if proctor_block:
requirements.append(
{
'namespace': 'proctored_exam',
'name': exam['content_id'],
'display_name': exam['exam_name'],
'start_date': proctor_block.start if proctor_block.start else None,
'criteria': {},
})
if requirements:
log_msg = (
u'Registering the following as \'proctored_exam\' credit requirements: {log_msg}'.format(
log_msg=requirements
)
)
LOGGER.info(log_msg)
return requirements
|
agpl-3.0
|
mozilla/build-mozharness
|
external_tools/mouse_and_screen_resolution.py
|
12
|
5541
|
#! /usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Script name: mouse_and_screen_resolution.py
# Purpose: Sets mouse position and screen resolution for Windows 7 32-bit slaves
# Author(s): Zambrano Gasparnian, Armen <[email protected]>
# Target: Python 2.5 or newer
#
from optparse import OptionParser
from ctypes import windll, Structure, c_ulong, byref
try:
import json
except ImportError:
import simplejson as json
import os
import sys
import urllib2
import socket
import platform
import time
default_screen_resolution = {"x": 1024, "y": 768}
default_mouse_position = {"x": 1010, "y": 10}
def wfetch(url, retries=5):
while True:
try:
return urllib2.urlopen(url, timeout=30).read()
except urllib2.HTTPError, e:
print("Failed to fetch '%s': %s" % (url, str(e)))
except urllib2.URLError, e:
print("Failed to fetch '%s': %s" % (url, str(e)))
except socket.timeout, e:
print("Time out accessing %s: %s" % (url, str(e)))
except socket.error, e:
print("Socket error when accessing %s: %s" % (url, str(e)))
if retries < 0:
raise Exception("Could not fetch url '%s'" % url)
retries -= 1
print("Retrying")
time.sleep(60)
def main():
'''
We load the configuration file from:
https://hg.mozilla.org/mozilla-central/raw-file/default/build/machine-configuration.json
'''
parser = OptionParser()
parser.add_option(
"--configuration-url", dest="configuration_url", type="string",
help="It indicates from where to download the configuration file.")
(options, args) = parser.parse_args()
if options.configuration_url == None:
print "You need to specify --configuration-url."
return 1
if not (platform.version().startswith('6.1.760') and not 'PROGRAMFILES(X86)' in os.environ):
# We only want to run this for Windows 7 32-bit
print "INFO: This script was written to be used with Windows 7 32-bit machines."
return 0
try:
conf_dict = json.loads(wfetch(options.configuration_url))
new_screen_resolution = conf_dict["win7"]["screen_resolution"]
new_mouse_position = conf_dict["win7"]["mouse_position"]
except urllib2.HTTPError, e:
print "This branch does not seem to have the configuration file %s" % str(e)
print "Let's fail over to 1024x768."
new_screen_resolution = default_screen_resolution
new_mouse_position = default_mouse_position
except urllib2.URLError, e:
print "INFRA-ERROR: We couldn't reach hg.mozilla.org: %s" % str(e)
return 1
except Exception, e:
print "ERROR: We were not expecting any more exceptions: %s" % str(e)
return 1
current_screen_resolution = queryScreenResolution()
print "Screen resolution (current): (%(x)s, %(y)s)" % (current_screen_resolution)
if current_screen_resolution == new_screen_resolution:
print "No need to change the screen resolution."
else:
print "Changing the screen resolution..."
try:
changeScreenResolution(new_screen_resolution["x"], new_screen_resolution["y"])
except Exception, e:
print "INFRA-ERROR: We have attempted to change the screen resolution but " + \
"something went wrong: %s" % str(e)
return 1
time.sleep(3) # just in case
current_screen_resolution = queryScreenResolution()
print "Screen resolution (new): (%(x)s, %(y)s)" % current_screen_resolution
print "Mouse position (current): (%(x)s, %(y)s)" % (queryMousePosition())
setCursorPos(new_mouse_position["x"], new_mouse_position["y"])
current_mouse_position = queryMousePosition()
print "Mouse position (new): (%(x)s, %(y)s)" % (current_mouse_position)
if current_screen_resolution != new_screen_resolution or current_mouse_position != new_mouse_position:
print "INFRA-ERROR: The new screen resolution or mouse positions are not what we expected"
return 1
else:
return 0
class POINT(Structure):
_fields_ = [("x", c_ulong), ("y", c_ulong)]
def queryMousePosition():
pt = POINT()
windll.user32.GetCursorPos(byref(pt))
return { "x": pt.x, "y": pt.y}
def setCursorPos(x, y):
windll.user32.SetCursorPos(x, y)
def queryScreenResolution():
return {"x": windll.user32.GetSystemMetrics(0),
"y": windll.user32.GetSystemMetrics(1)}
def changeScreenResolution(xres = None, yres = None, BitsPerPixel = None):
import struct
DM_BITSPERPEL = 0x00040000
DM_PELSWIDTH = 0x00080000
DM_PELSHEIGHT = 0x00100000
CDS_FULLSCREEN = 0x00000004
SIZEOF_DEVMODE = 148
DevModeData = struct.calcsize("32BHH") * '\x00'
DevModeData += struct.pack("H", SIZEOF_DEVMODE)
DevModeData += struct.calcsize("H") * '\x00'
dwFields = (xres and DM_PELSWIDTH or 0) | (yres and DM_PELSHEIGHT or 0) | (BitsPerPixel and DM_BITSPERPEL or 0)
DevModeData += struct.pack("L", dwFields)
DevModeData += struct.calcsize("l9h32BHL") * '\x00'
DevModeData += struct.pack("LLL", BitsPerPixel or 0, xres or 0, yres or 0)
DevModeData += struct.calcsize("8L") * '\x00'
return windll.user32.ChangeDisplaySettingsA(DevModeData, 0)
if __name__ == '__main__':
sys.exit(main())
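# Editorial sketch -- a typical invocation on a Windows 7 32-bit test machine,
# using the configuration URL mentioned in main()'s docstring:
#
#     python mouse_and_screen_resolution.py --configuration-url \
#         https://hg.mozilla.org/mozilla-central/raw-file/default/build/machine-configuration.json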
|
mpl-2.0
|
JocelynDelalande/xhtml2pdf
|
demo/cherrypy/demo-cherrypy.py
|
154
|
2187
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
#############################################
## (C)opyright by Dirk Holtwick, 2008 ##
## All rights reserved ##
#############################################
import cherrypy as cp
import sx.pisa3 as pisa
import cStringIO as StringIO
try:
import kid
except ImportError:
kid = None
class PDFDemo(object):
"""
Simple demo showing a form where you can enter some HTML code.
After sending PISA is used to convert HTML to PDF and publish
it directly.
"""
@cp.expose
def index(self):
if kid:
return file("demo-cherrypy.html","r").read()
return """
<html><body>
Please enter some HTML code:
<form action="download" method="post" enctype="multipart/form-data">
<textarea name="data">Hello <strong>World</strong></textarea>
<br />
<input type="submit" value="Convert HTML to PDF" />
</form>
</body></html>
"""
@cp.expose
def download(self, data):
if kid:
data = """<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"
xmlns:py="http://purl.org/kid/ns#">
<head>
<title>PDF Demo</title>
</head>
<body>%s</body>
</html>""" % data
test = kid.Template(source=data)
data = test.serialize(output='xhtml')
result = StringIO.StringIO()
pdf = pisa.CreatePDF(
StringIO.StringIO(data),
result
)
if pdf.err:
return "We had some errors in HTML"
else:
cp.response.headers["content-type"] = "application/pdf"
return result.getvalue()
cp.tree.mount(PDFDemo())
if __name__ == '__main__':
import os.path
cp.config.update(os.path.join(__file__.replace(".py", ".conf")))
cp.server.quickstart()
cp.engine.start()
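# Editorial sketch -- once the server is up, the /download handler can be
# exercised without a browser; the port is an assumption taken from CherryPy's
# default (the real value comes from demo-cherrypy.conf):
#
#     curl -o out.pdf -F "data=Hello <strong>World</strong>" http://localhost:8080/download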
|
apache-2.0
|
vitaly-krugl/nupic
|
examples/opf/experiments/multistep/base/permutations_simple_3.py
|
10
|
4041
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by ExpGenerator to generate the actual
permutations.py file by replacing $XXXXXXXX tokens with desired values.
This permutations.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
import os
from pkg_resources import resource_filename
from nupic.swarming.permutationhelpers import *
# The name of the field being predicted. Any allowed permutation MUST contain
# the prediction field.
# (generated from PREDICTION_FIELD)
predictedField = 'field2'
permutations = {
# Encoder permutation choices
# Example:
#
# '__gym_encoder' : PermuteEncoder('gym', 'SDRCategoryEncoder', w=7,
# n=100),
#
# '__address_encoder' : PermuteEncoder('address', 'SDRCategoryEncoder',
# w=7, n=100),
#
# '__timestamp_timeOfDay_encoder' : PermuteEncoder('timestamp',
# 'DateEncoder.timeOfDay', w=7, radius=PermuteChoices([1, 8])),
#
# '__timestamp_dayOfWeek_encoder' : PermuteEncoder('timestamp',
# 'DateEncoder.dayOfWeek', w=7, radius=PermuteChoices([1, 3])),
#
# '__consumption_encoder' : PermuteEncoder('consumption', 'ScalarEncoder',
# w=7, n=PermuteInt(13, 500, 20), minval=0,
# maxval=PermuteInt(100, 300, 25)),
#
# (generated from PERM_ENCODER_CHOICES)
'predictedField': 'field2',
'predictionSteps': [1,3],
  'dataSource': 'file://%s' % (resource_filename(
      "nupic", os.path.join("examples", "opf", "experiments", "multistep",
                            "datasets", "simple_3.csv"))),
'__field2_encoder' : PermuteEncoder(fieldName='field2',
clipInput=True, minval = 0, maxval=50,
encoderClass='ScalarEncoder',
w=21, n=PermuteChoices([500])),
}
# Fields selected for final hypersearch report;
# NOTE: These values are used as regular expressions by RunPermutations.py's
# report generator
# (fieldname values generated from PERM_PREDICTED_FIELD_NAME)
report = [
'.*field2.*',
]
# Permutation optimization setting: either minimize or maximize metric
# used by RunPermutations.
# NOTE: The value is used as a regular expressions by RunPermutations.py's
# report generator
# (generated from minimize = 'prediction:aae:window=1000:field=consumption')
minimize = "multiStepBestPredictions:multiStep:errorMetric='aae':steps=3:window=200:field=field2"
def permutationFilter(perm):
""" This function can be used to selectively filter out specific permutation
combinations. It is called by RunPermutations for every possible permutation
  of the variables in the permutations dict. It should return True for a valid
  combination of permutation values and False for an invalid one.
Parameters:
---------------------------------------------------------
perm: dict of one possible combination of name:value
pairs chosen from permutations.
"""
# An example of how to use this
#if perm['__consumption_encoder']['maxval'] > 300:
# return False;
#
return True
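# Editorial sketch -- a quick, self-contained check of permutationFilter() with a
# hypothetical candidate permutation (keys mirror the 'permutations' dict above):
#
#     candidate = {'__field2_encoder': {'maxval': 50, 'n': 500, 'w': 21}}
#     assert permutationFilter(candidate) is True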
|
agpl-3.0
|